summaryrefslogtreecommitdiff
path: root/vendor/github.com/hashicorp/go-memdb
diff options
context:
space:
mode:
authormo khan <mo@mokhan.ca>2025-07-22 17:35:49 -0600
committermo khan <mo@mokhan.ca>2025-07-22 17:35:49 -0600
commit20ef0d92694465ac86b550df139e8366a0a2b4fa (patch)
tree3f14589e1ce6eb9306a3af31c3a1f9e1af5ed637 /vendor/github.com/hashicorp/go-memdb
parent44e0d272c040cdc53a98b9f1dc58ae7da67752e6 (diff)
feat: connect to spicedb
Diffstat (limited to 'vendor/github.com/hashicorp/go-memdb')
-rw-r--r--vendor/github.com/hashicorp/go-memdb/.gitignore26
-rw-r--r--vendor/github.com/hashicorp/go-memdb/CODEOWNERS13
-rw-r--r--vendor/github.com/hashicorp/go-memdb/LICENSE365
-rw-r--r--vendor/github.com/hashicorp/go-memdb/README.md146
-rw-r--r--vendor/github.com/hashicorp/go-memdb/changes.go37
-rw-r--r--vendor/github.com/hashicorp/go-memdb/filter.go41
-rw-r--r--vendor/github.com/hashicorp/go-memdb/index.go934
-rw-r--r--vendor/github.com/hashicorp/go-memdb/memdb.go119
-rw-r--r--vendor/github.com/hashicorp/go-memdb/schema.go117
-rw-r--r--vendor/github.com/hashicorp/go-memdb/txn.go1024
-rw-r--r--vendor/github.com/hashicorp/go-memdb/watch.go155
-rw-r--r--vendor/github.com/hashicorp/go-memdb/watch_few.go120
12 files changed, 3097 insertions, 0 deletions
diff --git a/vendor/github.com/hashicorp/go-memdb/.gitignore b/vendor/github.com/hashicorp/go-memdb/.gitignore
new file mode 100644
index 0000000..11b90db
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/.gitignore
@@ -0,0 +1,26 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+
+.idea
diff --git a/vendor/github.com/hashicorp/go-memdb/CODEOWNERS b/vendor/github.com/hashicorp/go-memdb/CODEOWNERS
new file mode 100644
index 0000000..2cad39a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/CODEOWNERS
@@ -0,0 +1,13 @@
+# Each line is a file pattern followed by one or more owners.
+# More on CODEOWNERS files: https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners
+
+# Default owner
+* @hashicorp/team-ip-compliance @hashicorp/raft-force
+
+# Add override rules below. Each line is a file/folder pattern followed by one or more owners.
+# Being an owner means those groups or individuals will be added as reviewers to PRs affecting
+# those areas of the code.
+# Examples:
+# /docs/ @docs-team
+# *.js @js-team
+# *.go @go-team
diff --git a/vendor/github.com/hashicorp/go-memdb/LICENSE b/vendor/github.com/hashicorp/go-memdb/LICENSE
new file mode 100644
index 0000000..f4f97ee
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/LICENSE
@@ -0,0 +1,365 @@
+Copyright (c) 2015 HashiCorp, Inc.
+
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the terms of
+ a Secondary License.
+
+1.6. "Executable Form"
+
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+ means a work that combines Covered Software with other material, in a
+ separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+ means this document.
+
+1.9. "Licensable"
+
+ means having the right to grant, to the maximum extent possible, whether
+ at the time of the initial grant or subsequently, any and all of the
+ rights conveyed by this License.
+
+1.10. "Modifications"
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the License,
+ by the making, using, selling, offering for sale, having made, import,
+ or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, "control" means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution
+ become effective for each Contribution on the date the Contributor first
+ distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under
+ this License. No additional rights or licenses will be implied from the
+ distribution or licensing of Covered Software under this License.
+ Notwithstanding Section 2.1(b) above, no patent license is granted by a
+ Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+ This License does not grant any rights in the trademarks, service marks,
+ or logos of any Contributor (except as may be necessary to comply with
+ the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this
+ License (see Section 10.2) or under the terms of a Secondary License (if
+ permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its
+ Contributions are its original creation(s) or it has sufficient rights to
+ grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under
+ applicable copyright doctrines of fair use, fair dealing, or other
+ equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under
+ the terms of this License. You must inform recipients that the Source
+ Code Form of the Covered Software is governed by the terms of this
+ License, and how they can obtain a copy of this License. You may not
+ attempt to alter or restrict the recipients' rights in the Source Code
+ Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter the
+ recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for
+ the Covered Software. If the Larger Work is a combination of Covered
+ Software with a work governed by one or more Secondary Licenses, and the
+ Covered Software is not Incompatible With Secondary Licenses, this
+ License permits You to additionally distribute such Covered Software
+ under the terms of such Secondary License(s), so that the recipient of
+ the Larger Work may, at their option, further distribute the Covered
+ Software under the terms of either this License or such Secondary
+ License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices
+ (including copyright notices, patent notices, disclaimers of warranty, or
+ limitations of liability) contained within the Source Code Form of the
+ Covered Software, except that You may alter any license notices to the
+ extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on
+ behalf of any Contributor. You must make it absolutely clear that any
+ such warranty, support, indemnity, or liability obligation is offered by
+ You alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute,
+ judicial order, or regulation then You must: (a) comply with the terms of
+ this License to the maximum extent possible; and (b) describe the
+ limitations and the code they affect. Such description must be placed in a
+ text file included with all distributions of the Covered Software under
+ this License. Except to the extent prohibited by statute or regulation,
+ such description must be sufficiently detailed for a recipient of ordinary
+ skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing
+ basis, if such Contributor fails to notify You of the non-compliance by
+ some reasonable means prior to 60 days after You have come back into
+ compliance. Moreover, Your grants from a particular Contributor are
+ reinstated on an ongoing basis if such Contributor notifies You of the
+ non-compliance by some reasonable means, this is the first time You have
+ received notice of non-compliance with this License from such
+ Contributor, and You become compliant prior to 30 days after Your receipt
+ of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions,
+ counter-claims, and cross-claims) alleging that a Contributor Version
+ directly or indirectly infringes any patent, then the rights granted to
+ You by any and all Contributors for the Covered Software under Section
+ 2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an "as is" basis,
+ without warranty of any kind, either expressed, implied, or statutory,
+ including, without limitation, warranties that the Covered Software is free
+ of defects, merchantable, fit for a particular purpose or non-infringing.
+ The entire risk as to the quality and performance of the Covered Software
+ is with You. Should any Covered Software prove defective in any respect,
+ You (not any Contributor) assume the cost of any necessary servicing,
+ repair, or correction. This disclaimer of warranty constitutes an essential
+ part of this License. No use of any Covered Software is authorized under
+ this License except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from
+ such party's negligence to the extent applicable law prohibits such
+ limitation. Some jurisdictions do not allow the exclusion or limitation of
+ incidental or consequential damages, so this exclusion and limitation may
+ not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts
+ of a jurisdiction where the defendant maintains its principal place of
+ business and such litigation shall be governed by laws of that
+ jurisdiction, without reference to its conflict-of-law provisions. Nothing
+ in this Section shall prevent a party's ability to bring cross-claims or
+ counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. Any law or regulation which provides that
+ the language of a contract shall be construed against the drafter shall not
+ be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version
+ of the License under which You originally received the Covered Software,
+ or under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a
+ modified version of this License if you rename the license and remove
+ any references to the name of the license steward (except to note that
+ such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+ Licenses If You choose to distribute Source Code Form that is
+ Incompatible With Secondary Licenses under the terms of this version of
+ the License, the notice described in Exhibit B of this License must be
+ attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+ This Source Code Form is "Incompatible
+ With Secondary Licenses", as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-memdb/README.md b/vendor/github.com/hashicorp/go-memdb/README.md
new file mode 100644
index 0000000..080b744
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/README.md
@@ -0,0 +1,146 @@
+# go-memdb [![CircleCI](https://circleci.com/gh/hashicorp/go-memdb/tree/master.svg?style=svg)](https://circleci.com/gh/hashicorp/go-memdb/tree/master)
+
+Provides the `memdb` package that implements a simple in-memory database
+built on immutable radix trees. The database provides Atomicity, Consistency
+and Isolation from ACID. Being that it is in-memory, it does not provide durability.
+The database is instantiated with a schema that specifies the tables and indices
+that exist and allows transactions to be executed.
+
+The database provides the following:
+
+* Multi-Version Concurrency Control (MVCC) - By leveraging immutable radix trees
+ the database is able to support any number of concurrent readers without locking,
+ and allows a writer to make progress.
+
+* Transaction Support - The database allows for rich transactions, in which multiple
+ objects are inserted, updated or deleted. The transactions can span multiple tables,
+ and are applied atomically. The database provides atomicity and isolation in ACID
+ terminology, such that until commit the updates are not visible.
+
+* Rich Indexing - Tables can support any number of indexes, which can be simple like
+ a single field index, or more advanced compound field indexes. Certain types like
+ UUID can be efficiently compressed from strings into byte indexes for reduced
+ storage requirements.
+
+* Watches - Callers can populate a watch set as part of a query, which can be used to
+ detect when a modification has been made to the database which affects the query
+ results. This lets callers easily watch for changes in the database in a very general
+ way.
+
+For the underlying immutable radix trees, see [go-immutable-radix](https://github.com/hashicorp/go-immutable-radix).
+
+Documentation
+=============
+
+The full documentation is available on [Godoc](https://pkg.go.dev/github.com/hashicorp/go-memdb).
+
+Example
+=======
+
+Below is a [simple example](https://play.golang.org/p/gCGE9FA4og1) of usage
+
+```go
+// Create a sample struct
+type Person struct {
+ Email string
+ Name string
+ Age int
+}
+
+// Create the DB schema
+schema := &memdb.DBSchema{
+ Tables: map[string]*memdb.TableSchema{
+ "person": &memdb.TableSchema{
+ Name: "person",
+ Indexes: map[string]*memdb.IndexSchema{
+ "id": &memdb.IndexSchema{
+ Name: "id",
+ Unique: true,
+ Indexer: &memdb.StringFieldIndex{Field: "Email"},
+ },
+ "age": &memdb.IndexSchema{
+ Name: "age",
+ Unique: false,
+ Indexer: &memdb.IntFieldIndex{Field: "Age"},
+ },
+ },
+ },
+ },
+}
+
+// Create a new database
+db, err := memdb.NewMemDB(schema)
+if err != nil {
+ panic(err)
+}
+
+// Create a write transaction
+txn := db.Txn(true)
+
+// Insert some people
+people := []*Person{
+ &Person{"joe@aol.com", "Joe", 30},
+ &Person{"lucy@aol.com", "Lucy", 35},
+ &Person{"tariq@aol.com", "Tariq", 21},
+ &Person{"dorothy@aol.com", "Dorothy", 53},
+}
+for _, p := range people {
+ if err := txn.Insert("person", p); err != nil {
+ panic(err)
+ }
+}
+
+// Commit the transaction
+txn.Commit()
+
+// Create read-only transaction
+txn = db.Txn(false)
+defer txn.Abort()
+
+// Lookup by email
+raw, err := txn.First("person", "id", "joe@aol.com")
+if err != nil {
+ panic(err)
+}
+
+// Say hi!
+fmt.Printf("Hello %s!\n", raw.(*Person).Name)
+
+// List all the people
+it, err := txn.Get("person", "id")
+if err != nil {
+ panic(err)
+}
+
+fmt.Println("All the people:")
+for obj := it.Next(); obj != nil; obj = it.Next() {
+ p := obj.(*Person)
+ fmt.Printf(" %s\n", p.Name)
+}
+
+// Range scan over people with ages between 25 and 35 inclusive
+it, err = txn.LowerBound("person", "age", 25)
+if err != nil {
+ panic(err)
+}
+
+fmt.Println("People aged 25 - 35:")
+for obj := it.Next(); obj != nil; obj = it.Next() {
+ p := obj.(*Person)
+ if p.Age > 35 {
+ break
+ }
+ fmt.Printf(" %s is aged %d\n", p.Name, p.Age)
+}
+// Output:
+// Hello Joe!
+// All the people:
+// Dorothy
+// Joe
+// Lucy
+// Tariq
+// People aged 25 - 35:
+// Joe is aged 30
+// Lucy is aged 35
+```
+
diff --git a/vendor/github.com/hashicorp/go-memdb/changes.go b/vendor/github.com/hashicorp/go-memdb/changes.go
new file mode 100644
index 0000000..4761e92
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/changes.go
@@ -0,0 +1,37 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package memdb
+
+// Changes describes a set of mutations to memDB tables performed during a
+// transaction.
+type Changes []Change
+
+// Change describes a mutation to an object in a table.
+type Change struct {
+ Table string
+ Before interface{}
+ After interface{}
+
+ // primaryKey stores the raw key value from the primary index so that we can
+ // de-duplicate multiple updates of the same object in the same transaction
+ // but we don't expose this implementation detail to the consumer.
+ primaryKey []byte
+}
+
+// Created returns true if the mutation describes a new object being inserted.
+func (m *Change) Created() bool {
+ return m.Before == nil && m.After != nil
+}
+
+// Updated returns true if the mutation describes an existing object being
+// updated.
+func (m *Change) Updated() bool {
+ return m.Before != nil && m.After != nil
+}
+
+// Deleted returns true if the mutation describes an existing object being
+// deleted.
+func (m *Change) Deleted() bool {
+ return m.Before != nil && m.After == nil
+}
diff --git a/vendor/github.com/hashicorp/go-memdb/filter.go b/vendor/github.com/hashicorp/go-memdb/filter.go
new file mode 100644
index 0000000..2e13521
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/filter.go
@@ -0,0 +1,41 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package memdb
+
+// FilterFunc is a function that takes the results of an iterator and returns
+// whether the result should be filtered out.
+type FilterFunc func(interface{}) bool
+
+// FilterIterator is used to wrap a ResultIterator and apply a filter over it.
+type FilterIterator struct {
+ // filter is the filter function applied over the base iterator.
+ filter FilterFunc
+
+ // iter is the iterator that is being wrapped.
+ iter ResultIterator
+}
+
+// NewFilterIterator wraps a ResultIterator. The filter function is applied
+// to each value returned by a call to iter.Next.
+//
+// See the documentation for ResultIterator to understand the behaviour of the
+// returned FilterIterator.
+func NewFilterIterator(iter ResultIterator, filter FilterFunc) *FilterIterator {
+ return &FilterIterator{
+ filter: filter,
+ iter: iter,
+ }
+}
+
+// WatchCh returns the watch channel of the wrapped iterator.
+func (f *FilterIterator) WatchCh() <-chan struct{} { return f.iter.WatchCh() }
+
+// Next returns the next non-filtered result from the wrapped iterator.
+func (f *FilterIterator) Next() interface{} {
+ for {
+ if value := f.iter.Next(); value == nil || !f.filter(value) {
+ return value
+ }
+ }
+}
diff --git a/vendor/github.com/hashicorp/go-memdb/index.go b/vendor/github.com/hashicorp/go-memdb/index.go
new file mode 100644
index 0000000..588e1c8
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/index.go
@@ -0,0 +1,934 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package memdb
+
+import (
+ "encoding/binary"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// Indexer is an interface used for defining indexes. Indexes are used
+// for efficient lookup of objects in a MemDB table. An Indexer must also
+// implement one of SingleIndexer or MultiIndexer.
+//
+// Indexers are primarily responsible for returning the lookup key as
+// a byte slice. The byte slice is the key data in the underlying data storage.
+type Indexer interface {
+ // FromArgs is called to build the exact index key from a list of arguments.
+ FromArgs(args ...interface{}) ([]byte, error)
+}
+
+// SingleIndexer is an interface used for defining indexes that generate a
+// single value per object
+type SingleIndexer interface {
+ // FromObject extracts the index value from an object. The return values
+ // are whether the index value was found, the index value, and any error
+ // while extracting the index value, respectively.
+ FromObject(raw interface{}) (bool, []byte, error)
+}
+
+// MultiIndexer is an interface used for defining indexes that generate
+// multiple values per object. Each value is stored as a separate index
+// pointing to the same object.
+//
+// For example, an index that extracts the first and last name of a person
+// and allows lookup based on either would be a MultiIndexer. The FromObject
+// of this example would split the first and last name and return both as
+// values.
+type MultiIndexer interface {
+ // FromObject extracts index values from an object. The return values
+ // are the same as a SingleIndexer except there can be multiple index
+ // values.
+ FromObject(raw interface{}) (bool, [][]byte, error)
+}
+
+// PrefixIndexer is an optional interface on top of an Indexer that allows
+// indexes to support prefix-based iteration.
+type PrefixIndexer interface {
+ // PrefixFromArgs is the same as FromArgs for an Indexer except that
+ // the index value returned should return all prefix-matched values.
+ PrefixFromArgs(args ...interface{}) ([]byte, error)
+}
+
+// StringFieldIndex is used to extract a field from an object
+// using reflection and builds an index on that field.
+type StringFieldIndex struct {
+ Field string
+ Lowercase bool
+}
+
+func (s *StringFieldIndex) FromObject(obj interface{}) (bool, []byte, error) {
+ v := reflect.ValueOf(obj)
+ v = reflect.Indirect(v) // Dereference the pointer if any
+
+ fv := v.FieldByName(s.Field)
+ isPtr := fv.Kind() == reflect.Ptr
+ fv = reflect.Indirect(fv)
+ if !isPtr && !fv.IsValid() {
+ return false, nil,
+ fmt.Errorf("field '%s' for %#v is invalid %v ", s.Field, obj, isPtr)
+ }
+
+ if isPtr && !fv.IsValid() {
+ val := ""
+ return false, []byte(val), nil
+ }
+
+ val := fv.String()
+ if val == "" {
+ return false, nil, nil
+ }
+
+ if s.Lowercase {
+ val = strings.ToLower(val)
+ }
+
+ // Add the null character as a terminator
+ val += "\x00"
+ return true, []byte(val), nil
+}
+
+func (s *StringFieldIndex) FromArgs(args ...interface{}) ([]byte, error) {
+ if len(args) != 1 {
+ return nil, fmt.Errorf("must provide only a single argument")
+ }
+ arg, ok := args[0].(string)
+ if !ok {
+ return nil, fmt.Errorf("argument must be a string: %#v", args[0])
+ }
+ if s.Lowercase {
+ arg = strings.ToLower(arg)
+ }
+ // Add the null character as a terminator
+ arg += "\x00"
+ return []byte(arg), nil
+}
+
+func (s *StringFieldIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) {
+ val, err := s.FromArgs(args...)
+ if err != nil {
+ return nil, err
+ }
+
+ // Strip the null terminator, the rest is a prefix
+ n := len(val)
+ if n > 0 {
+ return val[:n-1], nil
+ }
+ return val, nil
+}
+
+// StringSliceFieldIndex builds an index from a field on an object that is a
+// string slice ([]string). Each value within the string slice can be used for
+// lookup.
+type StringSliceFieldIndex struct {
+ Field string
+ Lowercase bool
+}
+
+func (s *StringSliceFieldIndex) FromObject(obj interface{}) (bool, [][]byte, error) {
+ v := reflect.ValueOf(obj)
+ v = reflect.Indirect(v) // Dereference the pointer if any
+
+ fv := v.FieldByName(s.Field)
+ if !fv.IsValid() {
+ return false, nil,
+ fmt.Errorf("field '%s' for %#v is invalid", s.Field, obj)
+ }
+
+ if fv.Kind() != reflect.Slice || fv.Type().Elem().Kind() != reflect.String {
+ return false, nil, fmt.Errorf("field '%s' is not a string slice", s.Field)
+ }
+
+ length := fv.Len()
+ vals := make([][]byte, 0, length)
+ for i := 0; i < fv.Len(); i++ {
+ val := fv.Index(i).String()
+ if val == "" {
+ continue
+ }
+
+ if s.Lowercase {
+ val = strings.ToLower(val)
+ }
+
+ // Add the null character as a terminator
+ val += "\x00"
+ vals = append(vals, []byte(val))
+ }
+ if len(vals) == 0 {
+ return false, nil, nil
+ }
+ return true, vals, nil
+}
+
+func (s *StringSliceFieldIndex) FromArgs(args ...interface{}) ([]byte, error) {
+ if len(args) != 1 {
+ return nil, fmt.Errorf("must provide only a single argument")
+ }
+ arg, ok := args[0].(string)
+ if !ok {
+ return nil, fmt.Errorf("argument must be a string: %#v", args[0])
+ }
+ if s.Lowercase {
+ arg = strings.ToLower(arg)
+ }
+ // Add the null character as a terminator
+ arg += "\x00"
+ return []byte(arg), nil
+}
+
+func (s *StringSliceFieldIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) {
+ val, err := s.FromArgs(args...)
+ if err != nil {
+ return nil, err
+ }
+
+ // Strip the null terminator, the rest is a prefix
+ n := len(val)
+ if n > 0 {
+ return val[:n-1], nil
+ }
+ return val, nil
+}
+
+// StringMapFieldIndex is used to extract a field of type map[string]string
+// from an object using reflection and builds an index on that field.
+//
+// Note that although FromArgs in theory supports using either one or
+// two arguments, there is a bug: FromObject only creates an index
+// using key/value, and does not also create an index using key. This
+// means a lookup using one argument will never actually work.
+//
+// It is currently left as-is to prevent backwards compatibility
+// issues.
+//
+// TODO: Fix this in the next major bump.
+type StringMapFieldIndex struct {
+ Field string
+ Lowercase bool
+}
+
+var MapType = reflect.MapOf(reflect.TypeOf(""), reflect.TypeOf("")).Kind()
+
+// FromObject indexes each map entry of the named field as a
+// "key\x00value\x00" byte key (one index value per entry). Entries with
+// an empty key are skipped. Returns (false, nil, nil) when no usable
+// entries remain, meaning the object is not indexed.
+func (s *StringMapFieldIndex) FromObject(obj interface{}) (bool, [][]byte, error) {
+	v := reflect.ValueOf(obj)
+	v = reflect.Indirect(v) // Dereference the pointer if any
+
+	fv := v.FieldByName(s.Field)
+	if !fv.IsValid() {
+		return false, nil, fmt.Errorf("field '%s' for %#v is invalid", s.Field, obj)
+	}
+
+	// Kind-only check: see MapType note — key/value types are not verified.
+	if fv.Kind() != MapType {
+		return false, nil, fmt.Errorf("field '%s' is not a map[string]string", s.Field)
+	}
+
+	length := fv.Len()
+	vals := make([][]byte, 0, length)
+	for _, key := range fv.MapKeys() {
+		k := key.String()
+		if k == "" {
+			continue
+		}
+		val := fv.MapIndex(key).String()
+
+		if s.Lowercase {
+			k = strings.ToLower(k)
+			val = strings.ToLower(val)
+		}
+
+		// Add the null character as a terminator
+		k += "\x00" + val + "\x00"
+
+		vals = append(vals, []byte(k))
+	}
+	if len(vals) == 0 {
+		return false, nil, nil
+	}
+	return true, vals, nil
+}
+
+// WARNING: Because of a bug in FromObject, this function will never return
+// a value when using the single-argument version.
+// FromArgs builds a lookup key from a map key (one argument) or a
+// key/value pair (two arguments); each part is null-terminated to mirror
+// the keys built by FromObject.
+func (s *StringMapFieldIndex) FromArgs(args ...interface{}) ([]byte, error) {
+	if len(args) > 2 || len(args) == 0 {
+		return nil, fmt.Errorf("must provide one or two arguments")
+	}
+	key, ok := args[0].(string)
+	if !ok {
+		return nil, fmt.Errorf("argument must be a string: %#v", args[0])
+	}
+	if s.Lowercase {
+		key = strings.ToLower(key)
+	}
+	// Add the null character as a terminator
+	key += "\x00"
+
+	if len(args) == 2 {
+		val, ok := args[1].(string)
+		if !ok {
+			return nil, fmt.Errorf("argument must be a string: %#v", args[1])
+		}
+		if s.Lowercase {
+			val = strings.ToLower(val)
+		}
+		// Add the null character as a terminator
+		key += val + "\x00"
+	}
+
+	return []byte(key), nil
+}
+
+// IntFieldIndex is used to extract an int field from an object using
+// reflection and builds an index on that field.
+type IntFieldIndex struct {
+	// Field is the name of the int-kinded struct field to index.
+	Field string
+}
+
+// FromObject reads the named signed-integer field from obj and encodes
+// it as an order-preserving big-endian key (see encodeInt).
+func (i *IntFieldIndex) FromObject(obj interface{}) (bool, []byte, error) {
+	v := reflect.ValueOf(obj)
+	v = reflect.Indirect(v) // Dereference the pointer if any
+
+	fv := v.FieldByName(i.Field)
+	if !fv.IsValid() {
+		return false, nil,
+			fmt.Errorf("field '%s' for %#v is invalid", i.Field, obj)
+	}
+
+	// Check the type
+	k := fv.Kind()
+	size, ok := IsIntType(k)
+	if !ok {
+		return false, nil, fmt.Errorf("field %q is of type %v; want an int", i.Field, k)
+	}
+
+	// Get the value and encode it
+	val := fv.Int()
+	buf := encodeInt(val, size)
+
+	return true, buf, nil
+}
+
+// FromArgs encodes a single signed-integer argument into the same
+// big-endian key form as FromObject. The argument's own int size is
+// used, so it should match the indexed field's size for lookups to work.
+func (i *IntFieldIndex) FromArgs(args ...interface{}) ([]byte, error) {
+	if len(args) != 1 {
+		return nil, fmt.Errorf("must provide only a single argument")
+	}
+
+	v := reflect.ValueOf(args[0])
+	if !v.IsValid() {
+		return nil, fmt.Errorf("%#v is invalid", args[0])
+	}
+
+	k := v.Kind()
+	size, ok := IsIntType(k)
+	if !ok {
+		return nil, fmt.Errorf("arg is of type %v; want a int", k)
+	}
+
+	val := v.Int()
+	buf := encodeInt(val, size)
+
+	return buf, nil
+}
+
+// encodeInt encodes val into size big-endian bytes, flipping the sign
+// bit so that the byte-wise (lexicographic) order of the output matches
+// the numeric order of the signed input. Panics on unsupported sizes,
+// which cannot occur for inputs produced by IsIntType.
+func encodeInt(val int64, size int) []byte {
+	buf := make([]byte, size)
+
+	// This bit flips the sign bit on any sized signed twos-complement integer,
+	// which when truncated to a uint of the same size will bias the value such
+	// that the maximum negative int becomes 0, and the maximum positive int
+	// becomes the maximum positive uint.
+	scaled := val ^ int64(-1<<(size*8-1))
+
+	switch size {
+	case 1:
+		buf[0] = uint8(scaled)
+	case 2:
+		binary.BigEndian.PutUint16(buf, uint16(scaled))
+	case 4:
+		binary.BigEndian.PutUint32(buf, uint32(scaled))
+	case 8:
+		binary.BigEndian.PutUint64(buf, uint64(scaled))
+	default:
+		panic(fmt.Sprintf("unsupported int size parameter: %d", size))
+	}
+
+	return buf
+}
+
+// IsIntType returns whether the passed type is a type of int and the number
+// of bytes needed to encode the type.
+func IsIntType(k reflect.Kind) (size int, okay bool) {
+	switch k {
+	case reflect.Int:
+		// strconv.IntSize is the bit width of int on this platform (32 or 64).
+		return strconv.IntSize / 8, true
+	case reflect.Int8:
+		return 1, true
+	case reflect.Int16:
+		return 2, true
+	case reflect.Int32:
+		return 4, true
+	case reflect.Int64:
+		return 8, true
+	default:
+		return 0, false
+	}
+}
+
+// UintFieldIndex is used to extract a uint field from an object using
+// reflection and builds an index on that field.
+type UintFieldIndex struct {
+	// Field is the name of the uint-kinded struct field to index.
+	Field string
+}
+
+// FromObject reads the named unsigned-integer field from obj and encodes
+// it as a big-endian key (see encodeUInt).
+func (u *UintFieldIndex) FromObject(obj interface{}) (bool, []byte, error) {
+	v := reflect.ValueOf(obj)
+	v = reflect.Indirect(v) // Dereference the pointer if any
+
+	fv := v.FieldByName(u.Field)
+	if !fv.IsValid() {
+		return false, nil,
+			fmt.Errorf("field '%s' for %#v is invalid", u.Field, obj)
+	}
+
+	// Check the type
+	k := fv.Kind()
+	size, ok := IsUintType(k)
+	if !ok {
+		return false, nil, fmt.Errorf("field %q is of type %v; want a uint", u.Field, k)
+	}
+
+	// Get the value and encode it
+	val := fv.Uint()
+	buf := encodeUInt(val, size)
+
+	return true, buf, nil
+}
+
+// FromArgs encodes a single unsigned-integer argument into the same
+// big-endian key form as FromObject. The argument's own uint size is
+// used, so it should match the indexed field's size for lookups to work.
+func (u *UintFieldIndex) FromArgs(args ...interface{}) ([]byte, error) {
+	if len(args) != 1 {
+		return nil, fmt.Errorf("must provide only a single argument")
+	}
+
+	v := reflect.ValueOf(args[0])
+	if !v.IsValid() {
+		return nil, fmt.Errorf("%#v is invalid", args[0])
+	}
+
+	k := v.Kind()
+	size, ok := IsUintType(k)
+	if !ok {
+		return nil, fmt.Errorf("arg is of type %v; want a uint", k)
+	}
+
+	val := v.Uint()
+	buf := encodeUInt(val, size)
+
+	return buf, nil
+}
+
+// encodeUInt writes val as size big-endian bytes. Unlike encodeInt, no
+// sign-bit bias is needed: unsigned values already sort lexicographically
+// in big-endian form. Panics on unsupported sizes, which cannot occur
+// for inputs produced by IsUintType.
+func encodeUInt(val uint64, size int) []byte {
+	buf := make([]byte, size)
+
+	switch size {
+	case 1:
+		buf[0] = uint8(val)
+	case 2:
+		binary.BigEndian.PutUint16(buf, uint16(val))
+	case 4:
+		binary.BigEndian.PutUint32(buf, uint32(val))
+	case 8:
+		binary.BigEndian.PutUint64(buf, val)
+	default:
+		panic(fmt.Sprintf("unsupported uint size parameter: %d", size))
+	}
+
+	return buf
+}
+
+// IsUintType returns whether the passed type is a type of uint and the number
+// of bytes needed to encode the type.
+func IsUintType(k reflect.Kind) (size int, okay bool) {
+	switch k {
+	case reflect.Uint:
+		// strconv.IntSize is the bit width of uint on this platform (32 or 64).
+		return strconv.IntSize / 8, true
+	case reflect.Uint8:
+		return 1, true
+	case reflect.Uint16:
+		return 2, true
+	case reflect.Uint32:
+		return 4, true
+	case reflect.Uint64:
+		return 8, true
+	default:
+		return 0, false
+	}
+}
+
+// BoolFieldIndex is used to extract a boolean field from an object using
+// reflection and builds an index on that field.
+type BoolFieldIndex struct {
+	// Field is the name of the bool struct field to index.
+	Field string
+}
+
+// FromObject encodes the named bool field as a single byte: 1 for true,
+// 0 for false. The object is always indexable (first return is true)
+// as long as the field exists and is a bool.
+func (i *BoolFieldIndex) FromObject(obj interface{}) (bool, []byte, error) {
+	v := reflect.ValueOf(obj)
+	v = reflect.Indirect(v) // Dereference the pointer if any
+
+	fv := v.FieldByName(i.Field)
+	if !fv.IsValid() {
+		return false, nil,
+			fmt.Errorf("field '%s' for %#v is invalid", i.Field, obj)
+	}
+
+	// Check the type
+	k := fv.Kind()
+	if k != reflect.Bool {
+		return false, nil, fmt.Errorf("field %q is of type %v; want a bool", i.Field, k)
+	}
+
+	// Get the value and encode it
+	buf := make([]byte, 1)
+	if fv.Bool() {
+		buf[0] = 1
+	}
+
+	return true, buf, nil
+}
+
+// FromArgs encodes a single bool argument as a 1/0 byte via fromBoolArgs.
+func (i *BoolFieldIndex) FromArgs(args ...interface{}) ([]byte, error) {
+	return fromBoolArgs(args)
+}
+
+// UUIDFieldIndex is used to extract a field from an object
+// using reflection and builds an index on that field by treating
+// it as a UUID. This is an optimization to using a StringFieldIndex
+// as the UUID can be more compactly represented in byte form.
+type UUIDFieldIndex struct {
+	// Field is the name of the string struct field holding the UUID.
+	Field string
+}
+
+// FromObject reads the named string field and parses it as a full
+// 36-character UUID into its compact 16-byte form. An empty string means
+// the object is not indexed.
+func (u *UUIDFieldIndex) FromObject(obj interface{}) (bool, []byte, error) {
+	v := reflect.ValueOf(obj)
+	v = reflect.Indirect(v) // Dereference the pointer if any
+
+	fv := v.FieldByName(u.Field)
+	if !fv.IsValid() {
+		return false, nil,
+			fmt.Errorf("field '%s' for %#v is invalid", u.Field, obj)
+	}
+
+	val := fv.String()
+	if val == "" {
+		return false, nil, nil
+	}
+
+	buf, err := u.parseString(val, true)
+	return true, buf, err
+}
+
+// FromArgs accepts either a full UUID string (parsed with length
+// enforcement) or a raw 16-byte slice, returning the 16-byte key.
+func (u *UUIDFieldIndex) FromArgs(args ...interface{}) ([]byte, error) {
+	if len(args) != 1 {
+		return nil, fmt.Errorf("must provide only a single argument")
+	}
+	switch arg := args[0].(type) {
+	case string:
+		return u.parseString(arg, true)
+	case []byte:
+		if len(arg) != 16 {
+			return nil, fmt.Errorf("byte slice must be 16 characters")
+		}
+		return arg, nil
+	default:
+		return nil,
+			fmt.Errorf("argument must be a string or byte slice: %#v", args[0])
+	}
+}
+
+// PrefixFromArgs is like FromArgs but permits a partial UUID string for
+// prefix scans. Byte-slice arguments are used as-is with no length check.
+func (u *UUIDFieldIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) {
+	if len(args) != 1 {
+		return nil, fmt.Errorf("must provide only a single argument")
+	}
+	switch arg := args[0].(type) {
+	case string:
+		return u.parseString(arg, false)
+	case []byte:
+		return arg, nil
+	default:
+		return nil,
+			fmt.Errorf("argument must be a string or byte slice: %#v", args[0])
+	}
+}
+
+// parseString parses a UUID from the string. If enforceLength is false, it will
+// parse a partial UUID. An error is returned if the input, stripped of hyphens,
+// is not even length.
+func (u *UUIDFieldIndex) parseString(s string, enforceLength bool) ([]byte, error) {
+	// Verify the length. Inputs longer than 36 characters are rejected
+	// even when enforceLength is false.
+	l := len(s)
+	if enforceLength && l != 36 {
+		return nil, fmt.Errorf("UUID must be 36 characters")
+	} else if l > 36 {
+		return nil, fmt.Errorf("Invalid UUID length. UUID have 36 characters; got %d", l)
+	}
+
+	hyphens := strings.Count(s, "-")
+	if hyphens > 4 {
+		return nil, fmt.Errorf(`UUID should have maximum of 4 "-"; got %d`, hyphens)
+	}
+
+	// The sanitized length is the length of the original string without the "-".
+	sanitized := strings.Replace(s, "-", "", -1)
+	sanitizedLength := len(sanitized)
+	if sanitizedLength%2 != 0 {
+		return nil, fmt.Errorf("Input (without hyphens) must be even length")
+	}
+
+	// Hex-decode the remaining characters into the compact byte form.
+	dec, err := hex.DecodeString(sanitized)
+	if err != nil {
+		return nil, fmt.Errorf("Invalid UUID: %v", err)
+	}
+
+	return dec, nil
+}
+
+// FieldSetIndex is used to extract a field from an object using reflection and
+// builds an index on whether the field is set by comparing it against its
+// type's nil value.
+type FieldSetIndex struct {
+	// Field is the name of the struct field whose "set-ness" is indexed.
+	Field string
+}
+
+// FromObject indexes whether the named field is set: byte 1 when the
+// field differs from its type's zero value, byte 0 otherwise. The object
+// is always indexable when the field exists.
+//
+// NOTE(review): the interface == comparison below panics at runtime if
+// the field's type is not comparable (map, slice, or func) — confirm
+// callers never index such fields.
+func (f *FieldSetIndex) FromObject(obj interface{}) (bool, []byte, error) {
+	v := reflect.ValueOf(obj)
+	v = reflect.Indirect(v) // Dereference the pointer if any
+
+	fv := v.FieldByName(f.Field)
+	if !fv.IsValid() {
+		return false, nil,
+			fmt.Errorf("field '%s' for %#v is invalid", f.Field, obj)
+	}
+
+	if fv.Interface() == reflect.Zero(fv.Type()).Interface() {
+		return true, []byte{0}, nil
+	}
+
+	return true, []byte{1}, nil
+}
+
+// FromArgs encodes a single bool argument as a 1/0 byte via fromBoolArgs.
+func (f *FieldSetIndex) FromArgs(args ...interface{}) ([]byte, error) {
+	return fromBoolArgs(args)
+}
+
+// ConditionalIndex builds an index based on a condition specified by a passed
+// user function. This function may examine the passed object and return a
+// boolean to encapsulate an arbitrarily complex conditional.
+type ConditionalIndex struct {
+	// Conditional computes the boolean to index for each object.
+	Conditional ConditionalIndexFunc
+}
+
+// ConditionalIndexFunc is the required function interface for a
+// ConditionalIndex.
+// It receives the object being indexed and returns the value to index.
+type ConditionalIndexFunc func(obj interface{}) (bool, error)
+
+// FromObject runs the user-supplied conditional against obj and encodes
+// the result as a single 1/0 byte. The object is always indexable unless
+// the conditional itself returns an error.
+func (c *ConditionalIndex) FromObject(obj interface{}) (bool, []byte, error) {
+	// Call the user's function
+	res, err := c.Conditional(obj)
+	if err != nil {
+		return false, nil, fmt.Errorf("ConditionalIndexFunc(%#v) failed: %v", obj, err)
+	}
+
+	if res {
+		return true, []byte{1}, nil
+	}
+
+	return true, []byte{0}, nil
+}
+
+// FromArgs encodes a single bool argument as a 1/0 byte via fromBoolArgs.
+func (c *ConditionalIndex) FromArgs(args ...interface{}) ([]byte, error) {
+	return fromBoolArgs(args)
+}
+
+// fromBoolArgs is a helper that expects only a single boolean argument and
+// returns a single length byte array containing either a one or zero depending
+// on whether the passed input is true or false respectively.
+func fromBoolArgs(args []interface{}) ([]byte, error) {
+	if len(args) != 1 {
+		return nil, fmt.Errorf("must provide only a single argument")
+	}
+
+	// Non-bool arguments are rejected rather than coerced.
+	if val, ok := args[0].(bool); !ok {
+		return nil, fmt.Errorf("argument must be a boolean type: %#v", args[0])
+	} else if val {
+		return []byte{1}, nil
+	}
+
+	return []byte{0}, nil
+}
+
+// CompoundIndex is used to build an index using multiple sub-indexes
+// Prefix based iteration is supported as long as the appropriate prefix
+// of indexers support it. All sub-indexers are only assumed to expect
+// a single argument.
+type CompoundIndex struct {
+	// Indexes are the ordered sub-indexers whose keys are concatenated.
+	// Each must implement SingleIndexer (enforced in FromObject).
+	Indexes []Indexer
+
+	// AllowMissing results in an index based on only the indexers
+	// that return data. If true, you may end up with 2/3 columns
+	// indexed which might be useful for an index scan. Otherwise,
+	// the CompoundIndex requires all indexers to be satisfied.
+	AllowMissing bool
+}
+
+// FromObject concatenates each sub-indexer's value into one compound
+// key. With AllowMissing, the first missing sub-value truncates the key
+// to the columns gathered so far; otherwise any missing sub-value means
+// the object is not indexed at all.
+func (c *CompoundIndex) FromObject(raw interface{}) (bool, []byte, error) {
+	var out []byte
+	for i, idxRaw := range c.Indexes {
+		idx, ok := idxRaw.(SingleIndexer)
+		if !ok {
+			return false, nil, fmt.Errorf("sub-index %d error: %s", i, "sub-index must be a SingleIndexer")
+		}
+		ok, val, err := idx.FromObject(raw)
+		if err != nil {
+			return false, nil, fmt.Errorf("sub-index %d error: %v", i, err)
+		}
+		if !ok {
+			if c.AllowMissing {
+				break
+			} else {
+				return false, nil, nil
+			}
+		}
+		out = append(out, val...)
+	}
+	return true, out, nil
+}
+
+// FromArgs requires exactly one argument per sub-index and concatenates
+// the encoded segments in order to form the compound lookup key.
+func (c *CompoundIndex) FromArgs(args ...interface{}) ([]byte, error) {
+	if len(args) != len(c.Indexes) {
+		return nil, fmt.Errorf("non-equivalent argument count and index fields")
+	}
+	var out []byte
+	for i, arg := range args {
+		val, err := c.Indexes[i].FromArgs(arg)
+		if err != nil {
+			return nil, fmt.Errorf("sub-index %d error: %v", i, err)
+		}
+		out = append(out, val...)
+	}
+	return out, nil
+}
+
+// PrefixFromArgs encodes all but the last argument exactly (via
+// FromArgs) and the final argument as a prefix, which requires the
+// corresponding sub-index to implement PrefixIndexer. Fewer arguments
+// than sub-indexes are allowed; extra arguments are an error.
+func (c *CompoundIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) {
+	if len(args) > len(c.Indexes) {
+		return nil, fmt.Errorf("more arguments than index fields")
+	}
+	var out []byte
+	for i, arg := range args {
+		if i+1 < len(args) {
+			// Not the last argument: encode the full segment.
+			val, err := c.Indexes[i].FromArgs(arg)
+			if err != nil {
+				return nil, fmt.Errorf("sub-index %d error: %v", i, err)
+			}
+			out = append(out, val...)
+		} else {
+			// Last argument: encode as a prefix.
+			prefixIndexer, ok := c.Indexes[i].(PrefixIndexer)
+			if !ok {
+				return nil, fmt.Errorf("sub-index %d does not support prefix scanning", i)
+			}
+			val, err := prefixIndexer.PrefixFromArgs(arg)
+			if err != nil {
+				return nil, fmt.Errorf("sub-index %d error: %v", i, err)
+			}
+			out = append(out, val...)
+		}
+	}
+	return out, nil
+}
+
+// CompoundMultiIndex is used to build an index using multiple
+// sub-indexes.
+//
+// Unlike CompoundIndex, CompoundMultiIndex can have both
+// SingleIndexer and MultiIndexer sub-indexers. However, each
+// MultiIndexer adds considerable overhead/complexity in terms of
+// the number of indexes created under-the-hood. It is not suggested
+// to use more than one or two, if possible.
+//
+// Another change from CompoundIndexer is that if AllowMissing is
+// set, not only is it valid to have empty index fields, but it will
+// still create index values up to the first empty index. This means
+// that if you have a value with an empty field, rather than using a
+// prefix for lookup, you can simply pass in less arguments. As an
+// example, if {Foo, Bar} is indexed but Bar is missing for a value
+// and AllowMissing is set, an index will still be created for {Foo}
+// and it is valid to do a lookup passing in only Foo as an argument.
+// Note that the ordering isn't guaranteed -- it's last-insert wins,
+// but this is true if you have two objects that have the same
+// indexes not using AllowMissing anyways.
+//
+// Because StringMapFieldIndexers can take a varying number of args,
+// it is currently a requirement that whenever it is used, two
+// arguments must _always_ be provided for it. In theory we only
+// need one, except a bug in that indexer means the single-argument
+// version will never work. You can leave the second argument nil,
+// but it will never produce a value. We support this for whenever
+// that bug is fixed, likely in a next major version bump.
+//
+// Prefix-based indexing is not currently supported.
+type CompoundMultiIndex struct {
+	// Indexes are the ordered sub-indexers; each must implement either
+	// SingleIndexer or MultiIndexer (enforced in FromObject).
+	Indexes []Indexer
+
+	// AllowMissing results in an index based on only the indexers
+	// that return data. If true, you may end up with 2/3 columns
+	// indexed which might be useful for an index scan. Otherwise,
+	// CompoundMultiIndex requires all indexers to be satisfied.
+	AllowMissing bool
+}
+
+// FromObject builds the set of compound index keys for an object by
+// combining the values produced by every sub-indexer. Single-value
+// sub-indexers contribute one segment; multi-value sub-indexers fan out,
+// producing the cross product of all segment values. If AllowMissing is
+// set, a missing sub-value stops segment collection early and keys are
+// still emitted for every non-empty prefix of segments.
+func (c *CompoundMultiIndex) FromObject(raw interface{}) (bool, [][]byte, error) {
+	// At each entry, builder is storing the results from the next index
+	builder := make([][][]byte, 0, len(c.Indexes))
+
+forloop:
+	// This loop goes through each indexer and adds the value(s) provided to the next
+	// entry in the slice. We can then later walk it like a tree to construct the indices.
+	for i, idxRaw := range c.Indexes {
+		switch idx := idxRaw.(type) {
+		case SingleIndexer:
+			ok, val, err := idx.FromObject(raw)
+			if err != nil {
+				return false, nil, fmt.Errorf("single sub-index %d error: %v", i, err)
+			}
+			if !ok {
+				if c.AllowMissing {
+					break forloop
+				} else {
+					return false, nil, nil
+				}
+			}
+			builder = append(builder, [][]byte{val})
+
+		case MultiIndexer:
+			ok, vals, err := idx.FromObject(raw)
+			if err != nil {
+				return false, nil, fmt.Errorf("multi sub-index %d error: %v", i, err)
+			}
+			if !ok {
+				if c.AllowMissing {
+					break forloop
+				} else {
+					return false, nil, nil
+				}
+			}
+
+			// Add each of the new values to each of the old values
+			builder = append(builder, vals)
+
+		default:
+			return false, nil, fmt.Errorf("sub-index %d does not satisfy either SingleIndexer or MultiIndexer", i)
+		}
+	}
+
+	// Start with something higher to avoid resizing if possible.
+	// Note: this was previously written as len(c.Indexes)^3, but ^ is XOR
+	// in Go (not exponentiation), which yielded a capacity of 0 or 1.
+	n := len(c.Indexes)
+	out := make([][]byte, 0, n*n*n)
+
+	// We are walking through the builder slice essentially in a depth-first fashion,
+	// building the prefix and leaves as we go. If AllowMissing is false, we only insert
+	// these full paths to leaves. Otherwise, we also insert each prefix along the way.
+	// This allows for lookup in FromArgs when AllowMissing is true that does not contain
+	// the full set of arguments. e.g. for {Foo, Bar} where an object has only the Foo
+	// field specified as "abc", it is valid to call FromArgs with just "abc".
+	var walkVals func([]byte, int)
+	walkVals = func(currPrefix []byte, depth int) {
+		if depth >= len(builder) {
+			return
+		}
+
+		if depth == len(builder)-1 {
+			// These are the "leaves", so append directly
+			for _, v := range builder[depth] {
+				outcome := make([]byte, len(currPrefix))
+				copy(outcome, currPrefix)
+				out = append(out, append(outcome, v...))
+			}
+			return
+		}
+		for _, v := range builder[depth] {
+			// Build the next prefix in a fresh slice rather than appending
+			// to currPrefix: append may reuse currPrefix's backing array
+			// across loop iterations, letting a later sibling overwrite
+			// the bytes of a prefix already stored in out (visible when
+			// AllowMissing emits intermediate prefixes).
+			nextPrefix := make([]byte, 0, len(currPrefix)+len(v))
+			nextPrefix = append(nextPrefix, currPrefix...)
+			nextPrefix = append(nextPrefix, v...)
+			if c.AllowMissing {
+				out = append(out, nextPrefix)
+			}
+			walkVals(nextPrefix, depth+1)
+		}
+	}
+
+	walkVals(nil, 0)
+
+	return true, out, nil
+}
+
+// FromArgs builds a compound lookup key. StringMapFieldIndex sub-indexes
+// always consume two arguments (key and value; the value may be nil),
+// all others consume one. With AllowMissing, fewer arguments than
+// sub-indexes are accepted and produce a prefix key.
+func (c *CompoundMultiIndex) FromArgs(args ...interface{}) ([]byte, error) {
+	// First pass: count the StringMapFieldIndex sub-indexes covered by
+	// the supplied args, since each of those consumes two arguments.
+	var stringMapCount int
+	var argCount int
+	for _, index := range c.Indexes {
+		if argCount >= len(args) {
+			break
+		}
+		if _, ok := index.(*StringMapFieldIndex); ok {
+			// We require pairs for StringMapFieldIndex, but only got one
+			if argCount+1 >= len(args) {
+				return nil, errors.New("invalid number of arguments")
+			}
+			stringMapCount++
+			argCount += 2
+		} else {
+			argCount++
+		}
+	}
+	argCount = 0
+
+	// Validate the argument count against the (pair-adjusted) index count.
+	switch c.AllowMissing {
+	case true:
+		if len(args) > len(c.Indexes)+stringMapCount {
+			return nil, errors.New("too many arguments")
+		}
+
+	default:
+		if len(args) != len(c.Indexes)+stringMapCount {
+			return nil, errors.New("number of arguments does not equal number of indexers")
+		}
+	}
+
+	// Second pass: encode each sub-index's segment and concatenate.
+	var out []byte
+	var val []byte
+	var err error
+	for i, idx := range c.Indexes {
+		if argCount >= len(args) {
+			// We're done; should only hit this if AllowMissing
+			break
+		}
+		if _, ok := idx.(*StringMapFieldIndex); ok {
+			if args[argCount+1] == nil {
+				val, err = idx.FromArgs(args[argCount])
+			} else {
+				val, err = idx.FromArgs(args[argCount : argCount+2]...)
+			}
+			argCount += 2
+		} else {
+			val, err = idx.FromArgs(args[argCount])
+			argCount++
+		}
+		if err != nil {
+			return nil, fmt.Errorf("sub-index %d error: %v", i, err)
+		}
+		out = append(out, val...)
+	}
+	return out, nil
+}
diff --git a/vendor/github.com/hashicorp/go-memdb/memdb.go b/vendor/github.com/hashicorp/go-memdb/memdb.go
new file mode 100644
index 0000000..13cc6a8
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/memdb.go
@@ -0,0 +1,119 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+// Package memdb provides an in-memory database that supports transactions
+// and MVCC.
+package memdb
+
+import (
+ "sync"
+ "sync/atomic"
+ "unsafe"
+
+ "github.com/hashicorp/go-immutable-radix"
+)
+
+// MemDB is an in-memory database providing Atomicity, Consistency, and
+// Isolation from ACID. MemDB doesn't provide Durability since it is an
+// in-memory database.
+//
+// MemDB provides a table abstraction to store objects (rows) with multiple
+// indexes based on inserted values. The database makes use of immutable radix
+// trees to provide transactions and MVCC.
+//
+// Objects inserted into MemDB are not copied. It is **extremely important**
+// that objects are not modified in-place after they are inserted since they
+// are stored directly in MemDB. It remains unsafe to modify inserted objects
+// even after they've been deleted from MemDB since there may still be older
+// snapshots of the DB being read from other goroutines.
+type MemDB struct {
+	schema  *DBSchema      // database schema, fixed at construction
+	root    unsafe.Pointer // *iradix.Tree underneath; swapped atomically
+	primary bool           // false for copies created via Snapshot
+
+	// There can only be a single writer at once
+	writer sync.Mutex
+}
+
+// NewMemDB creates a new MemDB with the given schema. The schema is
+// validated first, and an empty radix tree is created for every
+// table/index pair before the DB is returned.
+func NewMemDB(schema *DBSchema) (*MemDB, error) {
+	// Validate the schema
+	if err := schema.Validate(); err != nil {
+		return nil, err
+	}
+
+	// Create the MemDB
+	db := &MemDB{
+		schema:  schema,
+		root:    unsafe.Pointer(iradix.New()),
+		primary: true,
+	}
+	// Seed one empty sub-tree per table index under the root.
+	if err := db.initialize(); err != nil {
+		return nil, err
+	}
+
+	return db, nil
+}
+
+// DBSchema returns schema in use for introspection.
+//
+// The method is intended for *read-only* debugging use cases,
+// returned schema should *never be modified in-place*. The returned
+// pointer is shared with the DB, not a copy.
+func (db *MemDB) DBSchema() *DBSchema {
+	return db.schema
+}
+
+// getRoot is used to do an atomic load of the root pointer. The atomic
+// load is what lets readers proceed without taking the writer lock.
+func (db *MemDB) getRoot() *iradix.Tree {
+	root := (*iradix.Tree)(atomic.LoadPointer(&db.root))
+	return root
+}
+
+// Txn is used to start a new transaction in either read or write mode.
+// There can only be a single concurrent writer, but any number of readers.
+// Write transactions take db.writer here, serializing all writers; read
+// transactions acquire no lock at all.
+func (db *MemDB) Txn(write bool) *Txn {
+	if write {
+		db.writer.Lock()
+	}
+	// The transaction captures the current root, giving it a consistent
+	// point-in-time view of the whole database.
+	txn := &Txn{
+		db:      db,
+		write:   write,
+		rootTxn: db.getRoot().Txn(),
+	}
+	return txn
+}
+
+// Snapshot is used to capture a point-in-time snapshot of the database that
+// will not be affected by any write operations to the existing DB.
+//
+// If MemDB is storing reference-based values (pointers, maps, slices, etc.),
+// the Snapshot will not deep copy those values. Therefore, it is still unsafe
+// to modify any inserted values in either DB.
+func (db *MemDB) Snapshot() *MemDB {
+	// Sharing the immutable radix root is enough: future writes to the
+	// original swap in a new root and never mutate this one.
+	clone := &MemDB{
+		schema:  db.schema,
+		root:    unsafe.Pointer(db.getRoot()),
+		primary: false, // mark as a snapshot, not the original DB
+	}
+	return clone
+}
+
+// initialize is used to setup the DB for use after creation. This should
+// be called only once after allocating a MemDB. It inserts an empty
+// radix sub-tree for every table/index pair defined in the schema.
+func (db *MemDB) initialize() error {
+	root := db.getRoot()
+	for tName, tableSchema := range db.schema.Tables {
+		for iName := range tableSchema.Indexes {
+			index := iradix.New()
+			path := indexPath(tName, iName)
+			root, _, _ = root.Insert(path, index)
+		}
+	}
+	// Plain (non-atomic) store is fine here: the DB has not yet been
+	// shared with other goroutines.
+	db.root = unsafe.Pointer(root)
+	return nil
+}
+
+// indexPath returns the path from the root to the given table index.
+// NOTE(review): the separator is a literal "."; table or index names
+// containing dots could produce ambiguous paths — confirm names are
+// constrained elsewhere.
+func indexPath(table, index string) []byte {
+	return []byte(table + "." + index)
+}
diff --git a/vendor/github.com/hashicorp/go-memdb/schema.go b/vendor/github.com/hashicorp/go-memdb/schema.go
new file mode 100644
index 0000000..2d66f99
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/schema.go
@@ -0,0 +1,117 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package memdb
+
+import "fmt"
+
+// DBSchema is the schema to use for the full database with a MemDB instance.
+//
+// MemDB will require a valid schema. Schema validation can be tested using
+// the Validate function. Calling this function is recommended in unit tests.
+type DBSchema struct {
+	// Tables is the set of tables within this database. The key is the
+	// table name and must match the Name in TableSchema; Validate
+	// enforces this invariant.
+	Tables map[string]*TableSchema
+}
+
+// Validate validates the schema. It checks that the schema is non-nil,
+// defines at least one table, that every map key matches its table's
+// Name, and recursively validates each table.
+func (s *DBSchema) Validate() error {
+	if s == nil {
+		return fmt.Errorf("schema is nil")
+	}
+
+	if len(s.Tables) == 0 {
+		return fmt.Errorf("schema has no tables defined")
+	}
+
+	for name, table := range s.Tables {
+		if name != table.Name {
+			return fmt.Errorf("table name mis-match for '%s'", name)
+		}
+
+		if err := table.Validate(); err != nil {
+			return fmt.Errorf("table %q: %s", name, err)
+		}
+	}
+
+	return nil
+}
+
+// TableSchema is the schema for a single table.
+type TableSchema struct {
+	// Name of the table. This must match the key in the Tables map in DBSchema.
+	Name string
+
+	// Indexes is the set of indexes for querying this table. The key
+	// is a unique name for the index and must match the Name in the
+	// IndexSchema. Every table must include a unique "id" index
+	// (enforced by Validate).
+	Indexes map[string]*IndexSchema
+}
+
+// Validate is used to validate the table schema. It requires a non-empty
+// name, at least one index, a mandatory "id" index that is unique and
+// backed by a SingleIndexer, matching index map keys, and validates each
+// index schema in turn.
+func (s *TableSchema) Validate() error {
+	if s.Name == "" {
+		return fmt.Errorf("missing table name")
+	}
+
+	if len(s.Indexes) == 0 {
+		return fmt.Errorf("missing table indexes for '%s'", s.Name)
+	}
+
+	// The "id" index is the primary key and is required on every table.
+	if _, ok := s.Indexes["id"]; !ok {
+		return fmt.Errorf("must have id index")
+	}
+
+	if !s.Indexes["id"].Unique {
+		return fmt.Errorf("id index must be unique")
+	}
+
+	if _, ok := s.Indexes["id"].Indexer.(SingleIndexer); !ok {
+		return fmt.Errorf("id index must be a SingleIndexer")
+	}
+
+	for name, index := range s.Indexes {
+		if name != index.Name {
+			return fmt.Errorf("index name mis-match for '%s'", name)
+		}
+
+		if err := index.Validate(); err != nil {
+			return fmt.Errorf("index %q: %s", name, err)
+		}
+	}
+
+	return nil
+}
+
+// IndexSchema is the schema for an index. An index defines how a table is
+// queried.
+type IndexSchema struct {
+	// Name of the index. This must be unique among a tables set of indexes.
+	// This must match the key in the map of Indexes for a TableSchema.
+	Name string
+
+	// AllowMissing if true ignores this index if it doesn't produce a
+	// value. For example, an index that extracts a field that doesn't
+	// exist from a structure.
+	AllowMissing bool
+
+	// Unique marks the index as unique; the "id" index must be unique.
+	// NOTE(review): enforcement happens at write time in txn.go, outside
+	// this view — confirm.
+	Unique bool
+	// Indexer extracts index values; it must implement either
+	// SingleIndexer or MultiIndexer (checked by Validate).
+	Indexer Indexer
+}
+
+// Validate checks that the index has a name and an Indexer implementing
+// either SingleIndexer or MultiIndexer.
+func (s *IndexSchema) Validate() error {
+	if s.Name == "" {
+		return fmt.Errorf("missing index name")
+	}
+	if s.Indexer == nil {
+		return fmt.Errorf("missing index function for '%s'", s.Name)
+	}
+	switch s.Indexer.(type) {
+	case SingleIndexer:
+	case MultiIndexer:
+	default:
+		return fmt.Errorf("indexer for '%s' must be a SingleIndexer or MultiIndexer", s.Name)
+	}
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/go-memdb/txn.go b/vendor/github.com/hashicorp/go-memdb/txn.go
new file mode 100644
index 0000000..f83f4fa
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/txn.go
@@ -0,0 +1,1024 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package memdb
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+ "sync/atomic"
+ "unsafe"
+
+ iradix "github.com/hashicorp/go-immutable-radix"
+)
+
+const (
+	// id is the reserved name of the mandatory primary-key index that every
+	// table schema must declare (see TableSchema.Validate).
+	id = "id"
+)
+
+var (
+	// ErrNotFound is returned when the requested item is not found.
+	// Callers may compare errors against this sentinel directly.
+	ErrNotFound = fmt.Errorf("not found")
+)
+
+// tableIndex is a tuple of (Table, Index) used for lookups. It keys the
+// per-transaction map of modified index sub-transactions.
+type tableIndex struct {
+	Table string
+	Index string
+}
+
+// Txn is a transaction against a MemDB.
+// This can be a read or write transaction.
+type Txn struct {
+	// db is the MemDB this transaction operates on.
+	db *MemDB
+	// write is true for write transactions; false makes mutators fail.
+	write bool
+	// rootTxn is the radix transaction over the DB root; nil once the
+	// transaction has been committed or aborted.
+	rootTxn *iradix.Txn
+	// after holds functions registered via Defer, run LIFO on Commit.
+	after []func()
+
+	// changes is used to track the changes performed during the transaction. If
+	// it is nil at transaction start then changes are not tracked.
+	changes Changes
+
+	// modified caches one open sub-transaction per (table, index) touched
+	// by this write transaction.
+	modified map[tableIndex]*iradix.Txn
+}
+
+// TrackChanges enables change tracking for the transaction. If called at any
+// point before commit, subsequent mutations will be recorded and can be
+// retrieved using ChangeSet. Once enabled on a transaction it cannot be
+// turned off again. As with other Txn methods it's not safe to call this
+// from a different goroutine than the one making mutations or committing
+// the transaction.
+func (txn *Txn) TrackChanges() {
+	if txn.changes != nil {
+		// Already tracking; calling again is a no-op.
+		return
+	}
+	txn.changes = make(Changes, 0, 1)
+}
+
+// readableIndex returns a transaction usable for reading the given index in a
+// table. If the transaction is a write transaction with modifications, a clone of the
+// modified index will be returned.
+func (txn *Txn) readableIndex(table, index string) *iradix.Txn {
+	// Look for existing transaction
+	if txn.write && txn.modified != nil {
+		key := tableIndex{table, index}
+		exist, ok := txn.modified[key]
+		if ok {
+			// Clone so that reads observe this txn's uncommitted writes
+			// without interfering with the open writable sub-transaction.
+			return exist.Clone()
+		}
+	}
+
+	// Create a read transaction over the committed snapshot of the index.
+	path := indexPath(table, index)
+	raw, _ := txn.rootTxn.Get(path)
+	indexTxn := raw.(*iradix.Tree).Txn()
+	return indexTxn
+}
+
+// writableIndex returns a transaction usable for modifying the
+// given index in a table. Sub-transactions are cached per (table, index)
+// so repeated writes in one Txn share a single radix transaction.
+func (txn *Txn) writableIndex(table, index string) *iradix.Txn {
+	// Lazily allocate the cache on first write.
+	if txn.modified == nil {
+		txn.modified = make(map[tableIndex]*iradix.Txn)
+	}
+
+	// Look for existing transaction
+	key := tableIndex{table, index}
+	exist, ok := txn.modified[key]
+	if ok {
+		return exist
+	}
+
+	// Start a new transaction
+	path := indexPath(table, index)
+	raw, _ := txn.rootTxn.Get(path)
+	indexTxn := raw.(*iradix.Tree).Txn()
+
+	// If we are the primary DB, enable mutation tracking. Snapshots should
+	// not notify, otherwise we will trigger watches on the primary DB when
+	// the writes will not be visible.
+	indexTxn.TrackMutate(txn.db.primary)
+
+	// Keep this open for the duration of the txn
+	txn.modified[key] = indexTxn
+	return indexTxn
+}
+
+// Abort cancels this transaction, discarding any pending writes. It is a
+// no-op for read transactions and for transactions that have already been
+// aborted or committed.
+func (txn *Txn) Abort() {
+	// Read transactions and already-finished transactions have nothing
+	// to undo (a nil rootTxn marks a finished write transaction).
+	if !txn.write || txn.rootTxn == nil {
+		return
+	}
+
+	// Drop all transaction state without publishing any of it.
+	txn.rootTxn = nil
+	txn.modified = nil
+	txn.changes = nil
+
+	// The exclusive writer lock is no longer needed.
+	txn.db.writer.Unlock()
+}
+
+// Commit is used to finalize this transaction.
+// This is a noop for read transactions,
+// already aborted or committed transactions.
+func (txn *Txn) Commit() {
+	// Noop for a read transaction
+	if !txn.write {
+		return
+	}
+
+	// Check if already aborted or committed
+	if txn.rootTxn == nil {
+		return
+	}
+
+	// Commit each sub-transaction scoped to (table, index).
+	// CommitOnly produces the new tree without firing watch
+	// notifications yet; those are deliberately deferred below.
+	for key, subTxn := range txn.modified {
+		path := indexPath(key.Table, key.Index)
+		final := subTxn.CommitOnly()
+		txn.rootTxn.Insert(path, final)
+	}
+
+	// Update the root of the DB; the atomic store publishes the new
+	// state to concurrent readers.
+	newRoot := txn.rootTxn.CommitOnly()
+	atomic.StorePointer(&txn.db.root, unsafe.Pointer(newRoot))
+
+	// Now issue all of the mutation updates (this is safe to call
+	// even if mutation tracking isn't enabled); we do this after
+	// the root pointer is swapped so that waking responders will
+	// see the new state.
+	for _, subTxn := range txn.modified {
+		subTxn.Notify()
+	}
+	txn.rootTxn.Notify()
+
+	// Clear the txn
+	txn.rootTxn = nil
+	txn.modified = nil
+
+	// Release the writer lock since this is invalid
+	txn.db.writer.Unlock()
+
+	// Run the deferred functions, if any, in LIFO order.
+	for i := len(txn.after); i > 0; i-- {
+		fn := txn.after[i-1]
+		fn()
+	}
+}
+
+// Insert is used to add or update an object into the given table.
+//
+// When updating an object, the obj provided should be a copy rather
+// than a value updated in-place. Modifying values in-place that are already
+// inserted into MemDB is not supported behavior.
+func (txn *Txn) Insert(table string, obj interface{}) error {
+	if !txn.write {
+		return fmt.Errorf("cannot insert in read-only transaction")
+	}
+
+	// Get the table schema
+	tableSchema, ok := txn.db.schema.Tables[table]
+	if !ok {
+		return fmt.Errorf("invalid table '%s'", table)
+	}
+
+	// Get the primary ID of the object. The schema guarantees the "id"
+	// index exists and is a SingleIndexer (see TableSchema.Validate).
+	idSchema := tableSchema.Indexes[id]
+	idIndexer := idSchema.Indexer.(SingleIndexer)
+	ok, idVal, err := idIndexer.FromObject(obj)
+	if err != nil {
+		return fmt.Errorf("failed to build primary index: %v", err)
+	}
+	if !ok {
+		return fmt.Errorf("object missing primary index")
+	}
+
+	// Lookup the object by ID first, to see if this is an update
+	idTxn := txn.writableIndex(table, id)
+	existing, update := idTxn.Get(idVal)
+
+	// On an update, there is an existing object with the given
+	// primary ID. We do the update by deleting the current object
+	// and inserting the new object.
+	for name, indexSchema := range tableSchema.Indexes {
+		indexTxn := txn.writableIndex(table, name)
+
+		// Determine the new index value (ok/vals/err shadow the outer
+		// variables on purpose; they are per-index results)
+		var (
+			ok   bool
+			vals [][]byte
+			err  error
+		)
+		switch indexer := indexSchema.Indexer.(type) {
+		case SingleIndexer:
+			var val []byte
+			ok, val, err = indexer.FromObject(obj)
+			vals = [][]byte{val}
+		case MultiIndexer:
+			ok, vals, err = indexer.FromObject(obj)
+		}
+		if err != nil {
+			return fmt.Errorf("failed to build index '%s': %v", name, err)
+		}
+
+		// Handle non-unique index by computing a unique index.
+		// This is done by appending the primary key which must
+		// be unique anyways.
+		if ok && !indexSchema.Unique {
+			for i := range vals {
+				vals[i] = append(vals[i], idVal...)
+			}
+		}
+
+		// Handle the update by deleting from the index first
+		if update {
+			var (
+				okExist   bool
+				valsExist [][]byte
+				err       error
+			)
+			switch indexer := indexSchema.Indexer.(type) {
+			case SingleIndexer:
+				var valExist []byte
+				okExist, valExist, err = indexer.FromObject(existing)
+				valsExist = [][]byte{valExist}
+			case MultiIndexer:
+				okExist, valsExist, err = indexer.FromObject(existing)
+			}
+			if err != nil {
+				return fmt.Errorf("failed to build index '%s': %v", name, err)
+			}
+			if okExist {
+				for i, valExist := range valsExist {
+					// Handle non-unique index by computing a unique index.
+					// This is done by appending the primary key which must
+					// be unique anyways.
+					if !indexSchema.Unique {
+						valExist = append(valExist, idVal...)
+					}
+
+					// If we are writing to the same index with the same value,
+					// we can avoid the delete as the insert will overwrite the
+					// value anyways.
+					if i >= len(vals) || !bytes.Equal(valExist, vals[i]) {
+						indexTxn.Delete(valExist)
+					}
+				}
+			}
+		}
+
+		// If there is no index value, either this is an error or an expected
+		// case and we can skip updating
+		if !ok {
+			if indexSchema.AllowMissing {
+				continue
+			} else {
+				return fmt.Errorf("missing value for index '%s'", name)
+			}
+		}
+
+		// Update the value of the index
+		for _, val := range vals {
+			indexTxn.Insert(val, obj)
+		}
+	}
+	// Record the mutation when change tracking is enabled.
+	if txn.changes != nil {
+		txn.changes = append(txn.changes, Change{
+			Table:      table,
+			Before:     existing, // might be nil on a create
+			After:      obj,
+			primaryKey: idVal,
+		})
+	}
+	return nil
+}
+
+// Delete is used to delete a single object from the given table.
+// This object must already exist in the table.
+// Returns ErrNotFound if no object with a matching primary key exists.
+func (txn *Txn) Delete(table string, obj interface{}) error {
+	if !txn.write {
+		return fmt.Errorf("cannot delete in read-only transaction")
+	}
+
+	// Get the table schema
+	tableSchema, ok := txn.db.schema.Tables[table]
+	if !ok {
+		return fmt.Errorf("invalid table '%s'", table)
+	}
+
+	// Get the primary ID of the object
+	idSchema := tableSchema.Indexes[id]
+	idIndexer := idSchema.Indexer.(SingleIndexer)
+	ok, idVal, err := idIndexer.FromObject(obj)
+	if err != nil {
+		return fmt.Errorf("failed to build primary index: %v", err)
+	}
+	if !ok {
+		return fmt.Errorf("object missing primary index")
+	}
+
+	// Lookup the object by ID first, check if we should continue.
+	// Note that the stored object, not the argument, is what gets
+	// removed from the secondary indexes below.
+	idTxn := txn.writableIndex(table, id)
+	existing, ok := idTxn.Get(idVal)
+	if !ok {
+		return ErrNotFound
+	}
+
+	// Remove the object from all the indexes
+	for name, indexSchema := range tableSchema.Indexes {
+		indexTxn := txn.writableIndex(table, name)
+
+		// Handle the update by deleting from the index first
+		var (
+			ok   bool
+			vals [][]byte
+			err  error
+		)
+		switch indexer := indexSchema.Indexer.(type) {
+		case SingleIndexer:
+			var val []byte
+			ok, val, err = indexer.FromObject(existing)
+			vals = [][]byte{val}
+		case MultiIndexer:
+			ok, vals, err = indexer.FromObject(existing)
+		}
+		if err != nil {
+			return fmt.Errorf("failed to build index '%s': %v", name, err)
+		}
+		if ok {
+			// Handle non-unique index by computing a unique index.
+			// This is done by appending the primary key which must
+			// be unique anyways.
+			for _, val := range vals {
+				if !indexSchema.Unique {
+					val = append(val, idVal...)
+				}
+				indexTxn.Delete(val)
+			}
+		}
+	}
+	// Record the deletion when change tracking is enabled.
+	if txn.changes != nil {
+		txn.changes = append(txn.changes, Change{
+			Table:      table,
+			Before:     existing,
+			After:      nil, // Now nil indicates deletion
+			primaryKey: idVal,
+		})
+	}
+	return nil
+}
+
+// DeletePrefix is used to delete an entire subtree based on a prefix.
+// The given index must be a prefix index, and will be used to perform a scan and enumerate the set of objects to delete.
+// These will be removed from all other indexes, and then a special prefix operation will delete the objects from the given index in an efficient subtree delete operation.
+// This is useful when you have a very large number of objects indexed by the given index, along with a much smaller number of entries in the other indexes for those objects.
+// Returns whether any objects were deleted.
+func (txn *Txn) DeletePrefix(table string, prefix_index string, prefix string) (bool, error) {
+	if !txn.write {
+		return false, fmt.Errorf("cannot delete in read-only transaction")
+	}
+
+	// The caller must name the index with the "_prefix" suffix to make the
+	// prefix semantics explicit; the suffix is stripped for actual lookups.
+	if !strings.HasSuffix(prefix_index, "_prefix") {
+		return false, fmt.Errorf("Index name for DeletePrefix must be a prefix index, Got %v ", prefix_index)
+	}
+
+	deletePrefixIndex := strings.TrimSuffix(prefix_index, "_prefix")
+
+	// Get an iterator over all of the keys with the given prefix.
+	entries, err := txn.Get(table, prefix_index, prefix)
+	if err != nil {
+		return false, fmt.Errorf("failed kvs lookup: %s", err)
+	}
+	// Get the table schema
+	tableSchema, ok := txn.db.schema.Tables[table]
+	if !ok {
+		return false, fmt.Errorf("invalid table '%s'", table)
+	}
+
+	foundAny := false
+	for entry := entries.Next(); entry != nil; entry = entries.Next() {
+		if !foundAny {
+			foundAny = true
+		}
+		// Get the primary ID of the object
+		idSchema := tableSchema.Indexes[id]
+		idIndexer := idSchema.Indexer.(SingleIndexer)
+		ok, idVal, err := idIndexer.FromObject(entry)
+		if err != nil {
+			return false, fmt.Errorf("failed to build primary index: %v", err)
+		}
+		if !ok {
+			return false, fmt.Errorf("object missing primary index")
+		}
+		if txn.changes != nil {
+			// Record the deletion
+			idTxn := txn.writableIndex(table, id)
+			existing, ok := idTxn.Get(idVal)
+			if ok {
+				txn.changes = append(txn.changes, Change{
+					Table:      table,
+					Before:     existing,
+					After:      nil, // Now nil indicates deletion
+					primaryKey: idVal,
+				})
+			}
+		}
+		// Remove the object from all the indexes except the given prefix index;
+		// that one is handled in bulk by DeletePrefix below.
+		for name, indexSchema := range tableSchema.Indexes {
+			if name == deletePrefixIndex {
+				continue
+			}
+			indexTxn := txn.writableIndex(table, name)
+
+			// Handle the update by deleting from the index first
+			var (
+				ok   bool
+				vals [][]byte
+				err  error
+			)
+			switch indexer := indexSchema.Indexer.(type) {
+			case SingleIndexer:
+				var val []byte
+				ok, val, err = indexer.FromObject(entry)
+				vals = [][]byte{val}
+			case MultiIndexer:
+				ok, vals, err = indexer.FromObject(entry)
+			}
+			if err != nil {
+				return false, fmt.Errorf("failed to build index '%s': %v", name, err)
+			}
+
+			if ok {
+				// Handle non-unique index by computing a unique index.
+				// This is done by appending the primary key which must
+				// be unique anyways.
+				for _, val := range vals {
+					if !indexSchema.Unique {
+						val = append(val, idVal...)
+					}
+					indexTxn.Delete(val)
+				}
+			}
+		}
+
+	}
+	if foundAny {
+		indexTxn := txn.writableIndex(table, deletePrefixIndex)
+		// The iterator above matched entries, so the subtree delete must
+		// remove something; anything else is an internal invariant failure.
+		ok = indexTxn.DeletePrefix([]byte(prefix))
+		if !ok {
+			panic(fmt.Errorf("prefix %v matched some entries but DeletePrefix did not delete any ", prefix))
+		}
+		return true, nil
+	}
+	return false, nil
+}
+
+// DeleteAll deletes every object in the given table that matches the
+// constraints on the index, returning how many were removed.
+func (txn *Txn) DeleteAll(table, index string, args ...interface{}) (int, error) {
+	if !txn.write {
+		return 0, fmt.Errorf("cannot delete in read-only transaction")
+	}
+
+	// Collect all matching objects first.
+	iter, err := txn.Get(table, index, args...)
+	if err != nil {
+		return 0, err
+	}
+
+	// Materialize the matches into a slice so the deletes below cannot
+	// disturb the iteration.
+	var matches []interface{}
+	for obj := iter.Next(); obj != nil; obj = iter.Next() {
+		matches = append(matches, obj)
+	}
+
+	// Perform the deletes, counting successes.
+	deleted := 0
+	for _, obj := range matches {
+		if err := txn.Delete(table, obj); err != nil {
+			return deleted, err
+		}
+		deleted++
+	}
+	return deleted, nil
+}
+
+// FirstWatch is used to return the first matching object for
+// the given constraints on the index along with the watch channel.
+//
+// Note that all values read in the transaction form a consistent snapshot
+// from the time when the transaction was created.
+//
+// The watch channel is closed when a subsequent write transaction
+// has updated the result of the query. Since each read transaction
+// operates on an isolated snapshot, a new read transaction must be
+// started to observe the changes that have been made.
+//
+// If the value of index ends with "_prefix", FirstWatch will perform a prefix
+// match instead of full match on the index. The registered indexer must implement
+// PrefixIndexer, otherwise an error is returned.
+func (txn *Txn) FirstWatch(table, index string, args ...interface{}) (<-chan struct{}, interface{}, error) {
+	// Get the index value
+	indexSchema, val, err := txn.getIndexValue(table, index, args...)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Get the index itself
+	indexTxn := txn.readableIndex(table, indexSchema.Name)
+
+	// Do an exact lookup. Only valid on a unique index that was queried
+	// without the "_prefix" suffix (indexSchema.Name == index) with args.
+	if indexSchema.Unique && val != nil && indexSchema.Name == index {
+		watch, obj, ok := indexTxn.GetWatch(val)
+		if !ok {
+			return watch, nil, nil
+		}
+		return watch, obj, nil
+	}
+
+	// Handle non-unique index by using an iterator and getting the first value
+	iter := indexTxn.Root().Iterator()
+	watch := iter.SeekPrefixWatch(val)
+	_, value, _ := iter.Next()
+	return watch, value, nil
+}
+
+// LastWatch is used to return the last matching object for
+// the given constraints on the index along with the watch channel.
+//
+// Note that all values read in the transaction form a consistent snapshot
+// from the time when the transaction was created.
+//
+// The watch channel is closed when a subsequent write transaction
+// has updated the result of the query. Since each read transaction
+// operates on an isolated snapshot, a new read transaction must be
+// started to observe the changes that have been made.
+//
+// If the value of index ends with "_prefix", LastWatch will perform a prefix
+// match instead of full match on the index. The registered indexer must implement
+// PrefixIndexer, otherwise an error is returned.
+func (txn *Txn) LastWatch(table, index string, args ...interface{}) (<-chan struct{}, interface{}, error) {
+	// Get the index value
+	indexSchema, val, err := txn.getIndexValue(table, index, args...)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Get the index itself
+	indexTxn := txn.readableIndex(table, indexSchema.Name)
+
+	// Do an exact lookup; on a unique index first and last coincide.
+	if indexSchema.Unique && val != nil && indexSchema.Name == index {
+		watch, obj, ok := indexTxn.GetWatch(val)
+		if !ok {
+			return watch, nil, nil
+		}
+		return watch, obj, nil
+	}
+
+	// Handle non-unique index by using a reverse iterator and getting the last value
+	iter := indexTxn.Root().ReverseIterator()
+	watch := iter.SeekPrefixWatch(val)
+	_, value, _ := iter.Previous()
+	return watch, value, nil
+}
+
+// First returns the first object matching the given constraints on the
+// index, discarding the watch channel that FirstWatch also provides.
+//
+// Note that all values read in the transaction form a consistent snapshot
+// from the time when the transaction was created.
+func (txn *Txn) First(table, index string, args ...interface{}) (interface{}, error) {
+	_, obj, err := txn.FirstWatch(table, index, args...)
+	if err != nil {
+		return nil, err
+	}
+	return obj, nil
+}
+
+// Last returns the last object matching the given constraints on the
+// index, discarding the watch channel that LastWatch also provides.
+//
+// Note that all values read in the transaction form a consistent snapshot
+// from the time when the transaction was created.
+func (txn *Txn) Last(table, index string, args ...interface{}) (interface{}, error) {
+	_, obj, err := txn.LastWatch(table, index, args...)
+	if err != nil {
+		return nil, err
+	}
+	return obj, nil
+}
+
+// LongestPrefix is used to fetch the longest prefix match for the given
+// constraints on the index. Note that this will not work with the memdb
+// StringFieldIndex because it adds null terminators which prevent the
+// algorithm from correctly finding a match (it will get to right before the
+// null and fail to find a leaf node). This should only be used where the prefix
+// given is capable of matching indexed entries directly, which typically only
+// applies to a custom indexer. See the unit test for an example.
+//
+// Note that all values read in the transaction form a consistent snapshot
+// from the time when the transaction was created.
+func (txn *Txn) LongestPrefix(table, index string, args ...interface{}) (interface{}, error) {
+	// Enforce that this only works on prefix indexes.
+	if !strings.HasSuffix(index, "_prefix") {
+		return nil, fmt.Errorf("must use '%s_prefix' on index", index)
+	}
+
+	// Get the index value.
+	indexSchema, val, err := txn.getIndexValue(table, index, args...)
+	if err != nil {
+		return nil, err
+	}
+
+	// This algorithm only makes sense against a unique index, otherwise the
+	// index keys will have the IDs appended to them.
+	if !indexSchema.Unique {
+		return nil, fmt.Errorf("index '%s' is not unique", index)
+	}
+
+	// Find the longest prefix match with the given index; returns
+	// (nil, nil) when no indexed key is a prefix of val.
+	indexTxn := txn.readableIndex(table, indexSchema.Name)
+	if _, value, ok := indexTxn.Root().LongestPrefix(val); ok {
+		return value, nil
+	}
+	return nil, nil
+}
+
+// getIndexValue is used to get the IndexSchema and the value
+// used to scan the index given the parameters. This handles prefix based
+// scans when the index has the "_prefix" suffix. The index must support
+// prefix iteration.
+func (txn *Txn) getIndexValue(table, index string, args ...interface{}) (*IndexSchema, []byte, error) {
+	// Get the table schema
+	tableSchema, ok := txn.db.schema.Tables[table]
+	if !ok {
+		return nil, nil, fmt.Errorf("invalid table '%s'", table)
+	}
+
+	// Check for a prefix scan; the suffix is stripped so the schema
+	// lookup below uses the real index name.
+	prefixScan := false
+	if strings.HasSuffix(index, "_prefix") {
+		index = strings.TrimSuffix(index, "_prefix")
+		prefixScan = true
+	}
+
+	// Get the index schema
+	indexSchema, ok := tableSchema.Indexes[index]
+	if !ok {
+		return nil, nil, fmt.Errorf("invalid index '%s'", index)
+	}
+
+	// Hot-path for when there are no arguments: a nil value scans the
+	// entire index.
+	if len(args) == 0 {
+		return indexSchema, nil, nil
+	}
+
+	// Special case the prefix scanning
+	if prefixScan {
+		prefixIndexer, ok := indexSchema.Indexer.(PrefixIndexer)
+		if !ok {
+			return indexSchema, nil,
+				fmt.Errorf("index '%s' does not support prefix scanning", index)
+		}
+
+		val, err := prefixIndexer.PrefixFromArgs(args...)
+		if err != nil {
+			return indexSchema, nil, fmt.Errorf("index error: %v", err)
+		}
+		return indexSchema, val, err
+	}
+
+	// Get the exact match index
+	val, err := indexSchema.Indexer.FromArgs(args...)
+	if err != nil {
+		return indexSchema, nil, fmt.Errorf("index error: %v", err)
+	}
+	return indexSchema, val, err
+}
+
+// ResultIterator is used to iterate over a list of results from a query on a table.
+//
+// When a ResultIterator is created from a write transaction, the results from
+// Next will reflect a snapshot of the table at the time the ResultIterator is
+// created.
+// This means that calling Insert or Delete on a transaction while iterating is
+// allowed, but the changes made by Insert or Delete will not be observed in the
+// results returned from subsequent calls to Next. For example if an item is deleted
+// from the index used by the iterator it will still be returned by Next. If an
+// item is inserted into the index used by the iterator, it will not be returned
+// by Next. However, an iterator created after a call to Insert or Delete will
+// reflect the modifications.
+//
+// When a ResultIterator is created from a write transaction, and there are already
+// modifications to the index used by the iterator, the modification cache of the
+// index will be invalidated. This may result in some additional allocations if
+// the same node in the index is modified again.
+type ResultIterator interface {
+	// WatchCh returns a channel that is closed when the underlying result
+	// set changes; it may be nil for lower-bound iterators.
+	WatchCh() <-chan struct{}
+	// Next returns the next result from the iterator. If there are no more results
+	// nil is returned.
+	Next() interface{}
+}
+
+// Get constructs a ResultIterator over every row matching the given
+// constraints of an index. Values must match exactly by default (this is
+// not a range-based or prefix-based lookup).
+//
+// Prefix lookups: if the named index implements PrefixIndexer, you may perform
+// prefix-based lookups by appending "_prefix" to the index name. In this
+// scenario, the index values given in args are treated as prefix lookups. For
+// example, a StringFieldIndex will match any string with the given value
+// as a prefix: "mem" matches "memdb".
+//
+// See the documentation for ResultIterator to understand the behaviour of the
+// returned ResultIterator.
+func (txn *Txn) Get(table, index string, args ...interface{}) (ResultIterator, error) {
+	it, prefix, err := txn.getIndexIterator(table, index, args...)
+	if err != nil {
+		return nil, err
+	}
+
+	// Position the iterator at the matching sub-tree, capturing the watch
+	// channel for that prefix as we go.
+	watch := it.SeekPrefixWatch(prefix)
+
+	return &radixIterator{
+		iter:    it,
+		watchCh: watch,
+	}, nil
+}
+
+// GetReverse constructs a reverse ResultIterator over every row matching
+// the given constraints of an index; each call to the iterator's Next()
+// yields the previous value in index order.
+//
+// See the documentation on Get for details on arguments.
+//
+// See the documentation for ResultIterator to understand the behaviour of the
+// returned ResultIterator.
+func (txn *Txn) GetReverse(table, index string, args ...interface{}) (ResultIterator, error) {
+	it, prefix, err := txn.getIndexIteratorReverse(table, index, args...)
+	if err != nil {
+		return nil, err
+	}
+
+	// Position the iterator at the matching sub-tree, capturing the watch
+	// channel for that prefix as we go.
+	watch := it.SeekPrefixWatch(prefix)
+
+	return &radixReverseIterator{
+		iter:    it,
+		watchCh: watch,
+	}, nil
+}
+
+// LowerBound constructs a ResultIterator over the range of rows whose index
+// value is greater than or equal to the provided args. Iterating until the
+// rows exceed the required bound allows range scans within an index. It is
+// not possible to watch the resulting iterator since the radix tree doesn't
+// efficiently allow watching on lower bound changes; the WatchCh returned
+// will be nil and so will block forever.
+//
+// If the value of index ends with "_prefix", LowerBound will perform a prefix
+// match instead of a full match on the index. The registered index must
+// implement PrefixIndexer, otherwise an error is returned.
+//
+// See the documentation for ResultIterator to understand the behaviour of the
+// returned ResultIterator.
+func (txn *Txn) LowerBound(table, index string, args ...interface{}) (ResultIterator, error) {
+	it, bound, err := txn.getIndexIterator(table, index, args...)
+	if err != nil {
+		return nil, err
+	}
+
+	// Position at the first key >= bound; no watch channel is available
+	// for lower-bound seeks.
+	it.SeekLowerBound(bound)
+
+	return &radixIterator{iter: it}, nil
+}
+
+// ReverseLowerBound constructs a reverse ResultIterator over the range of
+// rows whose index value is less than or equal to the provided args.
+// Iterating until the rows fall below the required bound allows range scans
+// within an index. It is not possible to watch the resulting iterator since
+// the radix tree doesn't efficiently allow watching on lower bound changes;
+// the WatchCh returned will be nil and so will block forever.
+//
+// See the documentation for ResultIterator to understand the behaviour of the
+// returned ResultIterator.
+func (txn *Txn) ReverseLowerBound(table, index string, args ...interface{}) (ResultIterator, error) {
+	it, bound, err := txn.getIndexIteratorReverse(table, index, args...)
+	if err != nil {
+		return nil, err
+	}
+
+	// Position at the last key <= bound; no watch channel is available
+	// for lower-bound seeks.
+	it.SeekReverseLowerBound(bound)
+
+	return &radixReverseIterator{iter: it}, nil
+}
+
+// objectID is a tuple of table name and the raw internal id byte slice
+// converted to a string. It's only converted to a string to make it comparable
+// so this struct can be used as a map index.
+type objectID struct {
+	Table string
+	// IndexVal is the primary key bytes stringified for comparability.
+	IndexVal string
+}
+
+// mutInfo stores metadata about mutations to allow collapsing multiple
+// mutations to the same object into one.
+type mutInfo struct {
+	// firstBefore is the Before value of the earliest mutation for the key.
+	firstBefore interface{}
+	// lastIdx is the position of the most recent mutation for the key.
+	lastIdx int
+}
+
+// Changes returns the set of object changes that have been made in the
+// transaction so far. If change tracking is not enabled it will always return
+// nil. It can be called before or after Commit. If it is before Commit it will
+// return all changes made so far which may not be the same as the final
+// Changes. After abort it will always return nil. As with other Txn methods
+// it's not safe to call this from a different goroutine than the one making
+// mutations or committing the transaction. Mutations will appear in the order
+// they were performed in the transaction but multiple operations to the same
+// object will be collapsed so only the effective overall change to that object
+// is present. If transaction operations are dependent (e.g. copy object X to Y
+// then delete X) this might mean the set of mutations is incomplete to verify
+// history, but it is complete in that the net effect is preserved (Y got a new
+// value, X got removed).
+func (txn *Txn) Changes() Changes {
+	if txn.changes == nil {
+		return nil
+	}
+
+	// De-duplicate mutations by key so all take effect at the point of the last
+	// write but we keep the mutations in order.
+	dups := make(map[objectID]mutInfo)
+	for i, m := range txn.changes {
+		oid := objectID{
+			Table:    m.Table,
+			IndexVal: string(m.primaryKey),
+		}
+		// Store the latest mutation index for each key value
+		mi, ok := dups[oid]
+		if !ok {
+			// First entry for key, store the before value
+			mi.firstBefore = m.Before
+		}
+		mi.lastIdx = i
+		dups[oid] = mi
+	}
+	if len(dups) == len(txn.changes) {
+		// No duplicates found, fast path return it as is
+		return txn.changes
+	}
+
+	// Need to remove the duplicates: keep only the last mutation per key,
+	// stitched to the first observed Before value.
+	cs := make(Changes, 0, len(dups))
+	for i, m := range txn.changes {
+		oid := objectID{
+			Table:    m.Table,
+			IndexVal: string(m.primaryKey),
+		}
+		mi := dups[oid]
+		if mi.lastIdx == i {
+			// This was the latest value for this key copy it with the before value in
+			// case it's different. Note that m is not a pointer so we are not
+			// modifying the txn.changeSet here - it's already a copy.
+			m.Before = mi.firstBefore
+
+			// Edge case - if the object was inserted and then eventually deleted in
+			// the same transaction, then the net effect on that key is a no-op. Don't
+			// emit a mutation with nil for before and after as it's meaningless and
+			// might violate expectations and cause a panic in code that assumes at
+			// least one must be set.
+			if m.Before == nil && m.After == nil {
+				continue
+			}
+			cs = append(cs, m)
+		}
+	}
+	// Store the de-duped version in case this is called again
+	txn.changes = cs
+	return cs
+}
+
+// getIndexIterator resolves the index schema and scan value for the given
+// arguments and returns a forward iterator over that index together with
+// the value to seek with.
+func (txn *Txn) getIndexIterator(table, index string, args ...interface{}) (*iradix.Iterator, []byte, error) {
+	indexSchema, val, err := txn.getIndexValue(table, index, args...)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Iterate over a read-only view of the index.
+	indexTxn := txn.readableIndex(table, indexSchema.Name)
+	return indexTxn.Root().Iterator(), val, nil
+}
+
+// getIndexIteratorReverse resolves the index schema and scan value for the
+// given arguments and returns a reverse iterator over that index together
+// with the value to seek with.
+func (txn *Txn) getIndexIteratorReverse(table, index string, args ...interface{}) (*iradix.ReverseIterator, []byte, error) {
+	indexSchema, val, err := txn.getIndexValue(table, index, args...)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Iterate over a read-only view of the index.
+	indexTxn := txn.readableIndex(table, indexSchema.Name)
+	return indexTxn.Root().ReverseIterator(), val, nil
+}
+
+// Defer is used to push a new arbitrary function onto a stack which
+// gets called when a transaction is committed and finished. Deferred
+// functions are called in LIFO order, and only invoked at the end of
+// write transactions. They are discarded (never run) on Abort.
+func (txn *Txn) Defer(fn func()) {
+	txn.after = append(txn.after, fn)
+}
+
+// radixIterator is used to wrap an underlying iradix iterator.
+// This is much more efficient than a sliceIterator as we are not
+// materializing the entire view.
+type radixIterator struct {
+	iter *iradix.Iterator
+	// watchCh is nil for lower-bound iterators, which cannot be watched.
+	watchCh <-chan struct{}
+}
+
+// WatchCh returns the channel closed when the iterated prefix changes;
+// it may be nil (see ResultIterator).
+func (r *radixIterator) WatchCh() <-chan struct{} {
+	return r.watchCh
+}
+
+// Next returns the next object in iteration order, or nil when the
+// iterator is exhausted.
+func (r *radixIterator) Next() interface{} {
+	if _, value, ok := r.iter.Next(); ok {
+		return value
+	}
+	return nil
+}
+
+// radixReverseIterator wraps an underlying iradix reverse iterator so that
+// Next() walks the index in descending key order.
+type radixReverseIterator struct {
+	iter *iradix.ReverseIterator
+	// watchCh is nil for reverse-lower-bound iterators, which cannot be watched.
+	watchCh <-chan struct{}
+}
+
+// Next returns the previous object in index order (descending iteration),
+// or nil when the iterator is exhausted.
+func (r *radixReverseIterator) Next() interface{} {
+	if _, value, ok := r.iter.Previous(); ok {
+		return value
+	}
+	return nil
+}
+
+// WatchCh returns the channel closed when the iterated prefix changes;
+// it may be nil (see ResultIterator).
+func (r *radixReverseIterator) WatchCh() <-chan struct{} {
+	return r.watchCh
+}
+
+// Snapshot creates a snapshot of the current state of the transaction.
+// Returns a new read-only transaction or nil if the transaction is already
+// aborted or committed. The snapshot's write flag is left false, so it
+// cannot be mutated, and it holds no lock on the underlying DB.
+func (txn *Txn) Snapshot() *Txn {
+	if txn.rootTxn == nil {
+		return nil
+	}
+
+	snapshot := &Txn{
+		db:      txn.db,
+		rootTxn: txn.rootTxn.Clone(),
+	}
+
+	// Commit sub-transactions into the snapshot so pending writes are
+	// visible there. CommitOnly leaves the originals open and fires no
+	// watch notifications.
+	for key, subTxn := range txn.modified {
+		path := indexPath(key.Table, key.Index)
+		final := subTxn.CommitOnly()
+		snapshot.rootTxn.Insert(path, final)
+	}
+
+	return snapshot
+}
diff --git a/vendor/github.com/hashicorp/go-memdb/watch.go b/vendor/github.com/hashicorp/go-memdb/watch.go
new file mode 100644
index 0000000..4d9cc7e
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/watch.go
@@ -0,0 +1,155 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package memdb
+
+import (
+ "context"
+ "time"
+)
+
+// WatchSet is a collection of watch channels. The zero value is not usable.
+// Use NewWatchSet to create a WatchSet.
+//
+// Methods are nil-tolerant: Add is a no-op on a nil set, Watch returns
+// false, and WatchCtx returns nil (see their implementations below).
+type WatchSet map[<-chan struct{}]struct{}
+
+// NewWatchSet constructs an empty, ready-to-use watch set.
+func NewWatchSet() WatchSet {
+	return WatchSet{}
+}
+
+// Add registers watchCh with the watch set. Adding to a nil WatchSet is
+// a no-op, and re-adding a channel that is already present leaves the
+// set unchanged.
+func (w WatchSet) Add(watchCh <-chan struct{}) {
+	if w == nil {
+		return
+	}
+	// Map assignment is idempotent for an empty struct value, so the
+	// previous membership pre-check was a redundant second lookup.
+	w[watchCh] = struct{}{}
+}
+
+// AddWithLimit appends a watchCh to the WatchSet if non-nil, and if the given
+// softLimit hasn't been exceeded. Otherwise, it will watch the given alternate
+// channel. It's expected that the altCh will be the same on many calls to this
+// function, so you will exceed the soft limit a little bit if you hit this, but
+// not by much.
+//
+// This is useful if you want to track individual items up to some limit, after
+// which you watch a higher-level channel (usually a channel from start of
+// an iterator higher up in the radix tree) that will watch a superset of items.
+func (w WatchSet) AddWithLimit(softLimit int, watchCh <-chan struct{}, altCh <-chan struct{}) {
+	// len of a nil map is 0, so a nil WatchSet is handled by Add itself.
+	if len(w) >= softLimit {
+		w.Add(altCh)
+		return
+	}
+	w.Add(watchCh)
+}
+
+// Watch blocks until one of the channels in the watch set is closed, or
+// timeoutCh sends a value.
+// Returns true if timeoutCh is what caused Watch to unblock.
+func (w WatchSet) Watch(timeoutCh <-chan time.Time) bool {
+	if w == nil {
+		return false
+	}
+
+	// Create a context that gets cancelled when the timeout is triggered
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	// Bridge timeoutCh onto context cancellation. The deferred cancel
+	// above guarantees this goroutine exits once Watch returns, via the
+	// ctx.Done() arm.
+	go func() {
+		select {
+		case <-timeoutCh:
+			cancel()
+		case <-ctx.Done():
+		}
+	}()
+
+	// WatchCtx returns context.Canceled exactly when the timeout path
+	// cancelled the context before any watch channel fired.
+	return w.WatchCtx(ctx) == context.Canceled
+}
+
+// WatchCtx blocks until one of the channels in the watch set is closed, or
+// ctx is done (cancelled or exceeds the deadline). WatchCtx returns an error
+// if the ctx causes it to unblock, otherwise returns nil.
+//
+// WatchCtx should be preferred over Watch.
+func (w WatchSet) WatchCtx(ctx context.Context) error {
+	if w == nil {
+		return nil
+	}
+
+	// Small sets take the fast path: one watchFew call covers up to
+	// aFew channels, and the unused slots stay nil (which never fire).
+	if len(w) <= aFew {
+		few := make([]<-chan struct{}, aFew)
+		i := 0
+		for ch := range w {
+			few[i] = ch
+			i++
+		}
+		return watchFew(ctx, few)
+	}
+
+	return w.watchMany(ctx)
+}
+
+// watchMany is used if there are many watchers. It fans the channels
+// out into chunks of aFew, runs one goroutine per chunk, and returns
+// when any chunk fires or ctx is done.
+func (w WatchSet) watchMany(ctx context.Context) error {
+	// Derive a context so every chunk goroutine is released on return.
+	watcherCtx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	// Buffered by one so the first firing chunk never blocks; later
+	// chunks hit the default arm and simply exit.
+	triggerCh := make(chan struct{}, 1)
+	runChunk := func(chs []<-chan struct{}) {
+		if watchFew(watcherCtx, chs) == nil {
+			select {
+			case triggerCh <- struct{}{}:
+			default:
+			}
+		}
+	}
+
+	// Apportion the watch channels into fixed-size chunks that watchFew
+	// can consume; a fresh slice is allocated per chunk since each
+	// goroutine keeps a reference to its own.
+	filled := 0
+	pending := make([]<-chan struct{}, aFew)
+	for ch := range w {
+		pending[filled%aFew] = ch
+		filled++
+		if filled%aFew == 0 {
+			go runChunk(pending)
+			pending = make([]<-chan struct{}, aFew)
+		}
+	}
+
+	// Watch any residual, partially-filled chunk (nil slots are inert).
+	if filled%aFew != 0 {
+		go runChunk(pending)
+	}
+
+	// Wait for a channel to trigger or for the caller's context.
+	select {
+	case <-triggerCh:
+		return nil
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+}
+
+// WatchCh returns a channel that is used to wait for any channel of the watch set to trigger
+// or for the context to be cancelled. WatchCh creates a new goroutine each call, so
+// callers may need to cache the returned channel to avoid creating extra goroutines.
+func (w WatchSet) WatchCh(ctx context.Context) <-chan error {
+	// Buffered so the forwarding goroutine never leaks even if the
+	// caller abandons the returned channel.
+	errCh := make(chan error, 1)
+	go func() {
+		errCh <- w.WatchCtx(ctx)
+	}()
+	return errCh
+}
diff --git a/vendor/github.com/hashicorp/go-memdb/watch_few.go b/vendor/github.com/hashicorp/go-memdb/watch_few.go
new file mode 100644
index 0000000..ccdbff0
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/watch_few.go
@@ -0,0 +1,120 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package memdb
+
+//go:generate sh -c "go run watch-gen/main.go >watch_few.go"
+
+import (
+ "context"
+)
+
+// aFew gives how many watchers this function is wired to support. You must
+// always pass a full slice of this length, but unused channels can be nil.
+// Keep in sync with the number of select arms in watchFew — regenerate via
+// the go:generate directive above rather than editing by hand.
+const aFew = 32
+
+// watchFew is used if there are only a few watchers as a performance
+// optimization.
+//
+// NOTE: this function is machine-generated (see the go:generate
+// directive above); do not edit the select arms by hand. Receiving from
+// a nil channel blocks forever, so unused slots in ch are simply inert
+// and only the populated channels (or ctx) can unblock the select.
+func watchFew(ctx context.Context, ch []<-chan struct{}) error {
+	select {
+
+	case <-ch[0]:
+		return nil
+
+	case <-ch[1]:
+		return nil
+
+	case <-ch[2]:
+		return nil
+
+	case <-ch[3]:
+		return nil
+
+	case <-ch[4]:
+		return nil
+
+	case <-ch[5]:
+		return nil
+
+	case <-ch[6]:
+		return nil
+
+	case <-ch[7]:
+		return nil
+
+	case <-ch[8]:
+		return nil
+
+	case <-ch[9]:
+		return nil
+
+	case <-ch[10]:
+		return nil
+
+	case <-ch[11]:
+		return nil
+
+	case <-ch[12]:
+		return nil
+
+	case <-ch[13]:
+		return nil
+
+	case <-ch[14]:
+		return nil
+
+	case <-ch[15]:
+		return nil
+
+	case <-ch[16]:
+		return nil
+
+	case <-ch[17]:
+		return nil
+
+	case <-ch[18]:
+		return nil
+
+	case <-ch[19]:
+		return nil
+
+	case <-ch[20]:
+		return nil
+
+	case <-ch[21]:
+		return nil
+
+	case <-ch[22]:
+		return nil
+
+	case <-ch[23]:
+		return nil
+
+	case <-ch[24]:
+		return nil
+
+	case <-ch[25]:
+		return nil
+
+	case <-ch[26]:
+		return nil
+
+	case <-ch[27]:
+		return nil
+
+	case <-ch[28]:
+		return nil
+
+	case <-ch[29]:
+		return nil
+
+	case <-ch[30]:
+		return nil
+
+	case <-ch[31]:
+		return nil
+
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+}