author    mo khan <mo@mokhan.ca>  2025-07-24 17:58:01 -0600
committer mo khan <mo@mokhan.ca>  2025-07-24 17:58:01 -0600
commit    72296119fc9755774719f8f625ad03e0e0ec457a (patch)
tree      ed236ddee12a20fb55b7cfecf13f62d3a000dcb5 /vendor/github.com/hashicorp
parent    a920a8cfe415858bb2777371a77018599ffed23f (diff)
parent    eaa1bd3b8e12934aed06413d75e7482ac58d805a (diff)
Merge branch 'the-spice-must-flow' into 'main'
Add SpiceDB Authorization

See merge request gitlab-org/software-supply-chain-security/authorization/sparkled!19
Diffstat (limited to 'vendor/github.com/hashicorp')
 vendor/github.com/hashicorp/go-immutable-radix/.gitignore         |   24
 vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md       |   23
 vendor/github.com/hashicorp/go-immutable-radix/LICENSE            |  363
 vendor/github.com/hashicorp/go-immutable-radix/README.md          |   66
 vendor/github.com/hashicorp/go-immutable-radix/edges.go           |   21
 vendor/github.com/hashicorp/go-immutable-radix/iradix.go          |  676
 vendor/github.com/hashicorp/go-immutable-radix/iter.go            |  205
 vendor/github.com/hashicorp/go-immutable-radix/node.go            |  334
 vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go        |   78
 vendor/github.com/hashicorp/go-immutable-radix/reverse_iter.go    |  239
 vendor/github.com/hashicorp/go-memdb/.gitignore                   |   26
 vendor/github.com/hashicorp/go-memdb/CODEOWNERS                   |   13
 vendor/github.com/hashicorp/go-memdb/LICENSE                      |  365
 vendor/github.com/hashicorp/go-memdb/README.md                    |  146
 vendor/github.com/hashicorp/go-memdb/changes.go                   |   37
 vendor/github.com/hashicorp/go-memdb/filter.go                    |   41
 vendor/github.com/hashicorp/go-memdb/index.go                     |  934
 vendor/github.com/hashicorp/go-memdb/memdb.go                     |  119
 vendor/github.com/hashicorp/go-memdb/schema.go                    |  117
 vendor/github.com/hashicorp/go-memdb/txn.go                       | 1024
 vendor/github.com/hashicorp/go-memdb/watch.go                     |  155
 vendor/github.com/hashicorp/go-memdb/watch_few.go                 |  120
 vendor/github.com/hashicorp/golang-lru/LICENSE                    |  364
 vendor/github.com/hashicorp/golang-lru/simplelru/lru.go           |  177
 vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go |   40
 vendor/github.com/hashicorp/hcl/.gitignore                        |    9
 vendor/github.com/hashicorp/hcl/.travis.yml                       |   13
 vendor/github.com/hashicorp/hcl/LICENSE                           |  354
 vendor/github.com/hashicorp/hcl/Makefile                          |   18
 vendor/github.com/hashicorp/hcl/README.md                         |  125
 vendor/github.com/hashicorp/hcl/appveyor.yml                      |   19
 vendor/github.com/hashicorp/hcl/decoder.go                        |  729
 vendor/github.com/hashicorp/hcl/hcl.go                            |   11
 vendor/github.com/hashicorp/hcl/hcl/ast/ast.go                    |  219
 vendor/github.com/hashicorp/hcl/hcl/ast/walk.go                   |   52
 vendor/github.com/hashicorp/hcl/hcl/parser/error.go               |   17
 vendor/github.com/hashicorp/hcl/hcl/parser/parser.go              |  532
 vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go              |  789
 vendor/github.com/hashicorp/hcl/hcl/printer/printer.go            |   66
 vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go            |  652
 vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go              |  241
 vendor/github.com/hashicorp/hcl/hcl/token/position.go             |   46
 vendor/github.com/hashicorp/hcl/hcl/token/token.go                |  219
 vendor/github.com/hashicorp/hcl/json/parser/flatten.go            |  117
 vendor/github.com/hashicorp/hcl/json/parser/parser.go             |  313
 vendor/github.com/hashicorp/hcl/json/scanner/scanner.go           |  451
 vendor/github.com/hashicorp/hcl/json/token/position.go            |   46
 vendor/github.com/hashicorp/hcl/json/token/token.go               |  118
 vendor/github.com/hashicorp/hcl/lex.go                            |   38
 vendor/github.com/hashicorp/hcl/parse.go                          |   39
 50 files changed, 10940 insertions(+), 0 deletions(-)
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/.gitignore b/vendor/github.com/hashicorp/go-immutable-radix/.gitignore
new file mode 100644
index 0000000..daf913b
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md b/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md
new file mode 100644
index 0000000..86c6d03
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md
@@ -0,0 +1,23 @@
+# UNRELEASED
+
+# 1.3.0 (September 17th, 2020)
+
+FEATURES
+
+* Add reverse tree traversal [[GH-30](https://github.com/hashicorp/go-immutable-radix/pull/30)]
+
+# 1.2.0 (March 18th, 2020)
+
+FEATURES
+
+* Adds a `Clone` method to `Txn`, allowing a transaction to be split into two independently mutable trees. [[GH-26](https://github.com/hashicorp/go-immutable-radix/pull/26)]
+
+# 1.1.0 (May 22nd, 2019)
+
+FEATURES
+
+* Add `SeekLowerBound` to allow for range scans. [[GH-24](https://github.com/hashicorp/go-immutable-radix/pull/24)]
+
+# 1.0.0 (August 30th, 2018)
+
+* go mod adopted
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/LICENSE b/vendor/github.com/hashicorp/go-immutable-radix/LICENSE
new file mode 100644
index 0000000..e87a115
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/LICENSE
@@ -0,0 +1,363 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the terms of
+ a Secondary License.
+
+1.6. "Executable Form"
+
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+ means a work that combines Covered Software with other material, in a
+ separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+ means this document.
+
+1.9. "Licensable"
+
+ means having the right to grant, to the maximum extent possible, whether
+ at the time of the initial grant or subsequently, any and all of the
+ rights conveyed by this License.
+
+1.10. "Modifications"
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the License,
+ by the making, using, selling, offering for sale, having made, import,
+ or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, "control" means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution
+ become effective for each Contribution on the date the Contributor first
+ distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under
+ this License. No additional rights or licenses will be implied from the
+ distribution or licensing of Covered Software under this License.
+ Notwithstanding Section 2.1(b) above, no patent license is granted by a
+ Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+ This License does not grant any rights in the trademarks, service marks,
+ or logos of any Contributor (except as may be necessary to comply with
+ the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this
+ License (see Section 10.2) or under the terms of a Secondary License (if
+ permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its
+ Contributions are its original creation(s) or it has sufficient rights to
+ grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under
+ applicable copyright doctrines of fair use, fair dealing, or other
+ equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under
+ the terms of this License. You must inform recipients that the Source
+ Code Form of the Covered Software is governed by the terms of this
+ License, and how they can obtain a copy of this License. You may not
+ attempt to alter or restrict the recipients' rights in the Source Code
+ Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter the
+ recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for
+ the Covered Software. If the Larger Work is a combination of Covered
+ Software with a work governed by one or more Secondary Licenses, and the
+ Covered Software is not Incompatible With Secondary Licenses, this
+ License permits You to additionally distribute such Covered Software
+ under the terms of such Secondary License(s), so that the recipient of
+ the Larger Work may, at their option, further distribute the Covered
+ Software under the terms of either this License or such Secondary
+ License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices
+ (including copyright notices, patent notices, disclaimers of warranty, or
+ limitations of liability) contained within the Source Code Form of the
+ Covered Software, except that You may alter any license notices to the
+ extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on
+ behalf of any Contributor. You must make it absolutely clear that any
+ such warranty, support, indemnity, or liability obligation is offered by
+ You alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute,
+ judicial order, or regulation then You must: (a) comply with the terms of
+ this License to the maximum extent possible; and (b) describe the
+ limitations and the code they affect. Such description must be placed in a
+ text file included with all distributions of the Covered Software under
+ this License. Except to the extent prohibited by statute or regulation,
+ such description must be sufficiently detailed for a recipient of ordinary
+ skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing
+ basis, if such Contributor fails to notify You of the non-compliance by
+ some reasonable means prior to 60 days after You have come back into
+ compliance. Moreover, Your grants from a particular Contributor are
+ reinstated on an ongoing basis if such Contributor notifies You of the
+ non-compliance by some reasonable means, this is the first time You have
+ received notice of non-compliance with this License from such
+ Contributor, and You become compliant prior to 30 days after Your receipt
+ of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions,
+ counter-claims, and cross-claims) alleging that a Contributor Version
+ directly or indirectly infringes any patent, then the rights granted to
+ You by any and all Contributors for the Covered Software under Section
+ 2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an "as is" basis,
+ without warranty of any kind, either expressed, implied, or statutory,
+ including, without limitation, warranties that the Covered Software is free
+ of defects, merchantable, fit for a particular purpose or non-infringing.
+ The entire risk as to the quality and performance of the Covered Software
+ is with You. Should any Covered Software prove defective in any respect,
+ You (not any Contributor) assume the cost of any necessary servicing,
+ repair, or correction. This disclaimer of warranty constitutes an essential
+ part of this License. No use of any Covered Software is authorized under
+ this License except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from
+ such party's negligence to the extent applicable law prohibits such
+ limitation. Some jurisdictions do not allow the exclusion or limitation of
+ incidental or consequential damages, so this exclusion and limitation may
+ not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts
+ of a jurisdiction where the defendant maintains its principal place of
+ business and such litigation shall be governed by laws of that
+ jurisdiction, without reference to its conflict-of-law provisions. Nothing
+ in this Section shall prevent a party's ability to bring cross-claims or
+ counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. Any law or regulation which provides that
+ the language of a contract shall be construed against the drafter shall not
+ be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version
+ of the License under which You originally received the Covered Software,
+ or under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a
+ modified version of this License if you rename the license and remove
+ any references to the name of the license steward (except to note that
+ such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+ Licenses If You choose to distribute Source Code Form that is
+ Incompatible With Secondary Licenses under the terms of this version of
+ the License, the notice described in Exhibit B of this License must be
+ attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+ This Source Code Form is "Incompatible
+ With Secondary Licenses", as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/README.md b/vendor/github.com/hashicorp/go-immutable-radix/README.md
new file mode 100644
index 0000000..aca15a6
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/README.md
@@ -0,0 +1,66 @@
+go-immutable-radix [![CircleCI](https://circleci.com/gh/hashicorp/go-immutable-radix/tree/master.svg?style=svg)](https://circleci.com/gh/hashicorp/go-immutable-radix/tree/master)
+=========
+
+Provides the `iradix` package that implements an immutable [radix tree](http://en.wikipedia.org/wiki/Radix_tree).
+The package only provides a single `Tree` implementation, optimized for sparse nodes.
+
+As a radix tree, it provides the following:
+ * O(k) operations. In many cases, this can be faster than a hash table since
+ the hash function is an O(k) operation, and hash tables have very poor cache locality.
+ * Minimum / Maximum value lookups
+ * Ordered iteration
+
+A tree supports using a transaction to batch multiple updates (insert, delete)
+in a more efficient manner than performing each operation one at a time; a
+short transaction sketch follows the examples below.
+
+For a mutable variant, see [go-radix](https://github.com/armon/go-radix).
+
+Documentation
+=============
+
+The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-immutable-radix).
+
+Example
+=======
+
+Below is a simple example of usage:
+
+```go
+// Create a tree
+r := iradix.New()
+r, _, _ = r.Insert([]byte("foo"), 1)
+r, _, _ = r.Insert([]byte("bar"), 2)
+r, _, _ = r.Insert([]byte("foobar"), 2)
+
+// Find the longest prefix match
+m, _, _ := r.Root().LongestPrefix([]byte("foozip"))
+if string(m) != "foo" {
+ panic("should be foo")
+}
+```
+
+Here is an example of performing a range scan of the keys.
+
+```go
+// Create a tree
+r := iradix.New()
+r, _, _ = r.Insert([]byte("001"), 1)
+r, _, _ = r.Insert([]byte("002"), 2)
+r, _, _ = r.Insert([]byte("005"), 5)
+r, _, _ = r.Insert([]byte("010"), 10)
+r, _, _ = r.Insert([]byte("100"), 10)
+
+// Range scan over the keys that sort lexicographically between [003, 050)
+it := r.Root().Iterator()
+it.SeekLowerBound([]byte("003"))
+for key, _, ok := it.Next(); ok; key, _, ok = it.Next() {
+	if string(key) >= "050" {
+ break
+ }
+	fmt.Println(string(key))
+}
+// Output:
+// 005
+// 010
+```
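+
+The transaction batching mentioned in the overview looks like the following.
+A minimal sketch, assuming only the `Txn` API exposed by this package
+(`Txn`, `Insert`, `Delete`, `Commit`):
+
+```go
+// Batch several updates; readers of the old tree are unaffected
+// until Commit publishes the new root.
+r := iradix.New()
+txn := r.Txn()
+txn.Insert([]byte("a"), 1)
+txn.Insert([]byte("b"), 2)
+txn.Delete([]byte("a"))
+r = txn.Commit()
+```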
+
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/edges.go b/vendor/github.com/hashicorp/go-immutable-radix/edges.go
new file mode 100644
index 0000000..a636747
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/edges.go
@@ -0,0 +1,21 @@
+package iradix
+
+import "sort"
+
+type edges []edge
+
+func (e edges) Len() int {
+ return len(e)
+}
+
+func (e edges) Less(i, j int) bool {
+ return e[i].label < e[j].label
+}
+
+func (e edges) Swap(i, j int) {
+ e[i], e[j] = e[j], e[i]
+}
+
+func (e edges) Sort() {
+ sort.Sort(e)
+}
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iradix.go b/vendor/github.com/hashicorp/go-immutable-radix/iradix.go
new file mode 100644
index 0000000..168bda7
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/iradix.go
@@ -0,0 +1,676 @@
+package iradix
+
+import (
+ "bytes"
+ "strings"
+
+ "github.com/hashicorp/golang-lru/simplelru"
+)
+
+const (
+ // defaultModifiedCache is the default size of the modified node
+ // cache used per transaction. This is used to cache the updates
+ // to the nodes near the root, while the leaves do not need to be
+ // cached. This is important for very large transactions to prevent
+ // the modified cache from growing to be enormous. This is also used
+ // to set the max size of the mutation notify maps since those should
+ // also be bounded in a similar way.
+ defaultModifiedCache = 8192
+)
+
+// Tree implements an immutable radix tree. This can be treated as a
+// Dictionary abstract data type. The main advantage over a standard
+// hash map is prefix-based lookups and ordered iteration. The immutability
+// means that it is safe to concurrently read from a Tree without any
+// coordination.
+type Tree struct {
+ root *Node
+ size int
+}
+
+// New returns an empty Tree
+func New() *Tree {
+ t := &Tree{
+ root: &Node{
+ mutateCh: make(chan struct{}),
+ },
+ }
+ return t
+}
+
+// Len is used to return the number of elements in the tree
+func (t *Tree) Len() int {
+ return t.size
+}
+
+// Txn is a transaction on the tree. This transaction is applied
+// atomically and returns a new tree when committed. A transaction
+// is not thread safe, and should only be used by a single goroutine.
+type Txn struct {
+ // root is the modified root for the transaction.
+ root *Node
+
+ // snap is a snapshot of the root node for use if we have to run the
+ // slow notify algorithm.
+ snap *Node
+
+ // size tracks the size of the tree as it is modified during the
+ // transaction.
+ size int
+
+ // writable is a cache of writable nodes that have been created during
+ // the course of the transaction. This allows us to re-use the same
+ // nodes for further writes and avoid unnecessary copies of nodes that
+ // have never been exposed outside the transaction. This will only hold
+ // up to defaultModifiedCache number of entries.
+ writable *simplelru.LRU
+
+ // trackChannels is used to hold channels that need to be notified to
+ // signal mutation of the tree. This will only hold up to
+ // defaultModifiedCache number of entries, after which we will set the
+ // trackOverflow flag, which will cause us to use a more expensive
+ // algorithm to perform the notifications. Mutation tracking is only
+ // performed if trackMutate is true.
+ trackChannels map[chan struct{}]struct{}
+ trackOverflow bool
+ trackMutate bool
+}
+
+// Txn starts a new transaction that can be used to mutate the tree
+func (t *Tree) Txn() *Txn {
+ txn := &Txn{
+ root: t.root,
+ snap: t.root,
+ size: t.size,
+ }
+ return txn
+}
+
+// Clone makes an independent copy of the transaction. The new transaction
+// does not track any nodes and has TrackMutate turned off. The cloned
+// transaction will contain any uncommitted writes in the original
+// transaction, but further mutations to either will be independent and
+// result in different radix trees on Commit. A cloned transaction may be
+// passed to another goroutine and mutated there independently; however,
+// each transaction may only be mutated in a single thread.
+func (t *Txn) Clone() *Txn {
+ // reset the writable node cache to avoid leaking future writes into the clone
+ t.writable = nil
+
+ txn := &Txn{
+ root: t.root,
+ snap: t.snap,
+ size: t.size,
+ }
+ return txn
+}
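+
+// An editor's sketch of Clone usage (not part of the library): both
+// transactions start from the same uncommitted state and then diverge.
+//
+//	txn := tree.Txn()
+//	txn.Insert([]byte("x"), 1)
+//	clone := txn.Clone()         // clone sees the uncommitted "x"
+//	clone.Insert([]byte("y"), 2) // independent of txn from here on
+//	t1 := txn.Commit()           // contains "x"
+//	t2 := clone.Commit()         // contains "x" and "y"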
+
+// TrackMutate can be used to toggle if mutations are tracked. If this is enabled
+// then notifications will be issued for affected internal nodes and leaves when
+// the transaction is committed.
+func (t *Txn) TrackMutate(track bool) {
+ t.trackMutate = track
+}
+
+// trackChannel safely attempts to track the given mutation channel, setting the
+// overflow flag if we can no longer track any more. This limits the amount of
+// state that will accumulate during a transaction and we have a slower algorithm
+// to switch to if we overflow.
+func (t *Txn) trackChannel(ch chan struct{}) {
+ // In overflow, make sure we don't store any more objects.
+ if t.trackOverflow {
+ return
+ }
+
+ // If this would overflow the state we reject it and set the flag (since
+ // we aren't tracking everything that's required any longer).
+ if len(t.trackChannels) >= defaultModifiedCache {
+ // Mark that we are in the overflow state
+ t.trackOverflow = true
+
+ // Clear the map so that the channels can be garbage collected. It is
+ // safe to do this since we have already overflowed and will be using
+ // the slow notify algorithm.
+ t.trackChannels = nil
+ return
+ }
+
+ // Create the map on the fly when we need it.
+ if t.trackChannels == nil {
+ t.trackChannels = make(map[chan struct{}]struct{})
+ }
+
+ // Otherwise we are good to track it.
+ t.trackChannels[ch] = struct{}{}
+}
+
+// writeNode returns a node to be modified. If the current node has already
+// been modified during the course of the transaction, it is used in-place. Set
+// forLeafUpdate to true if you are getting a write node to update the leaf,
+// which will set leaf mutation tracking appropriately as well.
+func (t *Txn) writeNode(n *Node, forLeafUpdate bool) *Node {
+ // Ensure the writable set exists.
+ if t.writable == nil {
+ lru, err := simplelru.NewLRU(defaultModifiedCache, nil)
+ if err != nil {
+ panic(err)
+ }
+ t.writable = lru
+ }
+
+ // If this node has already been modified, we can continue to use it
+ // during this transaction. We know that we don't need to track it for
+ // a node update since the node is writable, but if this is for a leaf
+ // update we track it, in case the initial write to this node didn't
+ // update the leaf.
+ if _, ok := t.writable.Get(n); ok {
+ if t.trackMutate && forLeafUpdate && n.leaf != nil {
+ t.trackChannel(n.leaf.mutateCh)
+ }
+ return n
+ }
+
+ // Mark this node as being mutated.
+ if t.trackMutate {
+ t.trackChannel(n.mutateCh)
+ }
+
+ // Mark its leaf as being mutated, if appropriate.
+ if t.trackMutate && forLeafUpdate && n.leaf != nil {
+ t.trackChannel(n.leaf.mutateCh)
+ }
+
+ // Copy the existing node. If you have set forLeafUpdate it will be
+ // safe to replace this leaf with another after you get your node for
+ // writing. You MUST replace it, because the channel associated with
+ // this leaf will be closed when this transaction is committed.
+ nc := &Node{
+ mutateCh: make(chan struct{}),
+ leaf: n.leaf,
+ }
+ if n.prefix != nil {
+ nc.prefix = make([]byte, len(n.prefix))
+ copy(nc.prefix, n.prefix)
+ }
+ if len(n.edges) != 0 {
+ nc.edges = make([]edge, len(n.edges))
+ copy(nc.edges, n.edges)
+ }
+
+ // Mark this node as writable.
+ t.writable.Add(nc, nil)
+ return nc
+}
+
+// trackChannelsAndCount visits all the nodes in the tree under n, adding
+// their mutate channels to the transaction. It returns the number of leaf
+// nodes in the visited subtree.
+func (t *Txn) trackChannelsAndCount(n *Node) int {
+ // Count only leaf nodes
+ leaves := 0
+ if n.leaf != nil {
+ leaves = 1
+ }
+ // Mark this node as being mutated.
+ if t.trackMutate {
+ t.trackChannel(n.mutateCh)
+ }
+
+ // Mark its leaf as being mutated, if appropriate.
+ if t.trackMutate && n.leaf != nil {
+ t.trackChannel(n.leaf.mutateCh)
+ }
+
+ // Recurse on the children
+ for _, e := range n.edges {
+ leaves += t.trackChannelsAndCount(e.node)
+ }
+ return leaves
+}
+
+// mergeChild is called to collapse the given node with its child. This is only
+// called when the given node is not a leaf and has a single edge.
+func (t *Txn) mergeChild(n *Node) {
+ // Mark the child node as being mutated since we are about to abandon
+ // it. We don't need to mark the leaf since we are retaining it if it
+ // is there.
+ e := n.edges[0]
+ child := e.node
+ if t.trackMutate {
+ t.trackChannel(child.mutateCh)
+ }
+
+ // Merge the nodes.
+ n.prefix = concat(n.prefix, child.prefix)
+ n.leaf = child.leaf
+ if len(child.edges) != 0 {
+ n.edges = make([]edge, len(child.edges))
+ copy(n.edges, child.edges)
+ } else {
+ n.edges = nil
+ }
+}
+
+// insert does a recursive insertion
+func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface{}, bool) {
+ // Handle key exhaustion
+ if len(search) == 0 {
+ var oldVal interface{}
+ didUpdate := false
+ if n.isLeaf() {
+ oldVal = n.leaf.val
+ didUpdate = true
+ }
+
+ nc := t.writeNode(n, true)
+ nc.leaf = &leafNode{
+ mutateCh: make(chan struct{}),
+ key: k,
+ val: v,
+ }
+ return nc, oldVal, didUpdate
+ }
+
+ // Look for the edge
+ idx, child := n.getEdge(search[0])
+
+ // No edge, create one
+ if child == nil {
+ e := edge{
+ label: search[0],
+ node: &Node{
+ mutateCh: make(chan struct{}),
+ leaf: &leafNode{
+ mutateCh: make(chan struct{}),
+ key: k,
+ val: v,
+ },
+ prefix: search,
+ },
+ }
+ nc := t.writeNode(n, false)
+ nc.addEdge(e)
+ return nc, nil, false
+ }
+
+ // Determine longest prefix of the search key on match
+ commonPrefix := longestPrefix(search, child.prefix)
+ if commonPrefix == len(child.prefix) {
+ search = search[commonPrefix:]
+ newChild, oldVal, didUpdate := t.insert(child, k, search, v)
+ if newChild != nil {
+ nc := t.writeNode(n, false)
+ nc.edges[idx].node = newChild
+ return nc, oldVal, didUpdate
+ }
+ return nil, oldVal, didUpdate
+ }
+
+ // Split the node
+ nc := t.writeNode(n, false)
+ splitNode := &Node{
+ mutateCh: make(chan struct{}),
+ prefix: search[:commonPrefix],
+ }
+ nc.replaceEdge(edge{
+ label: search[0],
+ node: splitNode,
+ })
+
+ // Restore the existing child node
+ modChild := t.writeNode(child, false)
+ splitNode.addEdge(edge{
+ label: modChild.prefix[commonPrefix],
+ node: modChild,
+ })
+ modChild.prefix = modChild.prefix[commonPrefix:]
+
+ // Create a new leaf node
+ leaf := &leafNode{
+ mutateCh: make(chan struct{}),
+ key: k,
+ val: v,
+ }
+
+	// If the new key is a subset, add it to this node
+ search = search[commonPrefix:]
+ if len(search) == 0 {
+ splitNode.leaf = leaf
+ return nc, nil, false
+ }
+
+ // Create a new edge for the node
+ splitNode.addEdge(edge{
+ label: search[0],
+ node: &Node{
+ mutateCh: make(chan struct{}),
+ leaf: leaf,
+ prefix: search,
+ },
+ })
+ return nc, nil, false
+}
+
+// delete does a recursive deletion
+func (t *Txn) delete(parent, n *Node, search []byte) (*Node, *leafNode) {
+ // Check for key exhaustion
+ if len(search) == 0 {
+ if !n.isLeaf() {
+ return nil, nil
+ }
+ // Copy the pointer in case we are in a transaction that already
+ // modified this node since the node will be reused. Any changes
+ // made to the node will not affect returning the original leaf
+ // value.
+ oldLeaf := n.leaf
+
+ // Remove the leaf node
+ nc := t.writeNode(n, true)
+ nc.leaf = nil
+
+ // Check if this node should be merged
+ if n != t.root && len(nc.edges) == 1 {
+ t.mergeChild(nc)
+ }
+ return nc, oldLeaf
+ }
+
+ // Look for an edge
+ label := search[0]
+ idx, child := n.getEdge(label)
+ if child == nil || !bytes.HasPrefix(search, child.prefix) {
+ return nil, nil
+ }
+
+ // Consume the search prefix
+ search = search[len(child.prefix):]
+ newChild, leaf := t.delete(n, child, search)
+ if newChild == nil {
+ return nil, nil
+ }
+
+ // Copy this node. WATCH OUT - it's safe to pass "false" here because we
+ // will only ADD a leaf via nc.mergeChild() if there isn't one due to
+ // the !nc.isLeaf() check in the logic just below. This is pretty subtle,
+ // so be careful if you change any of the logic here.
+ nc := t.writeNode(n, false)
+
+ // Delete the edge if the node has no edges
+ if newChild.leaf == nil && len(newChild.edges) == 0 {
+ nc.delEdge(label)
+ if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
+ t.mergeChild(nc)
+ }
+ } else {
+ nc.edges[idx].node = newChild
+ }
+ return nc, leaf
+}
+
+// deletePrefix does a recursive deletion
+func (t *Txn) deletePrefix(parent, n *Node, search []byte) (*Node, int) {
+ // Check for key exhaustion
+ if len(search) == 0 {
+ nc := t.writeNode(n, true)
+ if n.isLeaf() {
+ nc.leaf = nil
+ }
+ nc.edges = nil
+ return nc, t.trackChannelsAndCount(n)
+ }
+
+ // Look for an edge
+ label := search[0]
+ idx, child := n.getEdge(label)
+	// We make sure that either the child node's prefix starts with the search
+	// term, or the search term starts with the child node's prefix. We need
+	// both checks so that we can delete prefixes that don't correspond to any
+	// node in the tree.
+ if child == nil || (!bytes.HasPrefix(child.prefix, search) && !bytes.HasPrefix(search, child.prefix)) {
+ return nil, 0
+ }
+
+ // Consume the search prefix
+ if len(child.prefix) > len(search) {
+ search = []byte("")
+ } else {
+ search = search[len(child.prefix):]
+ }
+ newChild, numDeletions := t.deletePrefix(n, child, search)
+ if newChild == nil {
+ return nil, 0
+ }
+ // Copy this node. WATCH OUT - it's safe to pass "false" here because we
+ // will only ADD a leaf via nc.mergeChild() if there isn't one due to
+ // the !nc.isLeaf() check in the logic just below. This is pretty subtle,
+ // so be careful if you change any of the logic here.
+
+ nc := t.writeNode(n, false)
+
+ // Delete the edge if the node has no edges
+ if newChild.leaf == nil && len(newChild.edges) == 0 {
+ nc.delEdge(label)
+ if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
+ t.mergeChild(nc)
+ }
+ } else {
+ nc.edges[idx].node = newChild
+ }
+ return nc, numDeletions
+}
+
+// Insert is used to add or update a given key. The return provides
+// the previous value and a bool indicating if any was set.
+func (t *Txn) Insert(k []byte, v interface{}) (interface{}, bool) {
+ newRoot, oldVal, didUpdate := t.insert(t.root, k, k, v)
+ if newRoot != nil {
+ t.root = newRoot
+ }
+ if !didUpdate {
+ t.size++
+ }
+ return oldVal, didUpdate
+}
+
+// Delete is used to delete a given key. Returns the old value if any,
+// and a bool indicating if the key was set.
+func (t *Txn) Delete(k []byte) (interface{}, bool) {
+ newRoot, leaf := t.delete(nil, t.root, k)
+ if newRoot != nil {
+ t.root = newRoot
+ }
+ if leaf != nil {
+ t.size--
+ return leaf.val, true
+ }
+ return nil, false
+}
+
+// DeletePrefix is used to delete an entire subtree that matches the prefix.
+// This will delete all nodes under that prefix.
+func (t *Txn) DeletePrefix(prefix []byte) bool {
+ newRoot, numDeletions := t.deletePrefix(nil, t.root, prefix)
+ if newRoot != nil {
+ t.root = newRoot
+ t.size = t.size - numDeletions
+ return true
+ }
+ return false
+}
+
+// Root returns the current root of the radix tree within this
+// transaction. The root is not safe across insert and delete operations,
+// but can be used to read the current state during a transaction.
+func (t *Txn) Root() *Node {
+ return t.root
+}
+
+// Get is used to lookup a specific key, returning
+// the value and if it was found
+func (t *Txn) Get(k []byte) (interface{}, bool) {
+ return t.root.Get(k)
+}
+
+// GetWatch is used to lookup a specific key, returning
+// the watch channel, value and if it was found
+func (t *Txn) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) {
+ return t.root.GetWatch(k)
+}
+
+// Commit is used to finalize the transaction and return a new tree. If mutation
+// tracking is turned on then notifications will also be issued.
+func (t *Txn) Commit() *Tree {
+ nt := t.CommitOnly()
+ if t.trackMutate {
+ t.Notify()
+ }
+ return nt
+}
+
+// CommitOnly is used to finalize the transaction and return a new tree, but
+// does not issue any notifications until Notify is called.
+func (t *Txn) CommitOnly() *Tree {
+ nt := &Tree{t.root, t.size}
+ t.writable = nil
+ return nt
+}
+
+// slowNotify does a complete comparison of the before and after trees in order
+// to trigger notifications. This doesn't require any additional state but it
+// is very expensive to compute.
+func (t *Txn) slowNotify() {
+ snapIter := t.snap.rawIterator()
+ rootIter := t.root.rawIterator()
+ for snapIter.Front() != nil || rootIter.Front() != nil {
+ // If we've exhausted the nodes in the old snapshot, we know
+ // there's nothing remaining to notify.
+ if snapIter.Front() == nil {
+ return
+ }
+ snapElem := snapIter.Front()
+
+ // If we've exhausted the nodes in the new root, we know we need
+ // to invalidate everything that remains in the old snapshot. We
+ // know from the loop condition there's something in the old
+ // snapshot.
+ if rootIter.Front() == nil {
+ close(snapElem.mutateCh)
+ if snapElem.isLeaf() {
+ close(snapElem.leaf.mutateCh)
+ }
+ snapIter.Next()
+ continue
+ }
+
+ // Do one string compare so we can check the various conditions
+ // below without repeating the compare.
+ cmp := strings.Compare(snapIter.Path(), rootIter.Path())
+
+ // If the snapshot is behind the root, then we must have deleted
+ // this node during the transaction.
+ if cmp < 0 {
+ close(snapElem.mutateCh)
+ if snapElem.isLeaf() {
+ close(snapElem.leaf.mutateCh)
+ }
+ snapIter.Next()
+ continue
+ }
+
+ // If the snapshot is ahead of the root, then we must have added
+ // this node during the transaction.
+ if cmp > 0 {
+ rootIter.Next()
+ continue
+ }
+
+ // If we have the same path, then we need to see if we mutated a
+ // node and possibly the leaf.
+ rootElem := rootIter.Front()
+ if snapElem != rootElem {
+ close(snapElem.mutateCh)
+ if snapElem.leaf != nil && (snapElem.leaf != rootElem.leaf) {
+ close(snapElem.leaf.mutateCh)
+ }
+ }
+ snapIter.Next()
+ rootIter.Next()
+ }
+}
+
+// Notify is used along with TrackMutate to trigger notifications. This must
+// only be done once a transaction is committed via CommitOnly, and it is called
+// automatically by Commit.
+func (t *Txn) Notify() {
+ if !t.trackMutate {
+ return
+ }
+
+ // If we've overflowed the tracking state we can't use it in any way and
+ // need to do a full tree compare.
+ if t.trackOverflow {
+ t.slowNotify()
+ } else {
+ for ch := range t.trackChannels {
+ close(ch)
+ }
+ }
+
+ // Clean up the tracking state so that a re-notify is safe (will trigger
+ // the else clause above which will be a no-op).
+ t.trackChannels = nil
+ t.trackOverflow = false
+}
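+
+// An editor's sketch of the TrackMutate/Notify flow (not part of the
+// library): a watch channel obtained before the write is closed once the
+// mutating transaction commits.
+//
+//	r := iradix.New()
+//	r, _, _ = r.Insert([]byte("k"), 1)
+//	ch, _, _ := r.Root().GetWatch([]byte("k"))
+//	txn := r.Txn()
+//	txn.TrackMutate(true)
+//	txn.Insert([]byte("k"), 2)
+//	txn.Commit() // Commit calls Notify, which closes ch
+//	<-ch         // returns immediately: the watched leaf was mutated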
+
+// Insert is used to add or update a given key. The return provides
+// the new tree, previous value and a bool indicating if any was set.
+func (t *Tree) Insert(k []byte, v interface{}) (*Tree, interface{}, bool) {
+ txn := t.Txn()
+ old, ok := txn.Insert(k, v)
+ return txn.Commit(), old, ok
+}
+
+// Delete is used to delete a given key. Returns the new tree,
+// old value if any, and a bool indicating if the key was set.
+func (t *Tree) Delete(k []byte) (*Tree, interface{}, bool) {
+ txn := t.Txn()
+ old, ok := txn.Delete(k)
+ return txn.Commit(), old, ok
+}
+
+// DeletePrefix is used to delete all nodes starting with a given prefix. Returns the new tree,
+// and a bool indicating if the prefix matched any nodes
+func (t *Tree) DeletePrefix(k []byte) (*Tree, bool) {
+ txn := t.Txn()
+ ok := txn.DeletePrefix(k)
+ return txn.Commit(), ok
+}
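+
+// An editor's sketch of DeletePrefix (not part of the library): every key
+// under the given prefix is removed in one operation.
+//
+//	r := iradix.New()
+//	r, _, _ = r.Insert([]byte("app/1"), 1)
+//	r, _, _ = r.Insert([]byte("app/2"), 2)
+//	r, ok := r.DeletePrefix([]byte("app/")) // ok == true, both keys removed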
+
+// Root returns the root node of the tree which can be used for richer
+// query operations.
+func (t *Tree) Root() *Node {
+ return t.root
+}
+
+// Get is used to lookup a specific key, returning
+// the value and if it was found
+func (t *Tree) Get(k []byte) (interface{}, bool) {
+ return t.root.Get(k)
+}
+
+// longestPrefix finds the length of the shared prefix
+// of two strings
+func longestPrefix(k1, k2 []byte) int {
+ max := len(k1)
+ if l := len(k2); l < max {
+ max = l
+ }
+ var i int
+ for i = 0; i < max; i++ {
+ if k1[i] != k2[i] {
+ break
+ }
+ }
+ return i
+}
+
+// concat two byte slices, returning a third new copy
+func concat(a, b []byte) []byte {
+ c := make([]byte, len(a)+len(b))
+ copy(c, a)
+ copy(c[len(a):], b)
+ return c
+}
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iter.go b/vendor/github.com/hashicorp/go-immutable-radix/iter.go
new file mode 100644
index 0000000..f17d0a6
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/iter.go
@@ -0,0 +1,205 @@
+package iradix
+
+import (
+ "bytes"
+)
+
+// Iterator is used to iterate over a set of nodes
+// in pre-order
+type Iterator struct {
+ node *Node
+ stack []edges
+}
+
+// SeekPrefixWatch is used to seek the iterator to a given prefix
+// and returns the watch channel of the finest granularity
+func (i *Iterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) {
+ // Wipe the stack
+ i.stack = nil
+ n := i.node
+ watch = n.mutateCh
+ search := prefix
+ for {
+ // Check for key exhaustion
+ if len(search) == 0 {
+ i.node = n
+ return
+ }
+
+ // Look for an edge
+ _, n = n.getEdge(search[0])
+ if n == nil {
+ i.node = nil
+ return
+ }
+
+ // Update to the finest granularity as the search makes progress
+ watch = n.mutateCh
+
+ // Consume the search prefix
+ if bytes.HasPrefix(search, n.prefix) {
+ search = search[len(n.prefix):]
+
+ } else if bytes.HasPrefix(n.prefix, search) {
+ i.node = n
+ return
+ } else {
+ i.node = nil
+ return
+ }
+ }
+}
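+
+// An editor's sketch of SeekPrefixWatch (not part of the library): iterate
+// the keys under a prefix and keep a channel that is closed when a tracked
+// mutation under that prefix is committed.
+//
+//	it := r.Root().Iterator()
+//	watch := it.SeekPrefixWatch([]byte("app/"))
+//	for k, v, ok := it.Next(); ok; k, v, ok = it.Next() {
+//		fmt.Println(string(k), v)
+//	}
+//	<-watch // fires once a TrackMutate transaction touching "app/" commits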
+
+// SeekPrefix is used to seek the iterator to a given prefix
+func (i *Iterator) SeekPrefix(prefix []byte) {
+ i.SeekPrefixWatch(prefix)
+}
+
+func (i *Iterator) recurseMin(n *Node) *Node {
+ // Traverse to the minimum child
+ if n.leaf != nil {
+ return n
+ }
+ nEdges := len(n.edges)
+ if nEdges > 1 {
+ // Add all the other edges to the stack (the min node will be added as
+ // we recurse)
+ i.stack = append(i.stack, n.edges[1:])
+ }
+ if nEdges > 0 {
+ return i.recurseMin(n.edges[0].node)
+ }
+ // Shouldn't be possible
+ return nil
+}
+
+// SeekLowerBound is used to seek the iterator to the smallest key that is
+// greater or equal to the given key. There is no watch variant as it's hard to
+// predict based on the radix structure which node(s) changes might affect the
+// result.
+func (i *Iterator) SeekLowerBound(key []byte) {
+ // Wipe the stack. Unlike Prefix iteration, we need to build the stack as we
+ // go because we need only a subset of edges of many nodes in the path to the
+ // leaf with the lower bound. Note that the iterator will still recurse into
+	// children that we don't traverse on the way to the lower bound as it
+ // walks the stack.
+ i.stack = []edges{}
+ // i.node starts off in the common case as pointing to the root node of the
+ // tree. By the time we return we have either found a lower bound and setup
+ // the stack to traverse all larger keys, or we have not and the stack and
+ // node should both be nil to prevent the iterator from assuming it is just
+ // iterating the whole tree from the root node. Either way this needs to end
+ // up as nil so just set it here.
+ n := i.node
+ i.node = nil
+ search := key
+
+ found := func(n *Node) {
+ i.stack = append(i.stack, edges{edge{node: n}})
+ }
+
+ findMin := func(n *Node) {
+ n = i.recurseMin(n)
+ if n != nil {
+ found(n)
+ return
+ }
+ }
+
+ for {
+ // Compare current prefix with the search key's same-length prefix.
+ var prefixCmp int
+ if len(n.prefix) < len(search) {
+ prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)])
+ } else {
+ prefixCmp = bytes.Compare(n.prefix, search)
+ }
+
+ if prefixCmp > 0 {
+ // Prefix is larger, that means the lower bound is greater than the search
+ // and from now on we need to follow the minimum path to the smallest
+ // leaf under this subtree.
+ findMin(n)
+ return
+ }
+
+ if prefixCmp < 0 {
+ // Prefix is smaller than search prefix, that means there is no lower
+ // bound
+ i.node = nil
+ return
+ }
+
+ // Prefix is equal, we are still heading for an exact match. If this is a
+ // leaf and an exact match we're done.
+ if n.leaf != nil && bytes.Equal(n.leaf.key, key) {
+ found(n)
+ return
+ }
+
+ // Consume the search prefix if the current node has one. Note that this is
+ // safe because if n.prefix is longer than the search slice prefixCmp would
+ // have been > 0 above and the method would have already returned.
+ search = search[len(n.prefix):]
+
+ if len(search) == 0 {
+ // We've exhausted the search key, but the current node is not an exact
+		// match or not a leaf. That means the leaf value, if it exists, and all
+		// child nodes must be strictly greater, so the smallest key in this
+		// subtree must be the lower bound.
+ findMin(n)
+ return
+ }
+
+ // Otherwise, take the lower bound next edge.
+ idx, lbNode := n.getLowerBoundEdge(search[0])
+ if lbNode == nil {
+ return
+ }
+
+	// Create stack edges for all the strictly higher edges in this node.
+ if idx+1 < len(n.edges) {
+ i.stack = append(i.stack, n.edges[idx+1:])
+ }
+
+ // Recurse
+ n = lbNode
+ }
+}
+
+// Next returns the next node in order
+func (i *Iterator) Next() ([]byte, interface{}, bool) {
+ // Initialize our stack if needed
+ if i.stack == nil && i.node != nil {
+ i.stack = []edges{
+ {
+ edge{node: i.node},
+ },
+ }
+ }
+
+ for len(i.stack) > 0 {
+ // Inspect the last element of the stack
+ n := len(i.stack)
+ last := i.stack[n-1]
+ elem := last[0].node
+
+ // Update the stack
+ if len(last) > 1 {
+ i.stack[n-1] = last[1:]
+ } else {
+ i.stack = i.stack[:n-1]
+ }
+
+ // Push the edges onto the frontier
+ if len(elem.edges) > 0 {
+ i.stack = append(i.stack, elem.edges)
+ }
+
+ // Return the leaf values if any
+ if elem.leaf != nil {
+ return elem.leaf.key, elem.leaf.val, true
+ }
+ }
+ return nil, nil, false
+}
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/node.go b/vendor/github.com/hashicorp/go-immutable-radix/node.go
new file mode 100644
index 0000000..3598548
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/node.go
@@ -0,0 +1,334 @@
+package iradix
+
+import (
+ "bytes"
+ "sort"
+)
+
+// WalkFn is used when walking the tree. Takes a
+// key and value, returning if iteration should
+// be terminated.
+type WalkFn func(k []byte, v interface{}) bool
+
+// leafNode is used to represent a value
+type leafNode struct {
+ mutateCh chan struct{}
+ key []byte
+ val interface{}
+}
+
+// edge is used to represent an edge node
+type edge struct {
+ label byte
+ node *Node
+}
+
+// Node is an immutable node in the radix tree
+type Node struct {
+ // mutateCh is closed if this node is modified
+ mutateCh chan struct{}
+
+ // leaf is used to store possible leaf
+ leaf *leafNode
+
+ // prefix is the common prefix we ignore
+ prefix []byte
+
+ // Edges should be stored in-order for iteration.
+ // We avoid a fully materialized slice to save memory,
+ // since in most cases we expect to be sparse
+ edges edges
+}
+
+func (n *Node) isLeaf() bool {
+ return n.leaf != nil
+}
+
+func (n *Node) addEdge(e edge) {
+ num := len(n.edges)
+ idx := sort.Search(num, func(i int) bool {
+ return n.edges[i].label >= e.label
+ })
+ n.edges = append(n.edges, e)
+ if idx != num {
+ copy(n.edges[idx+1:], n.edges[idx:num])
+ n.edges[idx] = e
+ }
+}
+
+func (n *Node) replaceEdge(e edge) {
+ num := len(n.edges)
+ idx := sort.Search(num, func(i int) bool {
+ return n.edges[i].label >= e.label
+ })
+ if idx < num && n.edges[idx].label == e.label {
+ n.edges[idx].node = e.node
+ return
+ }
+ panic("replacing missing edge")
+}
+
+func (n *Node) getEdge(label byte) (int, *Node) {
+ num := len(n.edges)
+ idx := sort.Search(num, func(i int) bool {
+ return n.edges[i].label >= label
+ })
+ if idx < num && n.edges[idx].label == label {
+ return idx, n.edges[idx].node
+ }
+ return -1, nil
+}
+
+func (n *Node) getLowerBoundEdge(label byte) (int, *Node) {
+ num := len(n.edges)
+ idx := sort.Search(num, func(i int) bool {
+ return n.edges[i].label >= label
+ })
+ // we want lower bound behavior so return even if it's not an exact match
+ if idx < num {
+ return idx, n.edges[idx].node
+ }
+ return -1, nil
+}
+
+func (n *Node) delEdge(label byte) {
+ num := len(n.edges)
+ idx := sort.Search(num, func(i int) bool {
+ return n.edges[i].label >= label
+ })
+ if idx < num && n.edges[idx].label == label {
+ copy(n.edges[idx:], n.edges[idx+1:])
+ n.edges[len(n.edges)-1] = edge{}
+ n.edges = n.edges[:len(n.edges)-1]
+ }
+}
+
+func (n *Node) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) {
+ search := k
+ watch := n.mutateCh
+ for {
+ // Check for key exhaustion
+ if len(search) == 0 {
+ if n.isLeaf() {
+ return n.leaf.mutateCh, n.leaf.val, true
+ }
+ break
+ }
+
+ // Look for an edge
+ _, n = n.getEdge(search[0])
+ if n == nil {
+ break
+ }
+
+ // Update to the finest granularity as the search makes progress
+ watch = n.mutateCh
+
+ // Consume the search prefix
+ if bytes.HasPrefix(search, n.prefix) {
+ search = search[len(n.prefix):]
+ } else {
+ break
+ }
+ }
+ return watch, nil, false
+}
+
+func (n *Node) Get(k []byte) (interface{}, bool) {
+ _, val, ok := n.GetWatch(k)
+ return val, ok
+}
+
+// LongestPrefix is like Get, but instead of an
+// exact match, it will return the longest prefix match.
+func (n *Node) LongestPrefix(k []byte) ([]byte, interface{}, bool) {
+ var last *leafNode
+ search := k
+ for {
+ // Look for a leaf node
+ if n.isLeaf() {
+ last = n.leaf
+ }
+
+		// Check for key exhaustion
+ if len(search) == 0 {
+ break
+ }
+
+ // Look for an edge
+ _, n = n.getEdge(search[0])
+ if n == nil {
+ break
+ }
+
+ // Consume the search prefix
+ if bytes.HasPrefix(search, n.prefix) {
+ search = search[len(n.prefix):]
+ } else {
+ break
+ }
+ }
+ if last != nil {
+ return last.key, last.val, true
+ }
+ return nil, nil, false
+}
+
+// Minimum is used to return the minimum value in the tree
+func (n *Node) Minimum() ([]byte, interface{}, bool) {
+ for {
+ if n.isLeaf() {
+ return n.leaf.key, n.leaf.val, true
+ }
+ if len(n.edges) > 0 {
+ n = n.edges[0].node
+ } else {
+ break
+ }
+ }
+ return nil, nil, false
+}
+
+// Maximum is used to return the maximum value in the tree
+func (n *Node) Maximum() ([]byte, interface{}, bool) {
+ for {
+ if num := len(n.edges); num > 0 {
+ n = n.edges[num-1].node
+ continue
+ }
+ if n.isLeaf() {
+ return n.leaf.key, n.leaf.val, true
+ } else {
+ break
+ }
+ }
+ return nil, nil, false
+}
+
+// Iterator is used to return an iterator at
+// the given node to walk the tree
+func (n *Node) Iterator() *Iterator {
+ return &Iterator{node: n}
+}
+
+// ReverseIterator is used to return an iterator at
+// the given node to walk the tree backwards
+func (n *Node) ReverseIterator() *ReverseIterator {
+ return NewReverseIterator(n)
+}
+
+// rawIterator is used to return a raw iterator at the given node to walk the
+// tree.
+func (n *Node) rawIterator() *rawIterator {
+ iter := &rawIterator{node: n}
+ iter.Next()
+ return iter
+}
+
+// Walk is used to walk the tree
+func (n *Node) Walk(fn WalkFn) {
+ recursiveWalk(n, fn)
+}
+
+// WalkBackwards is used to walk the tree in reverse order
+func (n *Node) WalkBackwards(fn WalkFn) {
+ reverseRecursiveWalk(n, fn)
+}
+
+// WalkPrefix is used to walk the tree under a prefix
+func (n *Node) WalkPrefix(prefix []byte, fn WalkFn) {
+ search := prefix
+ for {
+		// Check for key exhaustion
+ if len(search) == 0 {
+ recursiveWalk(n, fn)
+ return
+ }
+
+ // Look for an edge
+ _, n = n.getEdge(search[0])
+ if n == nil {
+ break
+ }
+
+ // Consume the search prefix
+ if bytes.HasPrefix(search, n.prefix) {
+ search = search[len(n.prefix):]
+
+ } else if bytes.HasPrefix(n.prefix, search) {
+ // Child may be under our search prefix
+ recursiveWalk(n, fn)
+ return
+ } else {
+ break
+ }
+ }
+}
+
+// WalkPath is used to walk the tree, but only visiting nodes
+// from the root down to a given leaf. Where WalkPrefix walks
+// all the entries *under* the given prefix, this walks the
+// entries *above* the given prefix.
+func (n *Node) WalkPath(path []byte, fn WalkFn) {
+ search := path
+ for {
+ // Visit the leaf values if any
+ if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
+ return
+ }
+
+		// Check for key exhaustion
+ if len(search) == 0 {
+ return
+ }
+
+ // Look for an edge
+ _, n = n.getEdge(search[0])
+ if n == nil {
+ return
+ }
+
+ // Consume the search prefix
+ if bytes.HasPrefix(search, n.prefix) {
+ search = search[len(n.prefix):]
+ } else {
+ break
+ }
+ }
+}
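+
+// An editor's sketch contrasting the two walks (not part of the library):
+// given keys "a", "ab", and "abc",
+//
+//	n.WalkPrefix([]byte("ab"), fn) // visits "ab", "abc" (entries under the prefix)
+//	n.WalkPath([]byte("abc"), fn)  // visits "a", "ab", "abc" (entries along the path)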
+
+// recursiveWalk is used to do a pre-order walk of a node
+// recursively. Returns true if the walk should be aborted
+func recursiveWalk(n *Node, fn WalkFn) bool {
+ // Visit the leaf values if any
+ if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
+ return true
+ }
+
+ // Recurse on the children
+ for _, e := range n.edges {
+ if recursiveWalk(e.node, fn) {
+ return true
+ }
+ }
+ return false
+}
+
+// reverseRecursiveWalk is used to do a reverse pre-order
+// walk of a node recursively. Returns true if the walk
+// should be aborted
+func reverseRecursiveWalk(n *Node, fn WalkFn) bool {
+ // Visit the leaf values if any
+ if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
+ return true
+ }
+
+ // Recurse on the children in reverse order
+ for i := len(n.edges) - 1; i >= 0; i-- {
+ e := n.edges[i]
+ if reverseRecursiveWalk(e.node, fn) {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go b/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go
new file mode 100644
index 0000000..3c6a225
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go
@@ -0,0 +1,78 @@
+package iradix
+
+// rawIterator visits each of the nodes in the tree, even the ones that are not
+// leaves. It keeps track of the effective path (what a leaf at a given node
+// would be called), which is useful for comparing trees.
+type rawIterator struct {
+ // node is the starting node in the tree for the iterator.
+ node *Node
+
+ // stack keeps track of edges in the frontier.
+ stack []rawStackEntry
+
+ // pos is the current position of the iterator.
+ pos *Node
+
+ // path is the effective path of the current iterator position,
+ // regardless of whether the current node is a leaf.
+ path string
+}
+
+// rawStackEntry is used to keep track of the cumulative common path as well as
+// its associated edges in the frontier.
+type rawStackEntry struct {
+ path string
+ edges edges
+}
+
+// Front returns the current node that has been iterated to.
+func (i *rawIterator) Front() *Node {
+ return i.pos
+}
+
+// Path returns the effective path of the current node, even if it's not actually
+// a leaf.
+func (i *rawIterator) Path() string {
+ return i.path
+}
+
+// Next advances the iterator to the next node.
+func (i *rawIterator) Next() {
+ // Initialize our stack if needed.
+ if i.stack == nil && i.node != nil {
+ i.stack = []rawStackEntry{
+ {
+ edges: edges{
+ edge{node: i.node},
+ },
+ },
+ }
+ }
+
+ for len(i.stack) > 0 {
+ // Inspect the last element of the stack.
+ n := len(i.stack)
+ last := i.stack[n-1]
+ elem := last.edges[0].node
+
+ // Update the stack.
+ if len(last.edges) > 1 {
+ i.stack[n-1].edges = last.edges[1:]
+ } else {
+ i.stack = i.stack[:n-1]
+ }
+
+ // Push the edges onto the frontier.
+ if len(elem.edges) > 0 {
+ path := last.path + string(elem.prefix)
+ i.stack = append(i.stack, rawStackEntry{path, elem.edges})
+ }
+
+ i.pos = elem
+ i.path = last.path + string(elem.prefix)
+ return
+ }
+
+ i.pos = nil
+ i.path = ""
+}
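+
+// Example (illustrative sketch of internal use): since rawIterator is
+// unexported it is only reachable from inside this package, e.g. when
+// comparing two trees node by node:
+//
+//    for it := n.rawIterator(); it.Front() != nil; it.Next() {
+//        fmt.Printf("%q -> leaf=%v\n", it.Path(), it.Front().isLeaf())
+//    }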
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/reverse_iter.go b/vendor/github.com/hashicorp/go-immutable-radix/reverse_iter.go
new file mode 100644
index 0000000..554fa71
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/reverse_iter.go
@@ -0,0 +1,239 @@
+package iradix
+
+import (
+ "bytes"
+)
+
+// ReverseIterator is used to iterate over a set of nodes
+// in reverse in-order
+type ReverseIterator struct {
+ i *Iterator
+
+ // expandedParents stores the set of parent nodes whose relevant children have
+ // already been pushed into the stack. This can happen during seek or during
+ // iteration.
+ //
+ // Unlike forward iteration we need to recurse into children before we can
+ // output the value stored in an internal leaf since all children are greater.
+ // We use this to track whether we have already ensured all the children are
+ // in the stack.
+ expandedParents map[*Node]struct{}
+}
+
+// NewReverseIterator returns a new ReverseIterator at a node
+func NewReverseIterator(n *Node) *ReverseIterator {
+ return &ReverseIterator{
+ i: &Iterator{node: n},
+ }
+}
+
+// SeekPrefixWatch is used to seek the iterator to a given prefix
+// and returns the watch channel of the finest granularity
+func (ri *ReverseIterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) {
+ return ri.i.SeekPrefixWatch(prefix)
+}
+
+// SeekPrefix is used to seek the iterator to a given prefix
+func (ri *ReverseIterator) SeekPrefix(prefix []byte) {
+ ri.i.SeekPrefixWatch(prefix)
+}
+
+// SeekReverseLowerBound is used to seek the iterator to the largest key that is
+// lower or equal to the given key. There is no watch variant as it's hard to
+// predict based on the radix structure which node(s) changes might affect the
+// result.
+func (ri *ReverseIterator) SeekReverseLowerBound(key []byte) {
+ // Wipe the stack. Unlike Prefix iteration, we need to build the stack as we
+ // go because we need only a subset of edges of many nodes in the path to the
+ // leaf with the lower bound. Note that the iterator will still recurse into
+ // children that we don't traverse on the way to the reverse lower bound as it
+ // walks the stack.
+ ri.i.stack = []edges{}
+ // ri.i.node starts off in the common case as pointing to the root node of the
+ // tree. By the time we return we have either found a lower bound and setup
+ // the stack to traverse all larger keys, or we have not and the stack and
+ // node should both be nil to prevent the iterator from assuming it is just
+ // iterating the whole tree from the root node. Either way this needs to end
+ // up as nil so just set it here.
+ n := ri.i.node
+ ri.i.node = nil
+ search := key
+
+ if ri.expandedParents == nil {
+ ri.expandedParents = make(map[*Node]struct{})
+ }
+
+ found := func(n *Node) {
+ ri.i.stack = append(ri.i.stack, edges{edge{node: n}})
+ // We need to mark this node as expanded in advance too otherwise the
+ // iterator will attempt to walk all of its children even though they are
+ // greater than the lower bound we have found. We've expanded it in the
+ // sense that all of its children that we want to walk are already in the
+ // stack (i.e. none of them).
+ ri.expandedParents[n] = struct{}{}
+ }
+
+ for {
+ // Compare current prefix with the search key's same-length prefix.
+ var prefixCmp int
+ if len(n.prefix) < len(search) {
+ prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)])
+ } else {
+ prefixCmp = bytes.Compare(n.prefix, search)
+ }
+
+ if prefixCmp < 0 {
+ // Prefix is smaller than search prefix, that means there is no exact
+ // match for the search key. But we are looking in reverse, so the reverse
+ // lower bound will be the largest leaf under this subtree, since it is
+ // the value that would come right before the current search key if it
+ // were in the tree. So we need to follow the maximum path in this subtree
+ // to find it. Note that this is exactly what the iterator will already do
+ // if it finds a node in the stack that has _not_ been marked as expanded
+ // so in this one case we don't call `found` and instead let the iterator
+ // do the expansion and recursion through all the children.
+ ri.i.stack = append(ri.i.stack, edges{edge{node: n}})
+ return
+ }
+
+ if prefixCmp > 0 {
+ // Prefix is larger than search prefix, or there is no prefix but we've
+ // also exhausted the search key. Either way, that means there is no
+ // reverse lower bound since nothing comes before our current search
+ // prefix.
+ return
+ }
+
+ // If this is a leaf, we need to handle it here. Note that if it's a leaf
+ // and prefixCmp was zero (which it must be to get here), then the leaf
+ // value is either an exact match for the search key, or it's lower. It
+ // can't be greater.
+ if n.isLeaf() {
+
+ // Firstly, if it's an exact match, we're done!
+ if bytes.Equal(n.leaf.key, key) {
+ found(n)
+ return
+ }
+
+ // It's not so this node's leaf value must be lower and could still be a
+ // valid contender for reverse lower bound.
+
+ // If it has no children then we are also done.
+ if len(n.edges) == 0 {
+ // This leaf is the lower bound.
+ found(n)
+ return
+ }
+
+ // Finally, this leaf is internal (has children) so we'll keep searching,
+ // but we need to add it to the iterator's stack since it has a leaf value
+ // that needs to be iterated over. It needs to be added to the stack
+ // before its children below as it comes first.
+ ri.i.stack = append(ri.i.stack, edges{edge{node: n}})
+ // We also need to mark it as expanded since we'll be adding any of its
+ // relevant children below and so don't want the iterator to re-add them
+ // on its way back up the stack.
+ ri.expandedParents[n] = struct{}{}
+ }
+
+ // Consume the search prefix. Note that this is safe because if n.prefix is
+ // longer than the search slice prefixCmp would have been > 0 above and the
+ // method would have already returned.
+ search = search[len(n.prefix):]
+
+ if len(search) == 0 {
+ // We've exhausted the search key but we are not at a leaf. That means all
+ // children are greater than the search key so a reverse lower bound
+ // doesn't exist in this subtree. Note that there might still be one in
+ // the whole radix tree by following a different path somewhere further
+ // up. If that's the case then the iterator's stack will contain all the
+ // smaller nodes already and Previous will walk through them correctly.
+ return
+ }
+
+ // Otherwise, take the lower bound next edge.
+ idx, lbNode := n.getLowerBoundEdge(search[0])
+
+ // From here, we need to update the stack with all values lower than
+ // the lower bound edge. Since getLowerBoundEdge() returns -1 when the
+ // search prefix is larger than all edges, we need to place idx at the
+ // last edge index so they can all be placed in the stack, since they
+ // come before our search prefix.
+ if idx == -1 {
+ idx = len(n.edges)
+ }
+
+ // Create stack edges for all strictly lower edges in this node.
+ if len(n.edges[:idx]) > 0 {
+ ri.i.stack = append(ri.i.stack, n.edges[:idx])
+ }
+
+ // Exit if there's no lower bound edge. The stack will have the previous
+ // nodes already.
+ if lbNode == nil {
+ return
+ }
+
+ // Recurse
+ n = lbNode
+ }
+}
+
+// Previous returns the previous node in reverse order
+func (ri *ReverseIterator) Previous() ([]byte, interface{}, bool) {
+ // Initialize our stack if needed
+ if ri.i.stack == nil && ri.i.node != nil {
+ ri.i.stack = []edges{
+ {
+ edge{node: ri.i.node},
+ },
+ }
+ }
+
+ if ri.expandedParents == nil {
+ ri.expandedParents = make(map[*Node]struct{})
+ }
+
+ for len(ri.i.stack) > 0 {
+ // Inspect the last element of the stack
+ n := len(ri.i.stack)
+ last := ri.i.stack[n-1]
+ m := len(last)
+ elem := last[m-1].node
+
+ _, alreadyExpanded := ri.expandedParents[elem]
+
+ // If this is an internal node and we've not seen it already, we need to
+ // leave it in the stack so we can return its possible leaf value _after_
+ // we've recursed through all its children.
+ if len(elem.edges) > 0 && !alreadyExpanded {
+ // record that we've seen this node!
+ ri.expandedParents[elem] = struct{}{}
+ // push child edges onto stack and skip the rest of the loop to recurse
+ // into the largest one.
+ ri.i.stack = append(ri.i.stack, elem.edges)
+ continue
+ }
+
+ // Remove the node from the stack
+ if m > 1 {
+ ri.i.stack[n-1] = last[:m-1]
+ } else {
+ ri.i.stack = ri.i.stack[:n-1]
+ }
+ // We don't need this state any more as it's no longer in the stack so we
+ // won't visit it again
+ if alreadyExpanded {
+ delete(ri.expandedParents, elem)
+ }
+
+ // If this is a leaf, return it
+ if elem.leaf != nil {
+ return elem.leaf.key, elem.leaf.val, true
+ }
+
+ // it's not a leaf so keep walking the stack to find the previous leaf
+ }
+ return nil, nil, false
+}
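+
+// Example (illustrative sketch): walk all keys less than or equal to
+// "cherry" in descending order, assuming a tree t built with this package:
+//
+//    it := t.Root().ReverseIterator()
+//    it.SeekReverseLowerBound([]byte("cherry"))
+//    for k, v, ok := it.Previous(); ok; k, v, ok = it.Previous() {
+//        fmt.Printf("%s = %v\n", k, v)
+//    }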
diff --git a/vendor/github.com/hashicorp/go-memdb/.gitignore b/vendor/github.com/hashicorp/go-memdb/.gitignore
new file mode 100644
index 0000000..11b90db
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/.gitignore
@@ -0,0 +1,26 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+
+.idea
diff --git a/vendor/github.com/hashicorp/go-memdb/CODEOWNERS b/vendor/github.com/hashicorp/go-memdb/CODEOWNERS
new file mode 100644
index 0000000..2cad39a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/CODEOWNERS
@@ -0,0 +1,13 @@
+# Each line is a file pattern followed by one or more owners.
+# More on CODEOWNERS files: https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners
+
+# Default owner
+* @hashicorp/team-ip-compliance @hashicorp/raft-force
+
+# Add override rules below. Each line is a file/folder pattern followed by one or more owners.
+# Being an owner means those groups or individuals will be added as reviewers to PRs affecting
+# those areas of the code.
+# Examples:
+# /docs/ @docs-team
+# *.js @js-team
+# *.go @go-team
diff --git a/vendor/github.com/hashicorp/go-memdb/LICENSE b/vendor/github.com/hashicorp/go-memdb/LICENSE
new file mode 100644
index 0000000..f4f97ee
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/LICENSE
@@ -0,0 +1,365 @@
+Copyright (c) 2015 HashiCorp, Inc.
+
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the terms of
+ a Secondary License.
+
+1.6. "Executable Form"
+
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+ means a work that combines Covered Software with other material, in a
+ separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+ means this document.
+
+1.9. "Licensable"
+
+ means having the right to grant, to the maximum extent possible, whether
+ at the time of the initial grant or subsequently, any and all of the
+ rights conveyed by this License.
+
+1.10. "Modifications"
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the License,
+ by the making, using, selling, offering for sale, having made, import,
+ or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, "control" means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution
+ become effective for each Contribution on the date the Contributor first
+ distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under
+ this License. No additional rights or licenses will be implied from the
+ distribution or licensing of Covered Software under this License.
+ Notwithstanding Section 2.1(b) above, no patent license is granted by a
+ Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+ This License does not grant any rights in the trademarks, service marks,
+ or logos of any Contributor (except as may be necessary to comply with
+ the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this
+ License (see Section 10.2) or under the terms of a Secondary License (if
+ permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its
+ Contributions are its original creation(s) or it has sufficient rights to
+ grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under
+ applicable copyright doctrines of fair use, fair dealing, or other
+ equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under
+ the terms of this License. You must inform recipients that the Source
+ Code Form of the Covered Software is governed by the terms of this
+ License, and how they can obtain a copy of this License. You may not
+ attempt to alter or restrict the recipients' rights in the Source Code
+ Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter the
+ recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for
+ the Covered Software. If the Larger Work is a combination of Covered
+ Software with a work governed by one or more Secondary Licenses, and the
+ Covered Software is not Incompatible With Secondary Licenses, this
+ License permits You to additionally distribute such Covered Software
+ under the terms of such Secondary License(s), so that the recipient of
+ the Larger Work may, at their option, further distribute the Covered
+ Software under the terms of either this License or such Secondary
+ License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices
+ (including copyright notices, patent notices, disclaimers of warranty, or
+ limitations of liability) contained within the Source Code Form of the
+ Covered Software, except that You may alter any license notices to the
+ extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on
+ behalf of any Contributor. You must make it absolutely clear that any
+ such warranty, support, indemnity, or liability obligation is offered by
+ You alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute,
+ judicial order, or regulation then You must: (a) comply with the terms of
+ this License to the maximum extent possible; and (b) describe the
+ limitations and the code they affect. Such description must be placed in a
+ text file included with all distributions of the Covered Software under
+ this License. Except to the extent prohibited by statute or regulation,
+ such description must be sufficiently detailed for a recipient of ordinary
+ skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing
+ basis, if such Contributor fails to notify You of the non-compliance by
+ some reasonable means prior to 60 days after You have come back into
+ compliance. Moreover, Your grants from a particular Contributor are
+ reinstated on an ongoing basis if such Contributor notifies You of the
+ non-compliance by some reasonable means, this is the first time You have
+ received notice of non-compliance with this License from such
+ Contributor, and You become compliant prior to 30 days after Your receipt
+ of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions,
+ counter-claims, and cross-claims) alleging that a Contributor Version
+ directly or indirectly infringes any patent, then the rights granted to
+ You by any and all Contributors for the Covered Software under Section
+ 2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an "as is" basis,
+ without warranty of any kind, either expressed, implied, or statutory,
+ including, without limitation, warranties that the Covered Software is free
+ of defects, merchantable, fit for a particular purpose or non-infringing.
+ The entire risk as to the quality and performance of the Covered Software
+ is with You. Should any Covered Software prove defective in any respect,
+ You (not any Contributor) assume the cost of any necessary servicing,
+ repair, or correction. This disclaimer of warranty constitutes an essential
+ part of this License. No use of any Covered Software is authorized under
+ this License except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from
+ such party's negligence to the extent applicable law prohibits such
+ limitation. Some jurisdictions do not allow the exclusion or limitation of
+ incidental or consequential damages, so this exclusion and limitation may
+ not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts
+ of a jurisdiction where the defendant maintains its principal place of
+ business and such litigation shall be governed by laws of that
+ jurisdiction, without reference to its conflict-of-law provisions. Nothing
+ in this Section shall prevent a party's ability to bring cross-claims or
+ counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. Any law or regulation which provides that
+ the language of a contract shall be construed against the drafter shall not
+ be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version
+ of the License under which You originally received the Covered Software,
+ or under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a
+ modified version of this License if you rename the license and remove
+ any references to the name of the license steward (except to note that
+ such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+ Licenses If You choose to distribute Source Code Form that is
+ Incompatible With Secondary Licenses under the terms of this version of
+ the License, the notice described in Exhibit B of this License must be
+ attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+ This Source Code Form is "Incompatible
+ With Secondary Licenses", as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-memdb/README.md b/vendor/github.com/hashicorp/go-memdb/README.md
new file mode 100644
index 0000000..080b744
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/README.md
@@ -0,0 +1,146 @@
+# go-memdb [![CircleCI](https://circleci.com/gh/hashicorp/go-memdb/tree/master.svg?style=svg)](https://circleci.com/gh/hashicorp/go-memdb/tree/master)
+
+Provides the `memdb` package that implements a simple in-memory database
+built on immutable radix trees. The database provides atomicity, consistency,
+and isolation (the A, C, and I of ACID); because it is in-memory, it does not
+provide durability. The database is instantiated with a schema that specifies
+the tables and indexes that exist and allows transactions to be executed.
+
+The database provides the following:
+
+* Multi-Version Concurrency Control (MVCC) - By leveraging immutable radix trees
+ the database is able to support any number of concurrent readers without locking,
+ while still allowing a writer to make progress.
+
+* Transaction Support - The database allows for rich transactions, in which multiple
+ objects are inserted, updated or deleted. The transactions can span multiple tables,
+ and are applied atomically. The database provides atomicity and isolation in ACID
+ terminology, such that until commit the updates are not visible.
+
+* Rich Indexing - Tables can support any number of indexes, which can be simple like
+ a single field index, or more advanced compound field indexes. Certain types like
+ UUID can be efficiently compressed from strings into byte indexes for reduced
+ storage requirements.
+
+* Watches - Callers can populate a watch set as part of a query, which can be used to
+ detect when a modification has been made to the database which affects the query
+ results. This lets callers easily watch for changes in the database in a very
+ general way; see the watch example below.
+
+For the underlying immutable radix trees, see [go-immutable-radix](https://github.com/hashicorp/go-immutable-radix).
+
+Documentation
+=============
+
+The full documentation is available on [Godoc](https://pkg.go.dev/github.com/hashicorp/go-memdb).
+
+Example
+=======
+
+Below is a [simple example](https://play.golang.org/p/gCGE9FA4og1) of usage:
+
+```go
+// Create a sample struct
+type Person struct {
+ Email string
+ Name string
+ Age int
+}
+
+// Create the DB schema
+schema := &memdb.DBSchema{
+ Tables: map[string]*memdb.TableSchema{
+ "person": &memdb.TableSchema{
+ Name: "person",
+ Indexes: map[string]*memdb.IndexSchema{
+ "id": &memdb.IndexSchema{
+ Name: "id",
+ Unique: true,
+ Indexer: &memdb.StringFieldIndex{Field: "Email"},
+ },
+ "age": &memdb.IndexSchema{
+ Name: "age",
+ Unique: false,
+ Indexer: &memdb.IntFieldIndex{Field: "Age"},
+ },
+ },
+ },
+ },
+}
+
+// Create a new database
+db, err := memdb.NewMemDB(schema)
+if err != nil {
+ panic(err)
+}
+
+// Create a write transaction
+txn := db.Txn(true)
+
+// Insert some people
+people := []*Person{
+ &Person{"joe@aol.com", "Joe", 30},
+ &Person{"lucy@aol.com", "Lucy", 35},
+ &Person{"tariq@aol.com", "Tariq", 21},
+ &Person{"dorothy@aol.com", "Dorothy", 53},
+}
+for _, p := range people {
+ if err := txn.Insert("person", p); err != nil {
+ panic(err)
+ }
+}
+
+// Commit the transaction
+txn.Commit()
+
+// Create read-only transaction
+txn = db.Txn(false)
+defer txn.Abort()
+
+// Lookup by email
+raw, err := txn.First("person", "id", "joe@aol.com")
+if err != nil {
+ panic(err)
+}
+
+// Say hi!
+fmt.Printf("Hello %s!\n", raw.(*Person).Name)
+
+// List all the people
+it, err := txn.Get("person", "id")
+if err != nil {
+ panic(err)
+}
+
+fmt.Println("All the people:")
+for obj := it.Next(); obj != nil; obj = it.Next() {
+ p := obj.(*Person)
+ fmt.Printf(" %s\n", p.Name)
+}
+
+// Range scan over people with ages between 25 and 35 inclusive
+it, err = txn.LowerBound("person", "age", 25)
+if err != nil {
+ panic(err)
+}
+
+fmt.Println("People aged 25 - 35:")
+for obj := it.Next(); obj != nil; obj = it.Next() {
+ p := obj.(*Person)
+ if p.Age > 35 {
+ break
+ }
+ fmt.Printf(" %s is aged %d\n", p.Name, p.Age)
+}
+// Output:
+// Hello Joe!
+// All the people:
+// Dorothy
+// Joe
+// Lucy
+// Tariq
+// People aged 25 - 35:
+// Joe is aged 30
+// Lucy is aged 35
+```
+
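+The watch channels mentioned in the feature list can be collected into a
+watch set to block until a query's results may have changed. Below is a
+minimal sketch, reusing the `db` and `Person` definitions from the example
+above:
+
+```go
+// Collect the watch channel from a query into a watch set
+ws := memdb.NewWatchSet()
+
+txn = db.Txn(false)
+watchCh, _, err := txn.FirstWatch("person", "id", "joe@aol.com")
+if err != nil {
+ panic(err)
+}
+ws.Add(watchCh)
+txn.Abort()
+
+// Block until the watched result may have changed
+if err := ws.WatchCtx(context.Background()); err == nil {
+ fmt.Println("Joe's record may have changed; re-run the query")
+}
+```
+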
diff --git a/vendor/github.com/hashicorp/go-memdb/changes.go b/vendor/github.com/hashicorp/go-memdb/changes.go
new file mode 100644
index 0000000..4761e92
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/changes.go
@@ -0,0 +1,37 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package memdb
+
+// Changes describes a set of mutations to memDB tables performed during a
+// transaction.
+type Changes []Change
+
+// Change describes a mutation to an object in a table.
+type Change struct {
+ Table string
+ Before interface{}
+ After interface{}
+
+ // primaryKey stores the raw key value from the primary index so that we can
+ // de-duplicate multiple updates of the same object in the same transaction
+ // but we don't expose this implementation detail to the consumer.
+ primaryKey []byte
+}
+
+// Created returns true if the mutation describes a new object being inserted.
+func (m *Change) Created() bool {
+ return m.Before == nil && m.After != nil
+}
+
+// Updated returns true if the mutation describes an existing object being
+// updated.
+func (m *Change) Updated() bool {
+ return m.Before != nil && m.After != nil
+}
+
+// Deleted returns true if the mutation describes an existing object being
+// deleted.
+func (m *Change) Deleted() bool {
+ return m.Before != nil && m.After == nil
+}
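+
+// Example (illustrative sketch): change tracking is opt-in per write
+// transaction; assuming a *MemDB named db:
+//
+//    txn := db.Txn(true)
+//    txn.TrackChanges()
+//    // ... Insert/Delete objects ...
+//    txn.Commit()
+//    for _, ch := range txn.Changes() {
+//        fmt.Println(ch.Table, ch.Created(), ch.Updated(), ch.Deleted())
+//    }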
diff --git a/vendor/github.com/hashicorp/go-memdb/filter.go b/vendor/github.com/hashicorp/go-memdb/filter.go
new file mode 100644
index 0000000..2e13521
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/filter.go
@@ -0,0 +1,41 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package memdb
+
+// FilterFunc is a function that takes the results of an iterator and returns
+// whether the result should be filtered out.
+type FilterFunc func(interface{}) bool
+
+// FilterIterator is used to wrap a ResultIterator and apply a filter over it.
+type FilterIterator struct {
+ // filter is the filter function applied over the base iterator.
+ filter FilterFunc
+
+ // iter is the iterator that is being wrapped.
+ iter ResultIterator
+}
+
+// NewFilterIterator wraps a ResultIterator. The filter function is applied
+// to each value returned by a call to iter.Next.
+//
+// See the documentation for ResultIterator to understand the behaviour of the
+// returned FilterIterator.
+func NewFilterIterator(iter ResultIterator, filter FilterFunc) *FilterIterator {
+ return &FilterIterator{
+ filter: filter,
+ iter: iter,
+ }
+}
+
+// WatchCh returns the watch channel of the wrapped iterator.
+func (f *FilterIterator) WatchCh() <-chan struct{} { return f.iter.WatchCh() }
+
+// Next returns the next non-filtered result from the wrapped iterator.
+func (f *FilterIterator) Next() interface{} {
+ for {
+ if value := f.iter.Next(); value == nil || !f.filter(value) {
+ return value
+ }
+ }
+}
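+
+// Example (illustrative sketch): keep only people older than 30, assuming
+// the Person type from the README. The filter returns true to drop a value:
+//
+//    it, _ := txn.Get("person", "id")
+//    adults := memdb.NewFilterIterator(it, func(raw interface{}) bool {
+//        return raw.(*Person).Age <= 30 // true means "filter this one out"
+//    })
+//    for obj := adults.Next(); obj != nil; obj = adults.Next() {
+//        fmt.Println(obj.(*Person).Name)
+//    }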
diff --git a/vendor/github.com/hashicorp/go-memdb/index.go b/vendor/github.com/hashicorp/go-memdb/index.go
new file mode 100644
index 0000000..588e1c8
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/index.go
@@ -0,0 +1,934 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package memdb
+
+import (
+ "encoding/binary"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// Indexer is an interface used for defining indexes. Indexes are used
+// for efficient lookup of objects in a MemDB table. An Indexer must also
+// implement one of SingleIndexer or MultiIndexer.
+//
+// Indexers are primarily responsible for returning the lookup key as
+// a byte slice. The byte slice is the key data in the underlying data storage.
+type Indexer interface {
+ // FromArgs is called to build the exact index key from a list of arguments.
+ FromArgs(args ...interface{}) ([]byte, error)
+}
+
+// SingleIndexer is an interface used for defining indexes that generate a
+// single value per object
+type SingleIndexer interface {
+ // FromObject extracts the index value from an object. The return values
+ // are whether the index value was found, the index value, and any error
+ // while extracting the index value, respectively.
+ FromObject(raw interface{}) (bool, []byte, error)
+}
+
+// MultiIndexer is an interface used for defining indexes that generate
+// multiple values per object. Each value is stored as a separate index
+// pointing to the same object.
+//
+// For example, an index that extracts the first and last name of a person
+// and allows lookup based on either would be a MultiIndexer. The FromObject
+// of this example would split the first and last name and return both as
+// values.
+type MultiIndexer interface {
+ // FromObject extracts index values from an object. The return values
+ // are the same as a SingleIndexer except there can be multiple index
+ // values.
+ FromObject(raw interface{}) (bool, [][]byte, error)
+}
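+
+// Example (illustrative sketch): a hypothetical MultiIndexer over a Person's
+// first and last name, so the object can be looked up by either value:
+//
+//    type NameIndex struct{}
+//
+//    func (NameIndex) FromObject(raw interface{}) (bool, [][]byte, error) {
+//        p, ok := raw.(*Person)
+//        if !ok {
+//            return false, nil, fmt.Errorf("unexpected type %T", raw)
+//        }
+//        // Null-terminate like the built-in string indexers do.
+//        first := []byte(p.First + "\x00")
+//        last := []byte(p.Last + "\x00")
+//        return true, [][]byte{first, last}, nil
+//    }
+//
+//    func (NameIndex) FromArgs(args ...interface{}) ([]byte, error) {
+//        if len(args) != 1 {
+//            return nil, fmt.Errorf("must provide only a single argument")
+//        }
+//        s, ok := args[0].(string)
+//        if !ok {
+//            return nil, fmt.Errorf("argument must be a string: %#v", args[0])
+//        }
+//        return []byte(s + "\x00"), nil
+//    }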
+
+// PrefixIndexer is an optional interface on top of an Indexer that allows
+// indexes to support prefix-based iteration.
+type PrefixIndexer interface {
+ // PrefixFromArgs is the same as FromArgs for an Indexer except that
+ // the index value returned should return all prefix-matched values.
+ PrefixFromArgs(args ...interface{}) ([]byte, error)
+}
+
+// StringFieldIndex is used to extract a field from an object
+// using reflection and builds an index on that field.
+type StringFieldIndex struct {
+ Field string
+ Lowercase bool
+}
+
+func (s *StringFieldIndex) FromObject(obj interface{}) (bool, []byte, error) {
+ v := reflect.ValueOf(obj)
+ v = reflect.Indirect(v) // Dereference the pointer if any
+
+ fv := v.FieldByName(s.Field)
+ isPtr := fv.Kind() == reflect.Ptr
+ fv = reflect.Indirect(fv)
+ if !isPtr && !fv.IsValid() {
+ return false, nil,
+ fmt.Errorf("field '%s' for %#v is invalid (ptr: %v)", s.Field, obj, isPtr)
+ }
+
+ if isPtr && !fv.IsValid() {
+ val := ""
+ return false, []byte(val), nil
+ }
+
+ val := fv.String()
+ if val == "" {
+ return false, nil, nil
+ }
+
+ if s.Lowercase {
+ val = strings.ToLower(val)
+ }
+
+ // Add the null character as a terminator
+ val += "\x00"
+ return true, []byte(val), nil
+}
+
+func (s *StringFieldIndex) FromArgs(args ...interface{}) ([]byte, error) {
+ if len(args) != 1 {
+ return nil, fmt.Errorf("must provide only a single argument")
+ }
+ arg, ok := args[0].(string)
+ if !ok {
+ return nil, fmt.Errorf("argument must be a string: %#v", args[0])
+ }
+ if s.Lowercase {
+ arg = strings.ToLower(arg)
+ }
+ // Add the null character as a terminator
+ arg += "\x00"
+ return []byte(arg), nil
+}
+
+func (s *StringFieldIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) {
+ val, err := s.FromArgs(args...)
+ if err != nil {
+ return nil, err
+ }
+
+ // Strip the null terminator, the rest is a prefix
+ n := len(val)
+ if n > 0 {
+ return val[:n-1], nil
+ }
+ return val, nil
+}
+
+// StringSliceFieldIndex builds an index from a field on an object that is a
+// string slice ([]string). Each value within the string slice can be used for
+// lookup.
+type StringSliceFieldIndex struct {
+ Field string
+ Lowercase bool
+}
+
+func (s *StringSliceFieldIndex) FromObject(obj interface{}) (bool, [][]byte, error) {
+ v := reflect.ValueOf(obj)
+ v = reflect.Indirect(v) // Dereference the pointer if any
+
+ fv := v.FieldByName(s.Field)
+ if !fv.IsValid() {
+ return false, nil,
+ fmt.Errorf("field '%s' for %#v is invalid", s.Field, obj)
+ }
+
+ if fv.Kind() != reflect.Slice || fv.Type().Elem().Kind() != reflect.String {
+ return false, nil, fmt.Errorf("field '%s' is not a string slice", s.Field)
+ }
+
+ length := fv.Len()
+ vals := make([][]byte, 0, length)
+ for i := 0; i < fv.Len(); i++ {
+ val := fv.Index(i).String()
+ if val == "" {
+ continue
+ }
+
+ if s.Lowercase {
+ val = strings.ToLower(val)
+ }
+
+ // Add the null character as a terminator
+ val += "\x00"
+ vals = append(vals, []byte(val))
+ }
+ if len(vals) == 0 {
+ return false, nil, nil
+ }
+ return true, vals, nil
+}
+
+func (s *StringSliceFieldIndex) FromArgs(args ...interface{}) ([]byte, error) {
+ if len(args) != 1 {
+ return nil, fmt.Errorf("must provide only a single argument")
+ }
+ arg, ok := args[0].(string)
+ if !ok {
+ return nil, fmt.Errorf("argument must be a string: %#v", args[0])
+ }
+ if s.Lowercase {
+ arg = strings.ToLower(arg)
+ }
+ // Add the null character as a terminator
+ arg += "\x00"
+ return []byte(arg), nil
+}
+
+func (s *StringSliceFieldIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) {
+ val, err := s.FromArgs(args...)
+ if err != nil {
+ return nil, err
+ }
+
+ // Strip the null terminator, the rest is a prefix
+ n := len(val)
+ if n > 0 {
+ return val[:n-1], nil
+ }
+ return val, nil
+}
+
+// StringMapFieldIndex is used to extract a field of type map[string]string
+// from an object using reflection and builds an index on that field.
+//
+// Note that although FromArgs in theory supports using either one or
+// two arguments, there is a bug: FromObject only creates an index
+// using key/value, and does not also create an index using key. This
+// means a lookup using one argument will never actually work.
+//
+// It is currently left as-is to prevent backwards compatibility
+// issues.
+//
+// TODO: Fix this in the next major bump.
+type StringMapFieldIndex struct {
+ Field string
+ Lowercase bool
+}
+
+var MapType = reflect.MapOf(reflect.TypeOf(""), reflect.TypeOf("")).Kind()
+
+func (s *StringMapFieldIndex) FromObject(obj interface{}) (bool, [][]byte, error) {
+ v := reflect.ValueOf(obj)
+ v = reflect.Indirect(v) // Dereference the pointer if any
+
+ fv := v.FieldByName(s.Field)
+ if !fv.IsValid() {
+ return false, nil, fmt.Errorf("field '%s' for %#v is invalid", s.Field, obj)
+ }
+
+ if fv.Kind() != MapType {
+ return false, nil, fmt.Errorf("field '%s' is not a map[string]string", s.Field)
+ }
+
+ length := fv.Len()
+ vals := make([][]byte, 0, length)
+ for _, key := range fv.MapKeys() {
+ k := key.String()
+ if k == "" {
+ continue
+ }
+ val := fv.MapIndex(key).String()
+
+ if s.Lowercase {
+ k = strings.ToLower(k)
+ val = strings.ToLower(val)
+ }
+
+ // Add the null character as a terminator
+ k += "\x00" + val + "\x00"
+
+ vals = append(vals, []byte(k))
+ }
+ if len(vals) == 0 {
+ return false, nil, nil
+ }
+ return true, vals, nil
+}
+
+// WARNING: Because of a bug in FromObject, a lookup using the single-argument
+// version of this function will never match anything.
+func (s *StringMapFieldIndex) FromArgs(args ...interface{}) ([]byte, error) {
+ if len(args) > 2 || len(args) == 0 {
+ return nil, fmt.Errorf("must provide one or two arguments")
+ }
+ key, ok := args[0].(string)
+ if !ok {
+ return nil, fmt.Errorf("argument must be a string: %#v", args[0])
+ }
+ if s.Lowercase {
+ key = strings.ToLower(key)
+ }
+ // Add the null character as a terminator
+ key += "\x00"
+
+ if len(args) == 2 {
+ val, ok := args[1].(string)
+ if !ok {
+ return nil, fmt.Errorf("argument must be a string: %#v", args[1])
+ }
+ if s.Lowercase {
+ val = strings.ToLower(val)
+ }
+ // Add the null character as a terminator
+ key += val + "\x00"
+ }
+
+ return []byte(key), nil
+}
+
+// IntFieldIndex is used to extract an int field from an object using
+// reflection and builds an index on that field.
+type IntFieldIndex struct {
+ Field string
+}
+
+func (i *IntFieldIndex) FromObject(obj interface{}) (bool, []byte, error) {
+ v := reflect.ValueOf(obj)
+ v = reflect.Indirect(v) // Dereference the pointer if any
+
+ fv := v.FieldByName(i.Field)
+ if !fv.IsValid() {
+ return false, nil,
+ fmt.Errorf("field '%s' for %#v is invalid", i.Field, obj)
+ }
+
+ // Check the type
+ k := fv.Kind()
+ size, ok := IsIntType(k)
+ if !ok {
+ return false, nil, fmt.Errorf("field %q is of type %v; want an int", i.Field, k)
+ }
+
+ // Get the value and encode it
+ val := fv.Int()
+ buf := encodeInt(val, size)
+
+ return true, buf, nil
+}
+
+func (i *IntFieldIndex) FromArgs(args ...interface{}) ([]byte, error) {
+ if len(args) != 1 {
+ return nil, fmt.Errorf("must provide only a single argument")
+ }
+
+ v := reflect.ValueOf(args[0])
+ if !v.IsValid() {
+ return nil, fmt.Errorf("%#v is invalid", args[0])
+ }
+
+ k := v.Kind()
+ size, ok := IsIntType(k)
+ if !ok {
+ return nil, fmt.Errorf("arg is of type %v; want a int", k)
+ }
+
+ val := v.Int()
+ buf := encodeInt(val, size)
+
+ return buf, nil
+}
+
+func encodeInt(val int64, size int) []byte {
+ buf := make([]byte, size)
+
+ // This bit flips the sign bit on any sized signed twos-complement integer,
+ // which when truncated to a uint of the same size will bias the value such
+ // that the maximum negative int becomes 0, and the maximum positive int
+ // becomes the maximum positive uint.
+ scaled := val ^ int64(-1<<(size*8-1))
+
+ switch size {
+ case 1:
+ buf[0] = uint8(scaled)
+ case 2:
+ binary.BigEndian.PutUint16(buf, uint16(scaled))
+ case 4:
+ binary.BigEndian.PutUint32(buf, uint32(scaled))
+ case 8:
+ binary.BigEndian.PutUint64(buf, uint64(scaled))
+ default:
+ panic(fmt.Sprintf("unsupported int size parameter: %d", size))
+ }
+
+ return buf
+}
+
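+// For example, with size = 1 (int8), the bias maps the signed range onto
+// the unsigned byte range while preserving order:
+//
+//    encodeInt(-128, 1) // => []byte{0x00}
+//    encodeInt(-1, 1)   // => []byte{0x7f}
+//    encodeInt(0, 1)    // => []byte{0x80}
+//    encodeInt(127, 1)  // => []byte{0xff}
+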
+// IsIntType returns whether the passed type is a type of int and the number
+// of bytes needed to encode the type.
+func IsIntType(k reflect.Kind) (size int, okay bool) {
+ switch k {
+ case reflect.Int:
+ return strconv.IntSize / 8, true
+ case reflect.Int8:
+ return 1, true
+ case reflect.Int16:
+ return 2, true
+ case reflect.Int32:
+ return 4, true
+ case reflect.Int64:
+ return 8, true
+ default:
+ return 0, false
+ }
+}
+
+// UintFieldIndex is used to extract a uint field from an object using
+// reflection and builds an index on that field.
+type UintFieldIndex struct {
+ Field string
+}
+
+func (u *UintFieldIndex) FromObject(obj interface{}) (bool, []byte, error) {
+ v := reflect.ValueOf(obj)
+ v = reflect.Indirect(v) // Dereference the pointer if any
+
+ fv := v.FieldByName(u.Field)
+ if !fv.IsValid() {
+ return false, nil,
+ fmt.Errorf("field '%s' for %#v is invalid", u.Field, obj)
+ }
+
+ // Check the type
+ k := fv.Kind()
+ size, ok := IsUintType(k)
+ if !ok {
+ return false, nil, fmt.Errorf("field %q is of type %v; want a uint", u.Field, k)
+ }
+
+ // Get the value and encode it
+ val := fv.Uint()
+ buf := encodeUInt(val, size)
+
+ return true, buf, nil
+}
+
+func (u *UintFieldIndex) FromArgs(args ...interface{}) ([]byte, error) {
+ if len(args) != 1 {
+ return nil, fmt.Errorf("must provide only a single argument")
+ }
+
+ v := reflect.ValueOf(args[0])
+ if !v.IsValid() {
+ return nil, fmt.Errorf("%#v is invalid", args[0])
+ }
+
+ k := v.Kind()
+ size, ok := IsUintType(k)
+ if !ok {
+ return nil, fmt.Errorf("arg is of type %v; want a uint", k)
+ }
+
+ val := v.Uint()
+ buf := encodeUInt(val, size)
+
+ return buf, nil
+}
+
+func encodeUInt(val uint64, size int) []byte {
+ buf := make([]byte, size)
+
+ switch size {
+ case 1:
+ buf[0] = uint8(val)
+ case 2:
+ binary.BigEndian.PutUint16(buf, uint16(val))
+ case 4:
+ binary.BigEndian.PutUint32(buf, uint32(val))
+ case 8:
+ binary.BigEndian.PutUint64(buf, val)
+ default:
+ panic(fmt.Sprintf("unsupported uint size parameter: %d", size))
+ }
+
+ return buf
+}
+
+// IsUintType returns whether the passed type is a type of uint and the number
+// of bytes needed to encode the type.
+func IsUintType(k reflect.Kind) (size int, okay bool) {
+ switch k {
+ case reflect.Uint:
+ return strconv.IntSize / 8, true
+ case reflect.Uint8:
+ return 1, true
+ case reflect.Uint16:
+ return 2, true
+ case reflect.Uint32:
+ return 4, true
+ case reflect.Uint64:
+ return 8, true
+ default:
+ return 0, false
+ }
+}
+
+// BoolFieldIndex is used to extract a boolean field from an object using
+// reflection and builds an index on that field.
+type BoolFieldIndex struct {
+ Field string
+}
+
+func (i *BoolFieldIndex) FromObject(obj interface{}) (bool, []byte, error) {
+ v := reflect.ValueOf(obj)
+ v = reflect.Indirect(v) // Dereference the pointer if any
+
+ fv := v.FieldByName(i.Field)
+ if !fv.IsValid() {
+ return false, nil,
+ fmt.Errorf("field '%s' for %#v is invalid", i.Field, obj)
+ }
+
+ // Check the type
+ k := fv.Kind()
+ if k != reflect.Bool {
+ return false, nil, fmt.Errorf("field %q is of type %v; want a bool", i.Field, k)
+ }
+
+ // Get the value and encode it
+ buf := make([]byte, 1)
+ if fv.Bool() {
+ buf[0] = 1
+ }
+
+ return true, buf, nil
+}
+
+func (i *BoolFieldIndex) FromArgs(args ...interface{}) ([]byte, error) {
+ return fromBoolArgs(args)
+}
+
+// UUIDFieldIndex is used to extract a field from an object
+// using reflection and builds an index on that field by treating
+// it as a UUID. This is an optimization to using a StringFieldIndex
+// as the UUID can be more compactly represented in byte form.
+type UUIDFieldIndex struct {
+ Field string
+}
+
+func (u *UUIDFieldIndex) FromObject(obj interface{}) (bool, []byte, error) {
+ v := reflect.ValueOf(obj)
+ v = reflect.Indirect(v) // Dereference the pointer if any
+
+ fv := v.FieldByName(u.Field)
+ if !fv.IsValid() {
+ return false, nil,
+ fmt.Errorf("field '%s' for %#v is invalid", u.Field, obj)
+ }
+
+ val := fv.String()
+ if val == "" {
+ return false, nil, nil
+ }
+
+ buf, err := u.parseString(val, true)
+ return true, buf, err
+}
+
+func (u *UUIDFieldIndex) FromArgs(args ...interface{}) ([]byte, error) {
+ if len(args) != 1 {
+ return nil, fmt.Errorf("must provide only a single argument")
+ }
+ switch arg := args[0].(type) {
+ case string:
+ return u.parseString(arg, true)
+ case []byte:
+ if len(arg) != 16 {
+ return nil, fmt.Errorf("byte slice must be 16 characters")
+ }
+ return arg, nil
+ default:
+ return nil,
+ fmt.Errorf("argument must be a string or byte slice: %#v", args[0])
+ }
+}
+
+func (u *UUIDFieldIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) {
+ if len(args) != 1 {
+ return nil, fmt.Errorf("must provide only a single argument")
+ }
+ switch arg := args[0].(type) {
+ case string:
+ return u.parseString(arg, false)
+ case []byte:
+ return arg, nil
+ default:
+ return nil,
+ fmt.Errorf("argument must be a string or byte slice: %#v", args[0])
+ }
+}
+
+// parseString parses a UUID from the string. If enforceLength is false, it will
+// parse a partial UUID. An error is returned if the input, stripped of hyphens,
+// is not of even length.
+func (u *UUIDFieldIndex) parseString(s string, enforceLength bool) ([]byte, error) {
+ // Verify the length
+ l := len(s)
+ if enforceLength && l != 36 {
+ return nil, fmt.Errorf("UUID must be 36 characters")
+ } else if l > 36 {
+ return nil, fmt.Errorf("Invalid UUID length. UUID have 36 characters; got %d", l)
+ }
+
+ hyphens := strings.Count(s, "-")
+ if hyphens > 4 {
+ return nil, fmt.Errorf(`UUID should have a maximum of 4 "-"; got %d`, hyphens)
+ }
+
+ // The sanitized length is the length of the original string without the "-".
+ sanitized := strings.Replace(s, "-", "", -1)
+ sanitizedLength := len(sanitized)
+ if sanitizedLength%2 != 0 {
+ return nil, fmt.Errorf("Input (without hyphens) must be even length")
+ }
+
+ dec, err := hex.DecodeString(sanitized)
+ if err != nil {
+ return nil, fmt.Errorf("Invalid UUID: %v", err)
+ }
+
+ return dec, nil
+}
+
+// FieldSetIndex is used to extract a field from an object using reflection and
+// builds an index on whether the field is set by comparing it against its
+// type's nil value.
+type FieldSetIndex struct {
+ Field string
+}
+
+func (f *FieldSetIndex) FromObject(obj interface{}) (bool, []byte, error) {
+ v := reflect.ValueOf(obj)
+ v = reflect.Indirect(v) // Dereference the pointer if any
+
+ fv := v.FieldByName(f.Field)
+ if !fv.IsValid() {
+ return false, nil,
+ fmt.Errorf("field '%s' for %#v is invalid", f.Field, obj)
+ }
+
+ if fv.Interface() == reflect.Zero(fv.Type()).Interface() {
+ return true, []byte{0}, nil
+ }
+
+ return true, []byte{1}, nil
+}
+
+func (f *FieldSetIndex) FromArgs(args ...interface{}) ([]byte, error) {
+ return fromBoolArgs(args)
+}
+
+// ConditionalIndex builds an index based on a condition specified by a passed
+// user function. This function may examine the passed object and return a
+// boolean to encapsulate an arbitrarily complex conditional.
+type ConditionalIndex struct {
+ Conditional ConditionalIndexFunc
+}
+
+// ConditionalIndexFunc is the required function interface for a
+// ConditionalIndex.
+type ConditionalIndexFunc func(obj interface{}) (bool, error)
+
+func (c *ConditionalIndex) FromObject(obj interface{}) (bool, []byte, error) {
+ // Call the user's function
+ res, err := c.Conditional(obj)
+ if err != nil {
+ return false, nil, fmt.Errorf("ConditionalIndexFunc(%#v) failed: %v", obj, err)
+ }
+
+ if res {
+ return true, []byte{1}, nil
+ }
+
+ return true, []byte{0}, nil
+}
+
+func (c *ConditionalIndex) FromArgs(args ...interface{}) ([]byte, error) {
+ return fromBoolArgs(args)
+}
+
+// fromBoolArgs is a helper that expects only a single boolean argument and
+// returns a single length byte array containing either a one or zero depending
+// on whether the passed input is true or false respectively.
+func fromBoolArgs(args []interface{}) ([]byte, error) {
+ if len(args) != 1 {
+ return nil, fmt.Errorf("must provide only a single argument")
+ }
+
+ if val, ok := args[0].(bool); !ok {
+ return nil, fmt.Errorf("argument must be a boolean type: %#v", args[0])
+ } else if val {
+ return []byte{1}, nil
+ }
+
+ return []byte{0}, nil
+}
+
+// CompoundIndex is used to build an index using multiple sub-indexes.
+// Prefix-based iteration is supported as long as the appropriate prefix
+// of indexers support it. Each sub-indexer is assumed to expect exactly
+// one argument.
+type CompoundIndex struct {
+ Indexes []Indexer
+
+ // AllowMissing results in an index based on only the indexers
+ // that return data. If true, you may end up with 2/3 columns
+ // indexed which might be useful for an index scan. Otherwise,
+ // the CompoundIndex requires all indexers to be satisfied.
+ AllowMissing bool
+}
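+
+// Example (illustrative sketch): index a hypothetical Person by
+// (Last, First) so lookups can match both fields exactly, or scan by a
+// prefix of them:
+//
+//    "name": &IndexSchema{
+//        Name: "name",
+//        Indexer: &CompoundIndex{
+//            Indexes: []Indexer{
+//                &StringFieldIndex{Field: "Last"},
+//                &StringFieldIndex{Field: "First"},
+//            },
+//        },
+//    }
+//
+// txn.First("person", "name", "Smith", "Jane") finds an exact match, while
+// txn.Get("person", "name_prefix", "Smith") scans everyone named Smith.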
+
+func (c *CompoundIndex) FromObject(raw interface{}) (bool, []byte, error) {
+ var out []byte
+ for i, idxRaw := range c.Indexes {
+ idx, ok := idxRaw.(SingleIndexer)
+ if !ok {
+ return false, nil, fmt.Errorf("sub-index %d error: %s", i, "sub-index must be a SingleIndexer")
+ }
+ ok, val, err := idx.FromObject(raw)
+ if err != nil {
+ return false, nil, fmt.Errorf("sub-index %d error: %v", i, err)
+ }
+ if !ok {
+ if c.AllowMissing {
+ break
+ } else {
+ return false, nil, nil
+ }
+ }
+ out = append(out, val...)
+ }
+ return true, out, nil
+}
+
+func (c *CompoundIndex) FromArgs(args ...interface{}) ([]byte, error) {
+ if len(args) != len(c.Indexes) {
+ return nil, fmt.Errorf("non-equivalent argument count and index fields")
+ }
+ var out []byte
+ for i, arg := range args {
+ val, err := c.Indexes[i].FromArgs(arg)
+ if err != nil {
+ return nil, fmt.Errorf("sub-index %d error: %v", i, err)
+ }
+ out = append(out, val...)
+ }
+ return out, nil
+}
+
+func (c *CompoundIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) {
+ if len(args) > len(c.Indexes) {
+ return nil, fmt.Errorf("more arguments than index fields")
+ }
+ var out []byte
+ for i, arg := range args {
+ if i+1 < len(args) {
+ val, err := c.Indexes[i].FromArgs(arg)
+ if err != nil {
+ return nil, fmt.Errorf("sub-index %d error: %v", i, err)
+ }
+ out = append(out, val...)
+ } else {
+ prefixIndexer, ok := c.Indexes[i].(PrefixIndexer)
+ if !ok {
+ return nil, fmt.Errorf("sub-index %d does not support prefix scanning", i)
+ }
+ val, err := prefixIndexer.PrefixFromArgs(arg)
+ if err != nil {
+ return nil, fmt.Errorf("sub-index %d error: %v", i, err)
+ }
+ out = append(out, val...)
+ }
+ }
+ return out, nil
+}
+
+// CompoundMultiIndex is used to build an index using multiple
+// sub-indexes.
+//
+// Unlike CompoundIndex, CompoundMultiIndex can have both
+// SingleIndexer and MultiIndexer sub-indexers. However, each
+// MultiIndexer adds considerable overhead/complexity in terms of
+// the number of indexes created under-the-hood. It is not suggested
+// to use more than one or two, if possible.
+//
+// Another change from CompoundIndex is that if AllowMissing is
+// set, not only is it valid to have empty index fields, but it will
+// still create index values up to the first empty index. This means
+// that if you have a value with an empty field, rather than using a
+// prefix for lookup, you can simply pass in less arguments. As an
+// example, if {Foo, Bar} is indexed but Bar is missing for a value
+// and AllowMissing is set, an index will still be created for {Foo}
+// and it is valid to do a lookup passing in only Foo as an argument.
+// Note that the ordering isn't guaranteed -- it's last-insert wins, but
+// that is also true when two objects produce the same index values even
+// without AllowMissing.
+//
+// Because a StringMapFieldIndex can take a varying number of args,
+// it is currently a requirement that whenever it is used, two
+// arguments must _always_ be provided for it. In theory only one is
+// needed, but a bug in that indexer means the single-argument version
+// will never work. You can leave the second argument nil, but it will
+// never produce a value. We support this so call sites keep working
+// once that bug is fixed, likely in the next major version bump.
+//
+// Prefix-based indexing is not currently supported.
+type CompoundMultiIndex struct {
+ Indexes []Indexer
+
+ // AllowMissing results in an index based on only the indexers
+ // that return data. If true, you may end up with only, say, 2 of 3
+ // columns indexed, which can still be useful for an index scan.
+ // Otherwise, CompoundMultiIndex requires all indexers to be satisfied.
+ AllowMissing bool
+}
+
+func (c *CompoundMultiIndex) FromObject(raw interface{}) (bool, [][]byte, error) {
+ // At each entry, builder is storing the results from the next index
+ builder := make([][][]byte, 0, len(c.Indexes))
+
+forloop:
+ // This loop goes through each indexer and adds the value(s) provided to the next
+ // entry in the slice. We can then later walk it like a tree to construct the indices.
+ for i, idxRaw := range c.Indexes {
+ switch idx := idxRaw.(type) {
+ case SingleIndexer:
+ ok, val, err := idx.FromObject(raw)
+ if err != nil {
+ return false, nil, fmt.Errorf("single sub-index %d error: %v", i, err)
+ }
+ if !ok {
+ if c.AllowMissing {
+ break forloop
+ } else {
+ return false, nil, nil
+ }
+ }
+ builder = append(builder, [][]byte{val})
+
+ case MultiIndexer:
+ ok, vals, err := idx.FromObject(raw)
+ if err != nil {
+ return false, nil, fmt.Errorf("multi sub-index %d error: %v", i, err)
+ }
+ if !ok {
+ if c.AllowMissing {
+ break forloop
+ } else {
+ return false, nil, nil
+ }
+ }
+
+ // Add each of the new values to each of the old values
+ builder = append(builder, vals)
+
+ default:
+ return false, nil, fmt.Errorf("sub-index %d does not satisfy either SingleIndexer or MultiIndexer", i)
+ }
+ }
+
+ // Start with a generous capacity to avoid resizing if possible
+ n := len(c.Indexes)
+ out := make([][]byte, 0, n*n*n)
+
+ // We are walking through the builder slice essentially in a depth-first fashion,
+ // building the prefix and leaves as we go. If AllowMissing is false, we only insert
+ // these full paths to leaves. Otherwise, we also insert each prefix along the way.
+ // This allows for lookup in FromArgs when AllowMissing is true that does not contain
+ // the full set of arguments. e.g. for {Foo, Bar} where an object has only the Foo
+ // field specified as "abc", it is valid to call FromArgs with just "abc".
+ var walkVals func([]byte, int)
+ walkVals = func(currPrefix []byte, depth int) {
+ if depth >= len(builder) {
+ return
+ }
+
+ if depth == len(builder)-1 {
+ // These are the "leaves", so append directly
+ for _, v := range builder[depth] {
+ outcome := make([]byte, len(currPrefix))
+ copy(outcome, currPrefix)
+ out = append(out, append(outcome, v...))
+ }
+ return
+ }
+ for _, v := range builder[depth] {
+ // Copy the prefix so sibling branches don't append into a
+ // shared backing array.
+ nextPrefix := make([]byte, len(currPrefix), len(currPrefix)+len(v))
+ copy(nextPrefix, currPrefix)
+ nextPrefix = append(nextPrefix, v...)
+ if c.AllowMissing {
+ out = append(out, nextPrefix)
+ }
+ walkVals(nextPrefix, depth+1)
+ }
+ }
+
+ walkVals(nil, 0)
+
+ return true, out, nil
+}
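+
+// Worked example (assumed indexers, not part of the vendored source): with
+// a SingleIndexer over field Org and a MultiIndexer over field Tags, an
+// object {Org: "acme", Tags: ["a", "b"]} produces
+// builder = [["acme"], ["a", "b"]], and walkVals emits one key per leaf
+// path: "acme"+"a" and "acme"+"b". With AllowMissing set, the intermediate
+// prefix "acme" is emitted as well, so a lookup on Org alone still finds
+// the object.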
+
+func (c *CompoundMultiIndex) FromArgs(args ...interface{}) ([]byte, error) {
+ var stringMapCount int
+ var argCount int
+ for _, index := range c.Indexes {
+ if argCount >= len(args) {
+ break
+ }
+ if _, ok := index.(*StringMapFieldIndex); ok {
+ // We require pairs for StringMapFieldIndex, but only got one
+ if argCount+1 >= len(args) {
+ return nil, errors.New("invalid number of arguments")
+ }
+ stringMapCount++
+ argCount += 2
+ } else {
+ argCount++
+ }
+ }
+ argCount = 0
+
+ switch c.AllowMissing {
+ case true:
+ if len(args) > len(c.Indexes)+stringMapCount {
+ return nil, errors.New("too many arguments")
+ }
+
+ default:
+ if len(args) != len(c.Indexes)+stringMapCount {
+ return nil, errors.New("number of arguments does not equal number of indexers")
+ }
+ }
+
+ var out []byte
+ var val []byte
+ var err error
+ for i, idx := range c.Indexes {
+ if argCount >= len(args) {
+ // We're done; should only hit this if AllowMissing
+ break
+ }
+ if _, ok := idx.(*StringMapFieldIndex); ok {
+ if args[argCount+1] == nil {
+ val, err = idx.FromArgs(args[argCount])
+ } else {
+ val, err = idx.FromArgs(args[argCount : argCount+2]...)
+ }
+ argCount += 2
+ } else {
+ val, err = idx.FromArgs(args[argCount])
+ argCount++
+ }
+ if err != nil {
+ return nil, fmt.Errorf("sub-index %d error: %v", i, err)
+ }
+ out = append(out, val...)
+ }
+ return out, nil
+}
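+
+// Illustrative sketch (assumed schema, not part of the vendored source):
+// a CompoundMultiIndex mixing a StringFieldIndex with a
+// StringMapFieldIndex consumes three arguments here, since the map
+// indexer always takes a key/value pair:
+//
+//	idx := &CompoundMultiIndex{
+//		Indexes: []Indexer{
+//			&StringFieldIndex{Field: "Org"},
+//			&StringMapFieldIndex{Field: "Labels"},
+//		},
+//		AllowMissing: true,
+//	}
+//	// One arg for Org, then a key/value pair for Labels.
+//	key, err := idx.FromArgs("acme", "env", "prod")
+//	// With AllowMissing set, passing only "acme" is also valid.
+//	short, serr := idx.FromArgs("acme")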
diff --git a/vendor/github.com/hashicorp/go-memdb/memdb.go b/vendor/github.com/hashicorp/go-memdb/memdb.go
new file mode 100644
index 0000000..13cc6a8
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/memdb.go
@@ -0,0 +1,119 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+// Package memdb provides an in-memory database that supports transactions
+// and MVCC.
+package memdb
+
+import (
+ "sync"
+ "sync/atomic"
+ "unsafe"
+
+ "github.com/hashicorp/go-immutable-radix"
+)
+
+// MemDB is an in-memory database providing Atomicity, Consistency, and
+// Isolation from ACID. MemDB doesn't provide Durability since it is an
+// in-memory database.
+//
+// MemDB provides a table abstraction to store objects (rows) with multiple
+// indexes based on inserted values. The database makes use of immutable radix
+// trees to provide transactions and MVCC.
+//
+// Objects inserted into MemDB are not copied. It is **extremely important**
+// that objects are not modified in-place after they are inserted since they
+// are stored directly in MemDB. It remains unsafe to modify inserted objects
+// even after they've been deleted from MemDB since there may still be older
+// snapshots of the DB being read from other goroutines.
+type MemDB struct {
+ schema *DBSchema
+ root unsafe.Pointer // *iradix.Tree underneath
+ primary bool
+
+ // There can only be a single writer at once
+ writer sync.Mutex
+}
+
+// NewMemDB creates a new MemDB with the given schema.
+func NewMemDB(schema *DBSchema) (*MemDB, error) {
+ // Validate the schema
+ if err := schema.Validate(); err != nil {
+ return nil, err
+ }
+
+ // Create the MemDB
+ db := &MemDB{
+ schema: schema,
+ root: unsafe.Pointer(iradix.New()),
+ primary: true,
+ }
+ if err := db.initialize(); err != nil {
+ return nil, err
+ }
+
+ return db, nil
+}
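+
+// Illustrative sketch (hypothetical "person" table, not part of the
+// vendored source): a minimal schema and database construction.
+//
+//	schema := &DBSchema{
+//		Tables: map[string]*TableSchema{
+//			"person": {
+//				Name: "person",
+//				Indexes: map[string]*IndexSchema{
+//					"id": {
+//						Name:    "id",
+//						Unique:  true,
+//						Indexer: &StringFieldIndex{Field: "Email"},
+//					},
+//				},
+//			},
+//		},
+//	}
+//	db, err := NewMemDB(schema)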
+
+// DBSchema returns the schema in use, for introspection.
+//
+// The method is intended for *read-only* debugging use cases; the
+// returned schema should *never* be modified in place.
+func (db *MemDB) DBSchema() *DBSchema {
+ return db.schema
+}
+
+// getRoot is used to do an atomic load of the root pointer
+func (db *MemDB) getRoot() *iradix.Tree {
+ root := (*iradix.Tree)(atomic.LoadPointer(&db.root))
+ return root
+}
+
+// Txn is used to start a new transaction in either read or write mode.
+// There can only be a single concurrent writer, but any number of readers.
+func (db *MemDB) Txn(write bool) *Txn {
+ if write {
+ db.writer.Lock()
+ }
+ txn := &Txn{
+ db: db,
+ write: write,
+ rootTxn: db.getRoot().Txn(),
+ }
+ return txn
+}
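+
+// Illustrative sketch (hypothetical "person" table and Person app type):
+// readers never block and always see a consistent snapshot, while only a
+// single writer may be active at a time.
+//
+//	read := db.Txn(false)
+//	raw, err := read.First("person", "id", "a@b.c")
+//	if err == nil && raw != nil {
+//		p := raw.(*Person) // Person is a hypothetical application type
+//		_ = p
+//	}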
+
+// Snapshot is used to capture a point-in-time snapshot of the database that
+// will not be affected by any write operations to the existing DB.
+//
+// If MemDB is storing reference-based values (pointers, maps, slices, etc.),
+// the Snapshot will not deep copy those values. Therefore, it is still unsafe
+// to modify any inserted values in either DB.
+func (db *MemDB) Snapshot() *MemDB {
+ clone := &MemDB{
+ schema: db.schema,
+ root: unsafe.Pointer(db.getRoot()),
+ primary: false,
+ }
+ return clone
+}
+
+// initialize is used to setup the DB for use after creation. This should
+// be called only once after allocating a MemDB.
+func (db *MemDB) initialize() error {
+ root := db.getRoot()
+ for tName, tableSchema := range db.schema.Tables {
+ for iName := range tableSchema.Indexes {
+ index := iradix.New()
+ path := indexPath(tName, iName)
+ root, _, _ = root.Insert(path, index)
+ }
+ }
+ db.root = unsafe.Pointer(root)
+ return nil
+}
+
+// indexPath returns the path from the root to the given table index
+func indexPath(table, index string) []byte {
+ return []byte(table + "." + index)
+}
diff --git a/vendor/github.com/hashicorp/go-memdb/schema.go b/vendor/github.com/hashicorp/go-memdb/schema.go
new file mode 100644
index 0000000..2d66f99
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/schema.go
@@ -0,0 +1,117 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package memdb
+
+import "fmt"
+
+// DBSchema is the schema to use for the full database with a MemDB instance.
+//
+// MemDB will require a valid schema. Schema validation can be tested using
+// the Validate function. Calling this function is recommended in unit tests.
+type DBSchema struct {
+ // Tables is the set of tables within this database. The key is the
+ // table name and must match the Name in TableSchema.
+ Tables map[string]*TableSchema
+}
+
+// Validate validates the schema.
+func (s *DBSchema) Validate() error {
+ if s == nil {
+ return fmt.Errorf("schema is nil")
+ }
+
+ if len(s.Tables) == 0 {
+ return fmt.Errorf("schema has no tables defined")
+ }
+
+ for name, table := range s.Tables {
+ if name != table.Name {
+ return fmt.Errorf("table name mis-match for '%s'", name)
+ }
+
+ if err := table.Validate(); err != nil {
+ return fmt.Errorf("table %q: %s", name, err)
+ }
+ }
+
+ return nil
+}
+
+// TableSchema is the schema for a single table.
+type TableSchema struct {
+ // Name of the table. This must match the key in the Tables map in DBSchema.
+ Name string
+
+ // Indexes is the set of indexes for querying this table. The key
+ // is a unique name for the index and must match the Name in the
+ // IndexSchema.
+ Indexes map[string]*IndexSchema
+}
+
+// Validate is used to validate the table schema
+func (s *TableSchema) Validate() error {
+ if s.Name == "" {
+ return fmt.Errorf("missing table name")
+ }
+
+ if len(s.Indexes) == 0 {
+ return fmt.Errorf("missing table indexes for '%s'", s.Name)
+ }
+
+ if _, ok := s.Indexes["id"]; !ok {
+ return fmt.Errorf("must have id index")
+ }
+
+ if !s.Indexes["id"].Unique {
+ return fmt.Errorf("id index must be unique")
+ }
+
+ if _, ok := s.Indexes["id"].Indexer.(SingleIndexer); !ok {
+ return fmt.Errorf("id index must be a SingleIndexer")
+ }
+
+ for name, index := range s.Indexes {
+ if name != index.Name {
+ return fmt.Errorf("index name mis-match for '%s'", name)
+ }
+
+ if err := index.Validate(); err != nil {
+ return fmt.Errorf("index %q: %s", name, err)
+ }
+ }
+
+ return nil
+}
+
+// IndexSchema is the schema for an index. An index defines how a table is
+// queried.
+type IndexSchema struct {
+ // Name of the index. This must be unique among a table's set of indexes.
+ // This must match the key in the map of Indexes for a TableSchema.
+ Name string
+
+ // AllowMissing, if true, ignores this index if it doesn't produce a
+ // value: for example, an index that extracts a field that doesn't
+ // exist from a structure.
+ AllowMissing bool
+
+ Unique bool
+ Indexer Indexer
+}
+
+func (s *IndexSchema) Validate() error {
+ if s.Name == "" {
+ return fmt.Errorf("missing index name")
+ }
+ if s.Indexer == nil {
+ return fmt.Errorf("missing index function for '%s'", s.Name)
+ }
+ switch s.Indexer.(type) {
+ case SingleIndexer:
+ case MultiIndexer:
+ default:
+ return fmt.Errorf("indexer for '%s' must be a SingleIndexer or MultiIndexer", s.Name)
+ }
+ return nil
+}
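+
+// Illustrative sketch (not part of the vendored source): Validate catches
+// common schema mistakes before the database is built. For example, a
+// table whose "id" index is not unique fails validation:
+//
+//	bad := &TableSchema{
+//		Name: "person", // hypothetical table
+//		Indexes: map[string]*IndexSchema{
+//			"id": {
+//				Name:    "id",
+//				Unique:  false, // invalid: the primary index must be unique
+//				Indexer: &StringFieldIndex{Field: "Email"},
+//			},
+//		},
+//	}
+//	err := bad.Validate() // returns "id index must be unique"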
diff --git a/vendor/github.com/hashicorp/go-memdb/txn.go b/vendor/github.com/hashicorp/go-memdb/txn.go
new file mode 100644
index 0000000..f83f4fa
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/txn.go
@@ -0,0 +1,1024 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package memdb
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+ "sync/atomic"
+ "unsafe"
+
+ iradix "github.com/hashicorp/go-immutable-radix"
+)
+
+const (
+ id = "id"
+)
+
+var (
+ // ErrNotFound is returned when the requested item is not found
+ ErrNotFound = fmt.Errorf("not found")
+)
+
+// tableIndex is a tuple of (Table, Index) used for lookups
+type tableIndex struct {
+ Table string
+ Index string
+}
+
+// Txn is a transaction against a MemDB.
+// This can be a read or write transaction.
+type Txn struct {
+ db *MemDB
+ write bool
+ rootTxn *iradix.Txn
+ after []func()
+
+ // changes is used to track the changes performed during the transaction. If
+ // it is nil at transaction start then changes are not tracked.
+ changes Changes
+
+ modified map[tableIndex]*iradix.Txn
+}
+
+// TrackChanges enables change tracking for the transaction. If called at any
+// point before commit, subsequent mutations will be recorded and can be
+// retrieved using ChangeSet. Once this has been called on a transaction it
+// can't be unset. As with other Txn methods it's not safe to call this from a
+// different goroutine than the one making mutations or committing the
+// transaction.
+func (txn *Txn) TrackChanges() {
+ if txn.changes == nil {
+ txn.changes = make(Changes, 0, 1)
+ }
+}
+
+// readableIndex returns a transaction usable for reading the given index in a
+// table. If the transaction is a write transaction with modifications, a clone of the
+// modified index will be returned.
+func (txn *Txn) readableIndex(table, index string) *iradix.Txn {
+ // Look for existing transaction
+ if txn.write && txn.modified != nil {
+ key := tableIndex{table, index}
+ exist, ok := txn.modified[key]
+ if ok {
+ return exist.Clone()
+ }
+ }
+
+ // Create a read transaction
+ path := indexPath(table, index)
+ raw, _ := txn.rootTxn.Get(path)
+ indexTxn := raw.(*iradix.Tree).Txn()
+ return indexTxn
+}
+
+// writableIndex returns a transaction usable for modifying the
+// given index in a table.
+func (txn *Txn) writableIndex(table, index string) *iradix.Txn {
+ if txn.modified == nil {
+ txn.modified = make(map[tableIndex]*iradix.Txn)
+ }
+
+ // Look for existing transaction
+ key := tableIndex{table, index}
+ exist, ok := txn.modified[key]
+ if ok {
+ return exist
+ }
+
+ // Start a new transaction
+ path := indexPath(table, index)
+ raw, _ := txn.rootTxn.Get(path)
+ indexTxn := raw.(*iradix.Tree).Txn()
+
+ // If we are the primary DB, enable mutation tracking. Snapshots should
+ // not notify, otherwise we will trigger watches on the primary DB when
+ // the writes will not be visible.
+ indexTxn.TrackMutate(txn.db.primary)
+
+ // Keep this open for the duration of the txn
+ txn.modified[key] = indexTxn
+ return indexTxn
+}
+
+// Abort is used to cancel this transaction.
+// This is a noop for read transactions, and for transactions
+// that were already aborted or committed.
+func (txn *Txn) Abort() {
+ // Noop for a read transaction
+ if !txn.write {
+ return
+ }
+
+ // Check if already aborted or committed
+ if txn.rootTxn == nil {
+ return
+ }
+
+ // Clear the txn
+ txn.rootTxn = nil
+ txn.modified = nil
+ txn.changes = nil
+
+ // Release the writer lock since the transaction is no longer usable
+ txn.db.writer.Unlock()
+}
+
+// Commit is used to finalize this transaction.
+// This is a noop for read transactions, and for transactions
+// that were already aborted or committed.
+func (txn *Txn) Commit() {
+ // Noop for a read transaction
+ if !txn.write {
+ return
+ }
+
+ // Check if already aborted or committed
+ if txn.rootTxn == nil {
+ return
+ }
+
+ // Commit each sub-transaction scoped to (table, index)
+ for key, subTxn := range txn.modified {
+ path := indexPath(key.Table, key.Index)
+ final := subTxn.CommitOnly()
+ txn.rootTxn.Insert(path, final)
+ }
+
+ // Update the root of the DB
+ newRoot := txn.rootTxn.CommitOnly()
+ atomic.StorePointer(&txn.db.root, unsafe.Pointer(newRoot))
+
+ // Now issue all of the mutation updates (this is safe to call
+ // even if mutation tracking isn't enabled); we do this after
+ // the root pointer is swapped so that waking responders will
+ // see the new state.
+ for _, subTxn := range txn.modified {
+ subTxn.Notify()
+ }
+ txn.rootTxn.Notify()
+
+ // Clear the txn
+ txn.rootTxn = nil
+ txn.modified = nil
+
+ // Release the writer lock since the transaction is no longer usable
+ txn.db.writer.Unlock()
+
+ // Run the deferred functions, if any
+ for i := len(txn.after); i > 0; i-- {
+ fn := txn.after[i-1]
+ fn()
+ }
+}
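+
+// Illustrative sketch (hypothetical table and object): a common lifecycle
+// is to defer Abort and rely on it being a noop once Commit has run:
+//
+//	txn := db.Txn(true)
+//	defer txn.Abort() // noop after a successful Commit
+//	if err := txn.Insert("person", p); err != nil {
+//		return err
+//	}
+//	txn.Commit()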
+
+// Insert is used to add or update an object into the given table.
+//
+// When updating an object, the obj provided should be a copy rather
+// than a value updated in-place. Modifying values in-place that are already
+// inserted into MemDB is not supported behavior.
+func (txn *Txn) Insert(table string, obj interface{}) error {
+ if !txn.write {
+ return fmt.Errorf("cannot insert in read-only transaction")
+ }
+
+ // Get the table schema
+ tableSchema, ok := txn.db.schema.Tables[table]
+ if !ok {
+ return fmt.Errorf("invalid table '%s'", table)
+ }
+
+ // Get the primary ID of the object
+ idSchema := tableSchema.Indexes[id]
+ idIndexer := idSchema.Indexer.(SingleIndexer)
+ ok, idVal, err := idIndexer.FromObject(obj)
+ if err != nil {
+ return fmt.Errorf("failed to build primary index: %v", err)
+ }
+ if !ok {
+ return fmt.Errorf("object missing primary index")
+ }
+
+ // Lookup the object by ID first, to see if this is an update
+ idTxn := txn.writableIndex(table, id)
+ existing, update := idTxn.Get(idVal)
+
+ // On an update, there is an existing object with the given
+ // primary ID. We do the update by deleting the current object
+ // and inserting the new object.
+ for name, indexSchema := range tableSchema.Indexes {
+ indexTxn := txn.writableIndex(table, name)
+
+ // Determine the new index value
+ var (
+ ok bool
+ vals [][]byte
+ err error
+ )
+ switch indexer := indexSchema.Indexer.(type) {
+ case SingleIndexer:
+ var val []byte
+ ok, val, err = indexer.FromObject(obj)
+ vals = [][]byte{val}
+ case MultiIndexer:
+ ok, vals, err = indexer.FromObject(obj)
+ }
+ if err != nil {
+ return fmt.Errorf("failed to build index '%s': %v", name, err)
+ }
+
+ // Handle non-unique index by computing a unique index.
+ // This is done by appending the primary key which must
+ // be unique anyways.
+ if ok && !indexSchema.Unique {
+ for i := range vals {
+ vals[i] = append(vals[i], idVal...)
+ }
+ }
+
+ // Handle the update by deleting from the index first
+ if update {
+ var (
+ okExist bool
+ valsExist [][]byte
+ err error
+ )
+ switch indexer := indexSchema.Indexer.(type) {
+ case SingleIndexer:
+ var valExist []byte
+ okExist, valExist, err = indexer.FromObject(existing)
+ valsExist = [][]byte{valExist}
+ case MultiIndexer:
+ okExist, valsExist, err = indexer.FromObject(existing)
+ }
+ if err != nil {
+ return fmt.Errorf("failed to build index '%s': %v", name, err)
+ }
+ if okExist {
+ for i, valExist := range valsExist {
+ // Handle non-unique index by computing a unique index.
+ // This is done by appending the primary key which must
+ // be unique anyways.
+ if !indexSchema.Unique {
+ valExist = append(valExist, idVal...)
+ }
+
+ // If we are writing to the same index with the same value,
+ // we can avoid the delete as the insert will overwrite the
+ // value anyways.
+ if i >= len(vals) || !bytes.Equal(valExist, vals[i]) {
+ indexTxn.Delete(valExist)
+ }
+ }
+ }
+ }
+
+ // If there is no index value, either this is an error or an expected
+ // case and we can skip updating
+ if !ok {
+ if indexSchema.AllowMissing {
+ continue
+ } else {
+ return fmt.Errorf("missing value for index '%s'", name)
+ }
+ }
+
+ // Update the value of the index
+ for _, val := range vals {
+ indexTxn.Insert(val, obj)
+ }
+ }
+ if txn.changes != nil {
+ txn.changes = append(txn.changes, Change{
+ Table: table,
+ Before: existing, // might be nil on a create
+ After: obj,
+ primaryKey: idVal,
+ })
+ }
+ return nil
+}
+
+// Delete is used to delete a single object from the given table.
+// This object must already exist in the table.
+func (txn *Txn) Delete(table string, obj interface{}) error {
+ if !txn.write {
+ return fmt.Errorf("cannot delete in read-only transaction")
+ }
+
+ // Get the table schema
+ tableSchema, ok := txn.db.schema.Tables[table]
+ if !ok {
+ return fmt.Errorf("invalid table '%s'", table)
+ }
+
+ // Get the primary ID of the object
+ idSchema := tableSchema.Indexes[id]
+ idIndexer := idSchema.Indexer.(SingleIndexer)
+ ok, idVal, err := idIndexer.FromObject(obj)
+ if err != nil {
+ return fmt.Errorf("failed to build primary index: %v", err)
+ }
+ if !ok {
+ return fmt.Errorf("object missing primary index")
+ }
+
+ // Lookup the object by ID first, check if we should continue
+ idTxn := txn.writableIndex(table, id)
+ existing, ok := idTxn.Get(idVal)
+ if !ok {
+ return ErrNotFound
+ }
+
+ // Remove the object from all the indexes
+ for name, indexSchema := range tableSchema.Indexes {
+ indexTxn := txn.writableIndex(table, name)
+
+ // Handle the update by deleting from the index first
+ var (
+ ok bool
+ vals [][]byte
+ err error
+ )
+ switch indexer := indexSchema.Indexer.(type) {
+ case SingleIndexer:
+ var val []byte
+ ok, val, err = indexer.FromObject(existing)
+ vals = [][]byte{val}
+ case MultiIndexer:
+ ok, vals, err = indexer.FromObject(existing)
+ }
+ if err != nil {
+ return fmt.Errorf("failed to build index '%s': %v", name, err)
+ }
+ if ok {
+ // Handle non-unique index by computing a unique index.
+ // This is done by appending the primary key which must
+ // be unique anyways.
+ for _, val := range vals {
+ if !indexSchema.Unique {
+ val = append(val, idVal...)
+ }
+ indexTxn.Delete(val)
+ }
+ }
+ }
+ if txn.changes != nil {
+ txn.changes = append(txn.changes, Change{
+ Table: table,
+ Before: existing,
+ After: nil, // nil indicates deletion
+ primaryKey: idVal,
+ })
+ }
+ return nil
+}
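+
+// Illustrative sketch (hypothetical table and object): Delete needs the
+// object itself (or at least its primary-key fields) and reports a missing
+// object via ErrNotFound:
+//
+//	if err := txn.Delete("person", p); err == ErrNotFound {
+//		// p was not present in the table
+//	}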
+
+// DeletePrefix is used to delete an entire subtree based on a prefix.
+// The given index must be a prefix index, and will be used to perform
+// a scan and enumerate the set of objects to delete. These will be
+// removed from all other indexes, and then a special prefix operation
+// will delete the objects from the given index in an efficient subtree
+// delete operation. This is useful when you have a very large number
+// of objects indexed by the given index, along with a much smaller
+// number of entries in the other indexes for those objects.
+func (txn *Txn) DeletePrefix(table string, prefixIndex string, prefix string) (bool, error) {
+ if !txn.write {
+ return false, fmt.Errorf("cannot delete in read-only transaction")
+ }
+
+ if !strings.HasSuffix(prefixIndex, "_prefix") {
+ return false, fmt.Errorf("index name for DeletePrefix must be a prefix index, got %v", prefixIndex)
+ }
+
+ deletePrefixIndex := strings.TrimSuffix(prefixIndex, "_prefix")
+
+ // Get an iterator over all of the keys with the given prefix.
+ entries, err := txn.Get(table, prefixIndex, prefix)
+ if err != nil {
+ return false, fmt.Errorf("failed kvs lookup: %s", err)
+ }
+ // Get the table schema
+ tableSchema, ok := txn.db.schema.Tables[table]
+ if !ok {
+ return false, fmt.Errorf("invalid table '%s'", table)
+ }
+
+ foundAny := false
+ for entry := entries.Next(); entry != nil; entry = entries.Next() {
+ foundAny = true
+ // Get the primary ID of the object
+ idSchema := tableSchema.Indexes[id]
+ idIndexer := idSchema.Indexer.(SingleIndexer)
+ ok, idVal, err := idIndexer.FromObject(entry)
+ if err != nil {
+ return false, fmt.Errorf("failed to build primary index: %v", err)
+ }
+ if !ok {
+ return false, fmt.Errorf("object missing primary index")
+ }
+ if txn.changes != nil {
+ // Record the deletion
+ idTxn := txn.writableIndex(table, id)
+ existing, ok := idTxn.Get(idVal)
+ if ok {
+ txn.changes = append(txn.changes, Change{
+ Table: table,
+ Before: existing,
+ After: nil, // nil indicates deletion
+ primaryKey: idVal,
+ })
+ }
+ }
+ // Remove the object from all the indexes except the given prefix index
+ for name, indexSchema := range tableSchema.Indexes {
+ if name == deletePrefixIndex {
+ continue
+ }
+ indexTxn := txn.writableIndex(table, name)
+
+ // Handle the update by deleting from the index first
+ var (
+ ok bool
+ vals [][]byte
+ err error
+ )
+ switch indexer := indexSchema.Indexer.(type) {
+ case SingleIndexer:
+ var val []byte
+ ok, val, err = indexer.FromObject(entry)
+ vals = [][]byte{val}
+ case MultiIndexer:
+ ok, vals, err = indexer.FromObject(entry)
+ }
+ if err != nil {
+ return false, fmt.Errorf("failed to build index '%s': %v", name, err)
+ }
+
+ if ok {
+ // Handle non-unique index by computing a unique index.
+ // This is done by appending the primary key which must
+ // be unique anyways.
+ for _, val := range vals {
+ if !indexSchema.Unique {
+ val = append(val, idVal...)
+ }
+ indexTxn.Delete(val)
+ }
+ }
+ }
+
+ }
+ if foundAny {
+ indexTxn := txn.writableIndex(table, deletePrefixIndex)
+ ok = indexTxn.DeletePrefix([]byte(prefix))
+ if !ok {
+ panic(fmt.Errorf("prefix %v matched some entries but DeletePrefix did not delete any ", prefix))
+ }
+ return true, nil
+ }
+ return false, nil
+}
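+
+// Illustrative sketch (hypothetical "person" table keyed by email): remove
+// every object whose id starts with a given prefix. Note the "_prefix"
+// suffix on the index name:
+//
+//	deleted, err := txn.DeletePrefix("person", "id_prefix", "alice@")
+//	// deleted reports whether anything matched and was removed.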
+
+// DeleteAll is used to delete all the objects in a given table
+// matching the constraints on the index
+func (txn *Txn) DeleteAll(table, index string, args ...interface{}) (int, error) {
+ if !txn.write {
+ return 0, fmt.Errorf("cannot delete in read-only transaction")
+ }
+
+ // Get all the objects
+ iter, err := txn.Get(table, index, args...)
+ if err != nil {
+ return 0, err
+ }
+
+ // Put them into a slice so there are no safety concerns while actually
+ // performing the deletes
+ var objs []interface{}
+ for {
+ obj := iter.Next()
+ if obj == nil {
+ break
+ }
+
+ objs = append(objs, obj)
+ }
+
+ // Do the deletes
+ num := 0
+ for _, obj := range objs {
+ if err := txn.Delete(table, obj); err != nil {
+ return num, err
+ }
+ num++
+ }
+ return num, nil
+}
+
+// FirstWatch is used to return the first matching object for
+// the given constraints on the index along with the watch channel.
+//
+// Note that all values read in the transaction form a consistent snapshot
+// from the time when the transaction was created.
+//
+// The watch channel is closed when a subsequent write transaction
+// has updated the result of the query. Since each read transaction
+// operates on an isolated snapshot, a new read transaction must be
+// started to observe the changes that have been made.
+//
+// If the value of index ends with "_prefix", FirstWatch will perform a prefix
+// match instead of a full match on the index. The registered indexer must
+// implement PrefixIndexer; otherwise an error is returned.
+func (txn *Txn) FirstWatch(table, index string, args ...interface{}) (<-chan struct{}, interface{}, error) {
+ // Get the index value
+ indexSchema, val, err := txn.getIndexValue(table, index, args...)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Get the index itself
+ indexTxn := txn.readableIndex(table, indexSchema.Name)
+
+ // Do an exact lookup
+ if indexSchema.Unique && val != nil && indexSchema.Name == index {
+ watch, obj, ok := indexTxn.GetWatch(val)
+ if !ok {
+ return watch, nil, nil
+ }
+ return watch, obj, nil
+ }
+
+ // Handle non-unique index by using an iterator and getting the first value
+ iter := indexTxn.Root().Iterator()
+ watch := iter.SeekPrefixWatch(val)
+ _, value, _ := iter.Next()
+ return watch, value, nil
+}
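+
+// Illustrative sketch (hypothetical table): FirstWatch pairs the result
+// with a channel that is closed when the result may have changed:
+//
+//	watch, raw, err := txn.FirstWatch("person", "id", "a@b.c")
+//	_ = raw
+//	if err == nil {
+//		select {
+//		case <-watch:
+//			// the result may have changed; re-query in a new txn
+//		case <-time.After(time.Second):
+//			// timed out waiting for a change
+//		}
+//	}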
+
+// LastWatch is used to return the last matching object for
+// the given constraints on the index along with the watch channel.
+//
+// Note that all values read in the transaction form a consistent snapshot
+// from the time when the transaction was created.
+//
+// The watch channel is closed when a subsequent write transaction
+// has updated the result of the query. Since each read transaction
+// operates on an isolated snapshot, a new read transaction must be
+// started to observe the changes that have been made.
+//
+// If the value of index ends with "_prefix", LastWatch will perform a prefix
+// match instead of a full match on the index. The registered indexer must
+// implement PrefixIndexer; otherwise an error is returned.
+func (txn *Txn) LastWatch(table, index string, args ...interface{}) (<-chan struct{}, interface{}, error) {
+ // Get the index value
+ indexSchema, val, err := txn.getIndexValue(table, index, args...)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Get the index itself
+ indexTxn := txn.readableIndex(table, indexSchema.Name)
+
+ // Do an exact lookup
+ if indexSchema.Unique && val != nil && indexSchema.Name == index {
+ watch, obj, ok := indexTxn.GetWatch(val)
+ if !ok {
+ return watch, nil, nil
+ }
+ return watch, obj, nil
+ }
+
+ // Handle non-unique index by using an iterator and getting the last value
+ iter := indexTxn.Root().ReverseIterator()
+ watch := iter.SeekPrefixWatch(val)
+ _, value, _ := iter.Previous()
+ return watch, value, nil
+}
+
+// First is used to return the first matching object for
+// the given constraints on the index.
+//
+// Note that all values read in the transaction form a consistent snapshot
+// from the time when the transaction was created.
+func (txn *Txn) First(table, index string, args ...interface{}) (interface{}, error) {
+ _, val, err := txn.FirstWatch(table, index, args...)
+ return val, err
+}
+
+// Last is used to return the last matching object for
+// the given constraints on the index.
+//
+// Note that all values read in the transaction form a consistent snapshot
+// from the time when the transaction was created.
+func (txn *Txn) Last(table, index string, args ...interface{}) (interface{}, error) {
+ _, val, err := txn.LastWatch(table, index, args...)
+ return val, err
+}
+
+// LongestPrefix is used to fetch the longest prefix match for the given
+// constraints on the index. Note that this will not work with the memdb
+// StringFieldIndex because it adds null terminators which prevent the
+// algorithm from correctly finding a match (it will get to right before the
+// null and fail to find a leaf node). This should only be used where the prefix
+// given is capable of matching indexed entries directly, which typically only
+// applies to a custom indexer. See the unit test for an example.
+//
+// Note that all values read in the transaction form a consistent snapshot
+// from the time when the transaction was created.
+func (txn *Txn) LongestPrefix(table, index string, args ...interface{}) (interface{}, error) {
+ // Enforce that this only works on prefix indexes.
+ if !strings.HasSuffix(index, "_prefix") {
+ return nil, fmt.Errorf("must use '%s_prefix' on index", index)
+ }
+
+ // Get the index value.
+ indexSchema, val, err := txn.getIndexValue(table, index, args...)
+ if err != nil {
+ return nil, err
+ }
+
+ // This algorithm only makes sense against a unique index, otherwise the
+ // index keys will have the IDs appended to them.
+ if !indexSchema.Unique {
+ return nil, fmt.Errorf("index '%s' is not unique", index)
+ }
+
+ // Find the longest prefix match with the given index.
+ indexTxn := txn.readableIndex(table, indexSchema.Name)
+ if _, value, ok := indexTxn.Root().LongestPrefix(val); ok {
+ return value, nil
+ }
+ return nil, nil
+}
+
+// getIndexValue is used to get the IndexSchema and the value
+// used to scan the index given the parameters. This handles prefix based
+// scans when the index has the "_prefix" suffix. The index must support
+// prefix iteration.
+func (txn *Txn) getIndexValue(table, index string, args ...interface{}) (*IndexSchema, []byte, error) {
+ // Get the table schema
+ tableSchema, ok := txn.db.schema.Tables[table]
+ if !ok {
+ return nil, nil, fmt.Errorf("invalid table '%s'", table)
+ }
+
+ // Check for a prefix scan
+ prefixScan := false
+ if strings.HasSuffix(index, "_prefix") {
+ index = strings.TrimSuffix(index, "_prefix")
+ prefixScan = true
+ }
+
+ // Get the index schema
+ indexSchema, ok := tableSchema.Indexes[index]
+ if !ok {
+ return nil, nil, fmt.Errorf("invalid index '%s'", index)
+ }
+
+ // Hot-path for when there are no arguments
+ if len(args) == 0 {
+ return indexSchema, nil, nil
+ }
+
+ // Special case the prefix scanning
+ if prefixScan {
+ prefixIndexer, ok := indexSchema.Indexer.(PrefixIndexer)
+ if !ok {
+ return indexSchema, nil,
+ fmt.Errorf("index '%s' does not support prefix scanning", index)
+ }
+
+ val, err := prefixIndexer.PrefixFromArgs(args...)
+ if err != nil {
+ return indexSchema, nil, fmt.Errorf("index error: %v", err)
+ }
+ return indexSchema, val, err
+ }
+
+ // Get the exact match index
+ val, err := indexSchema.Indexer.FromArgs(args...)
+ if err != nil {
+ return indexSchema, nil, fmt.Errorf("index error: %v", err)
+ }
+ return indexSchema, val, err
+}
+
+// ResultIterator is used to iterate over a list of results from a query on a table.
+//
+// When a ResultIterator is created from a write transaction, the results from
+// Next will reflect a snapshot of the table at the time the ResultIterator is
+// created.
+// This means that calling Insert or Delete on a transaction while iterating is
+// allowed, but the changes made by Insert or Delete will not be observed in the
+// results returned from subsequent calls to Next. For example if an item is deleted
+// from the index used by the iterator it will still be returned by Next. If an
+// item is inserted into the index used by the iterator, it will not be returned
+// by Next. However, an iterator created after a call to Insert or Delete will
+// reflect the modifications.
+//
+// When a ResultIterator is created from a write transaction, and there are already
+// modifications to the index used by the iterator, the modification cache of the
+// index will be invalidated. This may result in some additional allocations if
+// the same node in the index is modified again.
+type ResultIterator interface {
+ WatchCh() <-chan struct{}
+ // Next returns the next result from the iterator. If there are no more results
+ // nil is returned.
+ Next() interface{}
+}
+
+// Get is used to construct a ResultIterator over all the rows that match the
+// given constraints of an index. By default, the index values must match
+// exactly (this is not a range-based or prefix-based lookup).
+//
+// Prefix lookups: if the named index implements PrefixIndexer, you may perform
+// prefix-based lookups by appending "_prefix" to the index name. In this
+// scenario, the index values given in args are treated as prefix lookups. For
+// example, a StringFieldIndex will match any string with the given value
+// as a prefix: "mem" matches "memdb".
+//
+// See the documentation for ResultIterator to understand the behaviour of the
+// returned ResultIterator.
+func (txn *Txn) Get(table, index string, args ...interface{}) (ResultIterator, error) {
+ indexIter, val, err := txn.getIndexIterator(table, index, args...)
+ if err != nil {
+ return nil, err
+ }
+
+ // Seek the iterator to the appropriate sub-set
+ watchCh := indexIter.SeekPrefixWatch(val)
+
+ // Create an iterator
+ iter := &radixIterator{
+ iter: indexIter,
+ watchCh: watchCh,
+ }
+ return iter, nil
+}
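+
+// Illustrative sketch (hypothetical table): the idiomatic way to drain a
+// ResultIterator is to loop until Next returns nil:
+//
+//	it, err := txn.Get("person", "id")
+//	if err == nil {
+//		for obj := it.Next(); obj != nil; obj = it.Next() {
+//			// process obj
+//		}
+//	}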
+
+// GetReverse is used to construct a Reverse ResultIterator over all the
+// rows that match the given constraints of an index.
+// Next() on the returned ResultIterator yields results in reverse
+// (descending) index order.
+//
+// See the documentation on Get for details on arguments.
+//
+// See the documentation for ResultIterator to understand the behaviour of the
+// returned ResultIterator.
+func (txn *Txn) GetReverse(table, index string, args ...interface{}) (ResultIterator, error) {
+ indexIter, val, err := txn.getIndexIteratorReverse(table, index, args...)
+ if err != nil {
+ return nil, err
+ }
+
+ // Seek the iterator to the appropriate sub-set
+ watchCh := indexIter.SeekPrefixWatch(val)
+
+ // Create an iterator
+ iter := &radixReverseIterator{
+ iter: indexIter,
+ watchCh: watchCh,
+ }
+ return iter, nil
+}
+
+// LowerBound is used to construct a ResultIterator over the range of
+// rows that have an index value greater than or equal to the provided
+// args. Calling this then iterating until the rows are larger than
+// required allows range scans within an index. It is not possible to
+// watch the resulting iterator since the radix tree doesn't efficiently
+// allow watching on lower bound changes. The WatchCh returned will be
+// nil and so will block forever.
+//
+// If the value of index ends with "_prefix", LowerBound will perform a prefix
+// match instead of a full match on the index. The registered indexer must
+// implement PrefixIndexer; otherwise an error is returned.
+//
+// See the documentation for ResultIterator to understand the behaviour of the
+// returned ResultIterator.
+func (txn *Txn) LowerBound(table, index string, args ...interface{}) (ResultIterator, error) {
+ indexIter, val, err := txn.getIndexIterator(table, index, args...)
+ if err != nil {
+ return nil, err
+ }
+
+ // Seek the iterator to the appropriate sub-set
+ indexIter.SeekLowerBound(val)
+
+ // Create an iterator
+ iter := &radixIterator{
+ iter: indexIter,
+ }
+ return iter, nil
+}
+
+// ReverseLowerBound is used to construct a Reverse ResultIterator over
+// the range of rows that have an index value less than or equal to the
+// provided args. Calling this then iterating until the rows are lower
+// than required allows range scans within an index. It is not possible
+// to watch the resulting iterator since the radix tree doesn't
+// efficiently allow watching on lower bound changes. The WatchCh
+// returned will be nil and so will block forever.
+//
+// See the documentation for ResultIterator to understand the behaviour of the
+// returned ResultIterator.
+func (txn *Txn) ReverseLowerBound(table, index string, args ...interface{}) (ResultIterator, error) {
+ indexIter, val, err := txn.getIndexIteratorReverse(table, index, args...)
+ if err != nil {
+ return nil, err
+ }
+
+ // Seek the iterator to the appropriate sub-set
+ indexIter.SeekReverseLowerBound(val)
+
+ // Create an iterator
+ iter := &radixReverseIterator{
+ iter: indexIter,
+ }
+ return iter, nil
+}
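+
+// Illustrative sketch (hypothetical integer "age" index and Person app
+// type): a bounded range scan stops manually once rows pass the upper
+// bound, since LowerBound only fixes the starting point:
+//
+//	it, err := txn.LowerBound("person", "age", 25)
+//	if err == nil {
+//		for obj := it.Next(); obj != nil; obj = it.Next() {
+//			p := obj.(*Person)
+//			if p.Age >= 65 {
+//				break // past the end of the desired range
+//			}
+//		}
+//	}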
+
+// objectID is a tuple of table name and the raw internal id byte slice
+// converted to a string. The conversion is only to make the value
+// comparable so this struct can be used as a map key.
+type objectID struct {
+ Table string
+ IndexVal string
+}
+
+// mutInfo stores metadata about mutations to allow collapsing multiple
+// mutations to the same object into one.
+type mutInfo struct {
+ firstBefore interface{}
+ lastIdx int
+}
+
+// Changes returns the set of object changes that have been made in the
+// transaction so far. If change tracking is not enabled it will always return
+// nil. It can be called before or after Commit. If it is before Commit it will
+// return all changes made so far which may not be the same as the final
+// Changes. After abort it will always return nil. As with other Txn methods
+// it's not safe to call this from a different goroutine than the one making
+// mutations or committing the transaction. Mutations will appear in the order
+// they were performed in the transaction but multiple operations to the same
+// object will be collapsed so only the effective overall change to that object
+// is present. If transaction operations are dependent (e.g. copy object X to Y
+// then delete X) this might mean the set of mutations is incomplete to verify
+// history, but it is complete in that the net effect is preserved (Y got a new
+// value, X got removed).
+func (txn *Txn) Changes() Changes {
+ if txn.changes == nil {
+ return nil
+ }
+
+ // De-duplicate mutations by key so all take effect at the point of the last
+ // write but we keep the mutations in order.
+ dups := make(map[objectID]mutInfo)
+ for i, m := range txn.changes {
+ oid := objectID{
+ Table: m.Table,
+ IndexVal: string(m.primaryKey),
+ }
+ // Store the latest mutation index for each key value
+ mi, ok := dups[oid]
+ if !ok {
+ // First entry for key, store the before value
+ mi.firstBefore = m.Before
+ }
+ mi.lastIdx = i
+ dups[oid] = mi
+ }
+ if len(dups) == len(txn.changes) {
+ // No duplicates found; fast path: return it as is
+ return txn.changes
+ }
+
+ // Need to remove the duplicates
+ cs := make(Changes, 0, len(dups))
+ for i, m := range txn.changes {
+ oid := objectID{
+ Table: m.Table,
+ IndexVal: string(m.primaryKey),
+ }
+ mi := dups[oid]
+ if mi.lastIdx == i {
+ // This was the latest value for this key; copy it with the first
+ // before value in case it's different. Note that m is not a pointer,
+ // so we are not modifying txn.changes here - it's already a copy.
+ m.Before = mi.firstBefore
+
+ // Edge case - if the object was inserted and then eventually deleted in
+ // the same transaction, then the net effect on that key is a no-op. Don't
+ // emit a mutation with nil for before and after as it's meaningless and
+ // might violate expectations and cause a panic in code that assumes at
+ // least one must be set.
+ if m.Before == nil && m.After == nil {
+ continue
+ }
+ cs = append(cs, m)
+ }
+ }
+ // Store the de-duped version in case this is called again
+ txn.changes = cs
+ return cs
+}
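+
+// Illustrative sketch (hypothetical table and object): enable tracking
+// before mutating, then read the collapsed change set after Commit:
+//
+//	txn := db.Txn(true)
+//	txn.TrackChanges()
+//	_ = txn.Insert("person", p)
+//	txn.Commit()
+//	for _, ch := range txn.Changes() {
+//		// ch.Before is nil for creates; ch.After is nil for deletes.
+//	}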
+
+func (txn *Txn) getIndexIterator(table, index string, args ...interface{}) (*iradix.Iterator, []byte, error) {
+ // Get the index value to scan
+ indexSchema, val, err := txn.getIndexValue(table, index, args...)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Get the index itself
+ indexTxn := txn.readableIndex(table, indexSchema.Name)
+ indexRoot := indexTxn.Root()
+
+ // Get an iterator over the index
+ indexIter := indexRoot.Iterator()
+ return indexIter, val, nil
+}
+
+func (txn *Txn) getIndexIteratorReverse(table, index string, args ...interface{}) (*iradix.ReverseIterator, []byte, error) {
+ // Get the index value to scan
+ indexSchema, val, err := txn.getIndexValue(table, index, args...)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Get the index itself
+ indexTxn := txn.readableIndex(table, indexSchema.Name)
+ indexRoot := indexTxn.Root()
+
+ // Get an iterator over the index
+ indexIter := indexRoot.ReverseIterator()
+ return indexIter, val, nil
+}
+
+// Defer is used to push a new arbitrary function onto a stack which
+// gets called when a transaction is committed and finished. Deferred
+// functions are called in LIFO order, and only invoked at the end of
+// write transactions.
+func (txn *Txn) Defer(fn func()) {
+ txn.after = append(txn.after, fn)
+}
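+
+// Illustrative sketch (done is a hypothetical channel): deferred functions
+// run in LIFO order after a successful Commit, which suits notifications
+// that must follow the write:
+//
+//	txn.Defer(func() { close(done) })
+//	txn.Commit() // runs the deferred function after the commit completes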
+
+// radixIterator is used to wrap an underlying iradix iterator.
+// This is much more efficient than a sliceIterator as we are not
+// materializing the entire view.
+type radixIterator struct {
+ iter *iradix.Iterator
+ watchCh <-chan struct{}
+}
+
+func (r *radixIterator) WatchCh() <-chan struct{} {
+ return r.watchCh
+}
+
+func (r *radixIterator) Next() interface{} {
+ _, value, ok := r.iter.Next()
+ if !ok {
+ return nil
+ }
+ return value
+}
+
+type radixReverseIterator struct {
+ iter *iradix.ReverseIterator
+ watchCh <-chan struct{}
+}
+
+func (r *radixReverseIterator) Next() interface{} {
+ _, value, ok := r.iter.Previous()
+ if !ok {
+ return nil
+ }
+ return value
+}
+
+func (r *radixReverseIterator) WatchCh() <-chan struct{} {
+ return r.watchCh
+}
+
+// Snapshot creates a snapshot of the current state of the transaction.
+// Returns a new read-only transaction or nil if the transaction is already
+// aborted or committed.
+func (txn *Txn) Snapshot() *Txn {
+ if txn.rootTxn == nil {
+ return nil
+ }
+
+ snapshot := &Txn{
+ db: txn.db,
+ rootTxn: txn.rootTxn.Clone(),
+ }
+
+ // Commit sub-transactions into the snapshot
+ for key, subTxn := range txn.modified {
+ path := indexPath(key.Table, key.Index)
+ final := subTxn.CommitOnly()
+ snapshot.rootTxn.Insert(path, final)
+ }
+
+ return snapshot
+}
diff --git a/vendor/github.com/hashicorp/go-memdb/watch.go b/vendor/github.com/hashicorp/go-memdb/watch.go
new file mode 100644
index 0000000..4d9cc7e
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/watch.go
@@ -0,0 +1,155 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package memdb
+
+import (
+ "context"
+ "time"
+)
+
+// WatchSet is a collection of watch channels. The zero value is not usable.
+// Use NewWatchSet to create a WatchSet.
+type WatchSet map[<-chan struct{}]struct{}
+
+// NewWatchSet constructs a new watch set.
+func NewWatchSet() WatchSet {
+ return make(map[<-chan struct{}]struct{})
+}
+
+// Add inserts watchCh into the WatchSet. It is a noop on a nil WatchSet.
+func (w WatchSet) Add(watchCh <-chan struct{}) {
+ if w == nil {
+ return
+ }
+
+ if _, ok := w[watchCh]; !ok {
+ w[watchCh] = struct{}{}
+ }
+}
+
+// AddWithLimit appends a watchCh to the WatchSet if non-nil, and if the given
+// softLimit hasn't been exceeded. Otherwise, it will watch the given alternate
+// channel. The altCh is expected to be the same across many calls to this
+// function, so the set may exceed the soft limit slightly, but not by much.
+//
+// This is useful if you want to track individual items up to some limit, after
+// which you watch a higher-level channel (usually a channel from start of
+// an iterator higher up in the radix tree) that will watch a superset of items.
+func (w WatchSet) AddWithLimit(softLimit int, watchCh <-chan struct{}, altCh <-chan struct{}) {
+ // This is safe for a nil WatchSet so we don't need to check that here.
+ if len(w) < softLimit {
+ w.Add(watchCh)
+ } else {
+ w.Add(altCh)
+ }
+}
+
+// Watch blocks until one of the channels in the watch set is closed, or
+// timeoutCh sends a value.
+// Returns true if timeoutCh is what caused Watch to unblock.
+func (w WatchSet) Watch(timeoutCh <-chan time.Time) bool {
+ if w == nil {
+ return false
+ }
+
+ // Create a context that gets cancelled when the timeout is triggered
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ go func() {
+ select {
+ case <-timeoutCh:
+ cancel()
+ case <-ctx.Done():
+ }
+ }()
+
+ return w.WatchCtx(ctx) == context.Canceled
+}
+
+// WatchCtx blocks until one of the channels in the watch set is closed, or
+// ctx is done (cancelled or exceeds the deadline). WatchCtx returns an error
+// if the ctx causes it to unblock, otherwise returns nil.
+//
+// WatchCtx should be preferred over Watch.
+func (w WatchSet) WatchCtx(ctx context.Context) error {
+ if w == nil {
+ return nil
+ }
+
+ if n := len(w); n <= aFew {
+ idx := 0
+ chunk := make([]<-chan struct{}, aFew)
+ for watchCh := range w {
+ chunk[idx] = watchCh
+ idx++
+ }
+ return watchFew(ctx, chunk)
+ }
+
+ return w.watchMany(ctx)
+}
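+
+// Illustrative sketch (hypothetical query and ctx): collect watch channels
+// from queries, then block until one fires or the context ends:
+//
+//	ws := NewWatchSet()
+//	it, _ := txn.Get("person", "id")
+//	ws.Add(it.WatchCh())
+//	if err := ws.WatchCtx(ctx); err != nil {
+//		// ctx was cancelled or hit its deadline
+//	} else {
+//		// some watched channel closed; re-run the query in a new txn
+//	}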
+
+// watchMany is used if there are many watchers.
+func (w WatchSet) watchMany(ctx context.Context) error {
+ // Cancel all watcher goroutines when we return.
+ watcherCtx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ // Set up a goroutine for each watcher.
+ triggerCh := make(chan struct{}, 1)
+ watcher := func(chunk []<-chan struct{}) {
+ if err := watchFew(watcherCtx, chunk); err == nil {
+ select {
+ case triggerCh <- struct{}{}:
+ default:
+ }
+ }
+ }
+
+ // Apportion the watch channels into chunks we can feed into the
+ // watchFew helper.
+ idx := 0
+ chunk := make([]<-chan struct{}, aFew)
+ for watchCh := range w {
+ subIdx := idx % aFew
+ chunk[subIdx] = watchCh
+ idx++
+
+ // Fire off this chunk and start a fresh one.
+ if idx%aFew == 0 {
+ go watcher(chunk)
+ chunk = make([]<-chan struct{}, aFew)
+ }
+ }
+
+ // Make sure to watch any residual channels in the last chunk.
+ if idx%aFew != 0 {
+ go watcher(chunk)
+ }
+
+ // Wait for a channel to trigger or timeout.
+ select {
+ case <-triggerCh:
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+}
+
+// WatchCh returns a channel that is used to wait for any channel of the watch set to trigger
+// or for the context to be cancelled. WatchCh creates a new goroutine each call, so
+// callers may need to cache the returned channel to avoid creating extra goroutines.
+func (w WatchSet) WatchCh(ctx context.Context) <-chan error {
+ // Create the outgoing channel
+ triggerCh := make(chan error, 1)
+
+ // Create a goroutine to collect the error from WatchCtx
+ go func() {
+ triggerCh <- w.WatchCtx(ctx)
+ }()
+
+ return triggerCh
+}
diff --git a/vendor/github.com/hashicorp/go-memdb/watch_few.go b/vendor/github.com/hashicorp/go-memdb/watch_few.go
new file mode 100644
index 0000000..ccdbff0
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/watch_few.go
@@ -0,0 +1,120 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: MPL-2.0
+
+package memdb
+
+//go:generate sh -c "go run watch-gen/main.go >watch_few.go"
+
+import (
+ "context"
+)
+
+// aFew gives how many watchers watchFew is wired to support. You must
+// always pass a full slice of this length, but unused channels can be nil.
+const aFew = 32
+
+// watchFew is used if there are only a few watchers as a performance
+// optimization.
+func watchFew(ctx context.Context, ch []<-chan struct{}) error {
+ select {
+
+ case <-ch[0]:
+ return nil
+
+ case <-ch[1]:
+ return nil
+
+ case <-ch[2]:
+ return nil
+
+ case <-ch[3]:
+ return nil
+
+ case <-ch[4]:
+ return nil
+
+ case <-ch[5]:
+ return nil
+
+ case <-ch[6]:
+ return nil
+
+ case <-ch[7]:
+ return nil
+
+ case <-ch[8]:
+ return nil
+
+ case <-ch[9]:
+ return nil
+
+ case <-ch[10]:
+ return nil
+
+ case <-ch[11]:
+ return nil
+
+ case <-ch[12]:
+ return nil
+
+ case <-ch[13]:
+ return nil
+
+ case <-ch[14]:
+ return nil
+
+ case <-ch[15]:
+ return nil
+
+ case <-ch[16]:
+ return nil
+
+ case <-ch[17]:
+ return nil
+
+ case <-ch[18]:
+ return nil
+
+ case <-ch[19]:
+ return nil
+
+ case <-ch[20]:
+ return nil
+
+ case <-ch[21]:
+ return nil
+
+ case <-ch[22]:
+ return nil
+
+ case <-ch[23]:
+ return nil
+
+ case <-ch[24]:
+ return nil
+
+ case <-ch[25]:
+ return nil
+
+ case <-ch[26]:
+ return nil
+
+ case <-ch[27]:
+ return nil
+
+ case <-ch[28]:
+ return nil
+
+ case <-ch[29]:
+ return nil
+
+ case <-ch[30]:
+ return nil
+
+ case <-ch[31]:
+ return nil
+
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+}
diff --git a/vendor/github.com/hashicorp/golang-lru/LICENSE b/vendor/github.com/hashicorp/golang-lru/LICENSE
new file mode 100644
index 0000000..0e5d580
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/LICENSE
@@ -0,0 +1,364 @@
+Copyright (c) 2014 HashiCorp, Inc.
+
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the terms of
+ a Secondary License.
+
+1.6. "Executable Form"
+
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+ means a work that combines Covered Software with other material, in a
+ separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+ means this document.
+
+1.9. "Licensable"
+
+ means having the right to grant, to the maximum extent possible, whether
+ at the time of the initial grant or subsequently, any and all of the
+ rights conveyed by this License.
+
+1.10. "Modifications"
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the License,
+ by the making, using, selling, offering for sale, having made, import,
+ or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, "control" means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution
+ become effective for each Contribution on the date the Contributor first
+ distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under
+ this License. No additional rights or licenses will be implied from the
+ distribution or licensing of Covered Software under this License.
+ Notwithstanding Section 2.1(b) above, no patent license is granted by a
+ Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+ This License does not grant any rights in the trademarks, service marks,
+ or logos of any Contributor (except as may be necessary to comply with
+ the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this
+ License (see Section 10.2) or under the terms of a Secondary License (if
+ permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its
+ Contributions are its original creation(s) or it has sufficient rights to
+ grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under
+ applicable copyright doctrines of fair use, fair dealing, or other
+ equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under
+ the terms of this License. You must inform recipients that the Source
+ Code Form of the Covered Software is governed by the terms of this
+ License, and how they can obtain a copy of this License. You may not
+ attempt to alter or restrict the recipients' rights in the Source Code
+ Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter the
+ recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for
+ the Covered Software. If the Larger Work is a combination of Covered
+ Software with a work governed by one or more Secondary Licenses, and the
+ Covered Software is not Incompatible With Secondary Licenses, this
+ License permits You to additionally distribute such Covered Software
+ under the terms of such Secondary License(s), so that the recipient of
+ the Larger Work may, at their option, further distribute the Covered
+ Software under the terms of either this License or such Secondary
+ License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices
+ (including copyright notices, patent notices, disclaimers of warranty, or
+ limitations of liability) contained within the Source Code Form of the
+ Covered Software, except that You may alter any license notices to the
+ extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on
+ behalf of any Contributor. You must make it absolutely clear that any
+ such warranty, support, indemnity, or liability obligation is offered by
+ You alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute,
+ judicial order, or regulation then You must: (a) comply with the terms of
+ this License to the maximum extent possible; and (b) describe the
+ limitations and the code they affect. Such description must be placed in a
+ text file included with all distributions of the Covered Software under
+ this License. Except to the extent prohibited by statute or regulation,
+ such description must be sufficiently detailed for a recipient of ordinary
+ skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing
+ basis, if such Contributor fails to notify You of the non-compliance by
+ some reasonable means prior to 60 days after You have come back into
+ compliance. Moreover, Your grants from a particular Contributor are
+ reinstated on an ongoing basis if such Contributor notifies You of the
+ non-compliance by some reasonable means, this is the first time You have
+ received notice of non-compliance with this License from such
+ Contributor, and You become compliant prior to 30 days after Your receipt
+ of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions,
+ counter-claims, and cross-claims) alleging that a Contributor Version
+ directly or indirectly infringes any patent, then the rights granted to
+ You by any and all Contributors for the Covered Software under Section
+ 2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an "as is" basis,
+ without warranty of any kind, either expressed, implied, or statutory,
+ including, without limitation, warranties that the Covered Software is free
+ of defects, merchantable, fit for a particular purpose or non-infringing.
+ The entire risk as to the quality and performance of the Covered Software
+ is with You. Should any Covered Software prove defective in any respect,
+ You (not any Contributor) assume the cost of any necessary servicing,
+ repair, or correction. This disclaimer of warranty constitutes an essential
+ part of this License. No use of any Covered Software is authorized under
+ this License except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from
+ such party's negligence to the extent applicable law prohibits such
+ limitation. Some jurisdictions do not allow the exclusion or limitation of
+ incidental or consequential damages, so this exclusion and limitation may
+ not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts
+ of a jurisdiction where the defendant maintains its principal place of
+ business and such litigation shall be governed by laws of that
+ jurisdiction, without reference to its conflict-of-law provisions. Nothing
+ in this Section shall prevent a party's ability to bring cross-claims or
+ counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. Any law or regulation which provides that
+ the language of a contract shall be construed against the drafter shall not
+ be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version
+ of the License under which You originally received the Covered Software,
+ or under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a
+ modified version of this License if you rename the license and remove
+ any references to the name of the license steward (except to note that
+ such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+ Licenses If You choose to distribute Source Code Form that is
+ Incompatible With Secondary Licenses under the terms of this version of
+ the License, the notice described in Exhibit B of this License must be
+ attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+ This Source Code Form is "Incompatible
+ With Secondary Licenses", as defined by
+ the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go
new file mode 100644
index 0000000..9233583
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go
@@ -0,0 +1,177 @@
+package simplelru
+
+import (
+ "container/list"
+ "errors"
+)
+
+// EvictCallback is used to get a callback when a cache entry is evicted
+type EvictCallback func(key interface{}, value interface{})
+
+// LRU implements a non-thread-safe, fixed-size LRU cache
+type LRU struct {
+ size int
+ evictList *list.List
+ items map[interface{}]*list.Element
+ onEvict EvictCallback
+}
+
+// entry is used to hold a value in the evictList
+type entry struct {
+ key interface{}
+ value interface{}
+}
+
+// NewLRU constructs an LRU of the given size
+func NewLRU(size int, onEvict EvictCallback) (*LRU, error) {
+ if size <= 0 {
+ return nil, errors.New("must provide a positive size")
+ }
+ c := &LRU{
+ size: size,
+ evictList: list.New(),
+ items: make(map[interface{}]*list.Element),
+ onEvict: onEvict,
+ }
+ return c, nil
+}
+
+// Purge is used to completely clear the cache.
+func (c *LRU) Purge() {
+ for k, v := range c.items {
+ if c.onEvict != nil {
+ c.onEvict(k, v.Value.(*entry).value)
+ }
+ delete(c.items, k)
+ }
+ c.evictList.Init()
+}
+
+// Add adds a value to the cache. Returns true if an eviction occurred.
+func (c *LRU) Add(key, value interface{}) (evicted bool) {
+ // Check for existing item
+ if ent, ok := c.items[key]; ok {
+ c.evictList.MoveToFront(ent)
+ ent.Value.(*entry).value = value
+ return false
+ }
+
+	// Add new item
+	ent := &entry{key, value}
+	elem := c.evictList.PushFront(ent)
+	c.items[key] = elem
+
+	// Evict the oldest entry if the new item pushed us over capacity
+	evict := c.evictList.Len() > c.size
+	if evict {
+		c.removeOldest()
+	}
+ return evict
+}
+
+// Get looks up a key's value from the cache.
+func (c *LRU) Get(key interface{}) (value interface{}, ok bool) {
+ if ent, ok := c.items[key]; ok {
+ c.evictList.MoveToFront(ent)
+		// Defensive check: a stored element should always hold a non-nil *entry
+		if ent.Value.(*entry) == nil {
+			return nil, false
+		}
+ return ent.Value.(*entry).value, true
+ }
+ return
+}
+
+// Contains checks if a key is in the cache, without updating the recent-ness
+// or deleting it for being stale.
+func (c *LRU) Contains(key interface{}) (ok bool) {
+ _, ok = c.items[key]
+ return ok
+}
+
+// Peek returns the key's value (or nil if not found) without updating
+// the "recently used"-ness of the key.
+func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) {
+ var ent *list.Element
+ if ent, ok = c.items[key]; ok {
+ return ent.Value.(*entry).value, true
+ }
+ return nil, ok
+}
+
+// Remove removes the provided key from the cache, returning whether the
+// key was present.
+func (c *LRU) Remove(key interface{}) (present bool) {
+ if ent, ok := c.items[key]; ok {
+ c.removeElement(ent)
+ return true
+ }
+ return false
+}
+
+// RemoveOldest removes the oldest item from the cache.
+func (c *LRU) RemoveOldest() (key, value interface{}, ok bool) {
+ ent := c.evictList.Back()
+ if ent != nil {
+ c.removeElement(ent)
+ kv := ent.Value.(*entry)
+ return kv.key, kv.value, true
+ }
+ return nil, nil, false
+}
+
+// GetOldest returns the oldest entry
+func (c *LRU) GetOldest() (key, value interface{}, ok bool) {
+ ent := c.evictList.Back()
+ if ent != nil {
+ kv := ent.Value.(*entry)
+ return kv.key, kv.value, true
+ }
+ return nil, nil, false
+}
+
+// Keys returns a slice of the keys in the cache, from oldest to newest.
+func (c *LRU) Keys() []interface{} {
+ keys := make([]interface{}, len(c.items))
+ i := 0
+ for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() {
+ keys[i] = ent.Value.(*entry).key
+ i++
+ }
+ return keys
+}
+
+// Len returns the number of items in the cache.
+func (c *LRU) Len() int {
+ return c.evictList.Len()
+}
+
+// Resize changes the cache size.
+func (c *LRU) Resize(size int) (evicted int) {
+ diff := c.Len() - size
+ if diff < 0 {
+ diff = 0
+ }
+ for i := 0; i < diff; i++ {
+ c.removeOldest()
+ }
+ c.size = size
+ return diff
+}
+
+// removeOldest removes the oldest item from the cache.
+func (c *LRU) removeOldest() {
+ ent := c.evictList.Back()
+ if ent != nil {
+ c.removeElement(ent)
+ }
+}
+
+// removeElement is used to remove a given list element from the cache
+func (c *LRU) removeElement(e *list.Element) {
+ c.evictList.Remove(e)
+ kv := e.Value.(*entry)
+ delete(c.items, kv.key)
+ if c.onEvict != nil {
+ c.onEvict(kv.key, kv.value)
+ }
+}
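
As the `LRU` doc comment above notes, the cache is not thread-safe; callers must add their own locking. A minimal usage sketch (assuming the vendored import path shown in the diff header):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru/simplelru"
)

func main() {
	// The eviction callback fires for every evicted entry, including Purge.
	onEvict := func(key interface{}, value interface{}) {
		fmt.Printf("evicted %v=%v\n", key, value)
	}

	cache, err := simplelru.NewLRU(2, onEvict)
	if err != nil {
		panic(err) // NewLRU only fails for a non-positive size
	}

	cache.Add("a", 1)
	cache.Add("b", 2)
	cache.Get("a")    // promotes "a" to most recently used
	cache.Add("c", 3) // over capacity: evicts "b", the least recently used

	if v, ok := cache.Get("a"); ok {
		fmt.Println("a =", v) // a = 1
	}
	fmt.Println("len =", cache.Len()) // len = 2
}
```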
diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go
new file mode 100644
index 0000000..cb7f8ca
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go
@@ -0,0 +1,40 @@
+// Package simplelru provides a simple LRU cache implementation based on the
+// built-in container/list package.
+package simplelru
+
+// LRUCache is the interface for simple LRU cache.
+type LRUCache interface {
+	// Adds a value to the cache and updates the "recently used"-ness of
+	// the key. Returns true if an eviction occurred.
+ Add(key, value interface{}) bool
+
+	// Returns the key's value from the cache and updates the
+	// "recently used"-ness of the key; the bool reports whether it was found.
+ Get(key interface{}) (value interface{}, ok bool)
+
+ // Checks if a key exists in cache without updating the recent-ness.
+ Contains(key interface{}) (ok bool)
+
+ // Returns key's value without updating the "recently used"-ness of the key.
+ Peek(key interface{}) (value interface{}, ok bool)
+
+ // Removes a key from the cache.
+ Remove(key interface{}) bool
+
+ // Removes the oldest entry from cache.
+ RemoveOldest() (interface{}, interface{}, bool)
+
+	// Returns the oldest entry from the cache as (key, value, ok).
+ GetOldest() (interface{}, interface{}, bool)
+
+ // Returns a slice of the keys in the cache, from oldest to newest.
+ Keys() []interface{}
+
+ // Returns the number of items in the cache.
+ Len() int
+
+ // Clears all cache entries.
+ Purge()
+
+	// Resizes the cache, returning the number of entries evicted
+ Resize(int) int
+}
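
The interface above mirrors the concrete `LRU` method set one-for-one. A compile-time assertion is a common Go idiom for making that explicit; it is not part of the vendored source, but would look like:

```go
package simplelru

// Fails to compile if *LRU ever stops satisfying LRUCache.
var _ LRUCache = (*LRU)(nil)
```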
diff --git a/vendor/github.com/hashicorp/hcl/.gitignore b/vendor/github.com/hashicorp/hcl/.gitignore
new file mode 100644
index 0000000..15586a2
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/.gitignore
@@ -0,0 +1,9 @@
+y.output
+
+# ignore intellij files
+.idea
+*.iml
+*.ipr
+*.iws
+
+*.test
diff --git a/vendor/github.com/hashicorp/hcl/.travis.yml b/vendor/github.com/hashicorp/hcl/.travis.yml
new file mode 100644
index 0000000..cb63a32
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/.travis.yml
@@ -0,0 +1,13 @@
+sudo: false
+
+language: go
+
+go:
+ - 1.x
+ - tip
+
+branches:
+ only:
+ - master
+
+script: make test
diff --git a/vendor/github.com/hashicorp/hcl/LICENSE b/vendor/github.com/hashicorp/hcl/LICENSE
new file mode 100644
index 0000000..c33dcc7
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+ means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of version
+ 1.1 or earlier of the License, but not also under the terms of a
+ Secondary License.
+
+1.6. “Executable Form”
+
+ means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+ means a work that combines Covered Software with other material, in a separate
+ file or files, that is not Covered Software.
+
+1.8. “License”
+
+ means this document.
+
+1.9. “Licensable”
+
+ means having the right to grant, to the maximum extent possible, whether at the
+ time of the initial grant or subsequently, any and all of the rights conveyed by
+ this License.
+
+1.10. “Modifications”
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to, deletion
+ from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+ means any patent claim(s), including without limitation, method, process,
+ and apparatus claims, in any patent Licensable by such Contributor that
+ would be infringed, but for the grant of the License, by the making,
+ using, selling, offering for sale, having made, import, or transfer of
+ either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+ means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, “You” includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, “control” means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or as
+ part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its Contributions
+ or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution become
+ effective for each Contribution on the date the Contributor first distributes
+ such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under this
+ License. No additional rights or licenses will be implied from the distribution
+ or licensing of Covered Software under this License. Notwithstanding Section
+ 2.1(b) above, no patent license is granted by a Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party’s
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of its
+ Contributions.
+
+ This License does not grant any rights in the trademarks, service marks, or
+ logos of any Contributor (except as may be necessary to comply with the
+ notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this License
+ (see Section 10.2) or under the terms of a Secondary License (if permitted
+ under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its Contributions
+ are its original creation(s) or it has sufficient rights to grant the
+ rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under applicable
+ copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under the
+ terms of this License. You must inform recipients that the Source Code Form
+ of the Covered Software is governed by the terms of this License, and how
+ they can obtain a copy of this License. You may not attempt to alter or
+ restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this License,
+ or sublicense it under different terms, provided that the license for
+ the Executable Form does not attempt to limit or alter the recipients’
+ rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for the
+ Covered Software. If the Larger Work is a combination of Covered Software
+ with a work governed by one or more Secondary Licenses, and the Covered
+ Software is not Incompatible With Secondary Licenses, this License permits
+ You to additionally distribute such Covered Software under the terms of
+ such Secondary License(s), so that the recipient of the Larger Work may, at
+ their option, further distribute the Covered Software under the terms of
+ either this License or such Secondary License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices (including
+ copyright notices, patent notices, disclaimers of warranty, or limitations
+ of liability) contained within the Source Code Form of the Covered
+ Software, except that You may alter any license notices to the extent
+ required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on behalf
+ of any Contributor. You must make it absolutely clear that any such
+ warranty, support, indemnity, or liability obligation is offered by You
+ alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute, judicial
+ order, or regulation then You must: (a) comply with the terms of this License
+ to the maximum extent possible; and (b) describe the limitations and the code
+ they affect. Such description must be placed in a text file included with all
+ distributions of the Covered Software under this License. Except to the
+ extent prohibited by statute or regulation, such description must be
+ sufficiently detailed for a recipient of ordinary skill to be able to
+ understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+ if such Contributor fails to notify You of the non-compliance by some
+ reasonable means prior to 60 days after You have come back into compliance.
+ Moreover, Your grants from a particular Contributor are reinstated on an
+ ongoing basis if such Contributor notifies You of the non-compliance by
+ some reasonable means, this is the first time You have received notice of
+ non-compliance with this License from such Contributor, and You become
+ compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions, counter-claims,
+ and cross-claims) alleging that a Contributor Version directly or
+ indirectly infringes any patent, then the rights granted to You by any and
+ all Contributors for the Covered Software under Section 2.1 of this License
+ shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an “as is” basis, without
+ warranty of any kind, either expressed, implied, or statutory, including,
+ without limitation, warranties that the Covered Software is free of defects,
+ merchantable, fit for a particular purpose or non-infringing. The entire
+ risk as to the quality and performance of the Covered Software is with You.
+ Should any Covered Software prove defective in any respect, You (not any
+ Contributor) assume the cost of any necessary servicing, repair, or
+ correction. This disclaimer of warranty constitutes an essential part of this
+ License. No use of any Covered Software is authorized under this License
+ except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from such
+ party’s negligence to the extent applicable law prohibits such limitation.
+ Some jurisdictions do not allow the exclusion or limitation of incidental or
+ consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts of
+ a jurisdiction where the defendant maintains its principal place of business
+ and such litigation shall be governed by laws of that jurisdiction, without
+ reference to its conflict-of-law provisions. Nothing in this Section shall
+ prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject matter
+ hereof. If any provision of this License is held to be unenforceable, such
+ provision shall be reformed only to the extent necessary to make it
+ enforceable. Any law or regulation which provides that the language of a
+ contract shall be construed against the drafter shall not be used to construe
+ this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version of
+ the License under which You originally received the Covered Software, or
+ under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a modified
+ version of this License if you rename the license and remove any
+ references to the name of the license steward (except to note that such
+ modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+ If You choose to distribute Source Code Form that is Incompatible With
+ Secondary Licenses under the terms of this version of the License, the
+ notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+ This Source Code Form is “Incompatible
+ With Secondary Licenses”, as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/hcl/Makefile b/vendor/github.com/hashicorp/hcl/Makefile
new file mode 100644
index 0000000..84fd743
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/Makefile
@@ -0,0 +1,18 @@
+TEST?=./...
+
+default: test
+
+fmt: generate
+ go fmt ./...
+
+test: generate
+ go get -t ./...
+ go test $(TEST) $(TESTARGS)
+
+generate:
+ go generate ./...
+
+updatedeps:
+ go get -u golang.org/x/tools/cmd/stringer
+
+.PHONY: default generate test updatedeps
diff --git a/vendor/github.com/hashicorp/hcl/README.md b/vendor/github.com/hashicorp/hcl/README.md
new file mode 100644
index 0000000..c822332
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/README.md
@@ -0,0 +1,125 @@
+# HCL
+
+[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl)
+
+HCL (HashiCorp Configuration Language) is a configuration language built
+by HashiCorp. The goal of HCL is to build a structured configuration language
+that is both human- and machine-friendly for use with command-line tools,
+specifically targeted towards DevOps tools, servers, etc.
+
+HCL is also fully JSON compatible. That is, JSON can be used as completely
+valid input to a system expecting HCL. This helps make systems
+interoperable with other systems.
+
+HCL is heavily inspired by
+[libucl](https://github.com/vstakhov/libucl),
+nginx configuration, and other similar languages.
+
+## Why?
+
+A common question when looking at HCL is: why not
+JSON, YAML, etc.?
+
+Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com)
+used a variety of configuration languages from full programming languages
+such as Ruby to complete data structure languages such as JSON. What we
+learned is that some people wanted human-friendly configuration languages
+and some people wanted machine-friendly languages.
+
+JSON strikes a nice balance here, but is fairly verbose and, most
+importantly, doesn't support comments. With YAML, we found that beginners
+had a really hard time determining what the actual structure was, and
+ended up guessing more often than not whether to use a hyphen, colon, etc.
+to represent some configuration key.
+
+Full programming languages such as Ruby enable complex behavior
+a configuration language shouldn't usually allow, and also force
+people to learn at least some Ruby.
+
+Because of this, we decided to create our own configuration language
+that is JSON-compatible. Our configuration language (HCL) is designed
+to be written and modified by humans. The API for HCL allows JSON
+as an input so that it is also machine-friendly (machines can generate
+JSON instead of trying to generate HCL).
+
+Our goal with HCL is not to alienate other configuration languages.
+It is instead to provide HCL as a specialized language for our tools,
+and JSON as the interoperability layer.
+
+## Syntax
+
+For a complete grammar, please see the parser itself. A high-level overview
+of the syntax and grammar is listed here.
+
+ * Single line comments start with `#` or `//`
+
+ * Multi-line comments are wrapped in `/*` and `*/`. Nested block comments
+ are not allowed. A multi-line comment (also known as a block comment)
+ terminates at the first `*/` found.
+
+  * Values are assigned with the syntax `key = value` (whitespace doesn't
+    matter). The value can be a string, number, boolean,
+    object, or list.
+
+ * Strings are double-quoted and can contain any UTF-8 characters.
+ Example: `"Hello, World"`
+
+ * Multi-line strings start with `<<EOF` at the end of a line, and end
+ with `EOF` on its own line ([here documents](https://en.wikipedia.org/wiki/Here_document)).
+ Any text may be used in place of `EOF`. Example:
+```
+<<FOO
+hello
+world
+FOO
+```
+
+  * Numbers are assumed to be base 10. If you prefix a number with 0x,
+    it is treated as hexadecimal. If it is prefixed with 0, it is
+    treated as octal. Numbers can be in scientific notation: "1e10".
+
+ * Boolean values: `true`, `false`
+
+  * Arrays can be made by wrapping values in `[]`. Example:
+    `["foo", "bar", 42]`. Arrays can contain primitives,
+ other arrays, and objects. As an alternative, lists
+ of objects can be created with repeated blocks, using
+ this structure:
+
+ ```hcl
+ service {
+ key = "value"
+ }
+
+ service {
+ key = "value"
+ }
+ ```
+
+Objects and nested objects are created using the structure shown below:
+
+```
+variable "ami" {
+ description = "the AMI to use"
+}
+```
+This would be equivalent to the following JSON:
+``` json
+{
+ "variable": {
+ "ami": {
+ "description": "the AMI to use"
+ }
+ }
+}
+```
+
+## Thanks
+
+Thanks to:
+
+ * [@vstakhov](https://github.com/vstakhov) - The original libucl parser
+   and syntax that HCL was based on.
+
+ * [@fatih](https://github.com/fatih) - The rewritten HCL parser
+ in pure Go (no goyacc) and support for a printer.
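
Tying the syntax overview back to the Go API: the repeated `service` blocks shown in the README above decode naturally into a slice. A minimal sketch using this package's `Decode` (the struct shape and field names are illustrative, not part of the library):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

type Service struct {
	Key string `hcl:"key"`
}

type Config struct {
	// Each repeated `service { ... }` block becomes one slice element.
	Services []Service `hcl:"service"`
}

func main() {
	input := `
service {
  key = "value"
}

service {
  key = "other"
}
`
	var cfg Config
	if err := hcl.Decode(&cfg, input); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg.Services) // [{Key:value} {Key:other}]
}
```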
diff --git a/vendor/github.com/hashicorp/hcl/appveyor.yml b/vendor/github.com/hashicorp/hcl/appveyor.yml
new file mode 100644
index 0000000..4db0b71
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/appveyor.yml
@@ -0,0 +1,19 @@
+version: "build-{branch}-{build}"
+image: Visual Studio 2015
+clone_folder: c:\gopath\src\github.com\hashicorp\hcl
+environment:
+ GOPATH: c:\gopath
+init:
+ - git config --global core.autocrlf false
+install:
+- cmd: >-
+ echo %Path%
+
+ go version
+
+ go env
+
+ go get -t ./...
+
+build_script:
+- cmd: go test -v ./...
diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go
new file mode 100644
index 0000000..bed9ebb
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/decoder.go
@@ -0,0 +1,729 @@
+package hcl
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/hashicorp/hcl/hcl/parser"
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+// tagName is the struct tag key used to configure HCL decoding.
+const tagName = "hcl"
+
+var (
+ // nodeType holds a reference to the type of ast.Node
+ nodeType reflect.Type = findNodeType()
+)
+
+// Unmarshal accepts a byte slice as input and writes the
+// data to the value pointed to by v.
+func Unmarshal(bs []byte, v interface{}) error {
+ root, err := parse(bs)
+ if err != nil {
+ return err
+ }
+
+ return DecodeObject(v, root)
+}
+
+// Decode reads the given input and decodes it into the structure
+// given by `out`.
+func Decode(out interface{}, in string) error {
+ obj, err := Parse(in)
+ if err != nil {
+ return err
+ }
+
+ return DecodeObject(out, obj)
+}
+
+// DecodeObject is a lower-level version of Decode. It decodes a
+// raw Object into the given output.
+func DecodeObject(out interface{}, n ast.Node) error {
+ val := reflect.ValueOf(out)
+ if val.Kind() != reflect.Ptr {
+ return errors.New("result must be a pointer")
+ }
+
+	// If we were given an *ast.File, decode its root node instead
+ if f, ok := n.(*ast.File); ok {
+ n = f.Node
+ }
+
+ var d decoder
+ return d.decode("root", n, val.Elem())
+}
+
+type decoder struct {
+ stack []reflect.Kind
+}
+
+func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error {
+ k := result
+
+ // If we have an interface with a valid value, we use that
+ // for the check.
+ if result.Kind() == reflect.Interface {
+ elem := result.Elem()
+ if elem.IsValid() {
+ k = elem
+ }
+ }
+
+ // Push current onto stack unless it is an interface.
+ if k.Kind() != reflect.Interface {
+ d.stack = append(d.stack, k.Kind())
+
+ // Schedule a pop
+ defer func() {
+ d.stack = d.stack[:len(d.stack)-1]
+ }()
+ }
+
+ switch k.Kind() {
+ case reflect.Bool:
+ return d.decodeBool(name, node, result)
+ case reflect.Float32, reflect.Float64:
+ return d.decodeFloat(name, node, result)
+ case reflect.Int, reflect.Int32, reflect.Int64:
+ return d.decodeInt(name, node, result)
+ case reflect.Interface:
+		// When we see an interface, we choose the concrete type ourselves
+ return d.decodeInterface(name, node, result)
+ case reflect.Map:
+ return d.decodeMap(name, node, result)
+ case reflect.Ptr:
+ return d.decodePtr(name, node, result)
+ case reflect.Slice:
+ return d.decodeSlice(name, node, result)
+ case reflect.String:
+ return d.decodeString(name, node, result)
+ case reflect.Struct:
+ return d.decodeStruct(name, node, result)
+ default:
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()),
+ }
+ }
+}
+
+func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error {
+ switch n := node.(type) {
+ case *ast.LiteralType:
+ if n.Token.Type == token.BOOL {
+ v, err := strconv.ParseBool(n.Token.Text)
+ if err != nil {
+ return err
+ }
+
+ result.Set(reflect.ValueOf(v))
+ return nil
+ }
+ }
+
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unknown type %T", name, node),
+ }
+}
+
+func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error {
+ switch n := node.(type) {
+ case *ast.LiteralType:
+ if n.Token.Type == token.FLOAT || n.Token.Type == token.NUMBER {
+ v, err := strconv.ParseFloat(n.Token.Text, 64)
+ if err != nil {
+ return err
+ }
+
+ result.Set(reflect.ValueOf(v).Convert(result.Type()))
+ return nil
+ }
+ }
+
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unknown type %T", name, node),
+ }
+}
+
+func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error {
+ switch n := node.(type) {
+ case *ast.LiteralType:
+ switch n.Token.Type {
+ case token.NUMBER:
+ v, err := strconv.ParseInt(n.Token.Text, 0, 0)
+ if err != nil {
+ return err
+ }
+
+ if result.Kind() == reflect.Interface {
+ result.Set(reflect.ValueOf(int(v)))
+ } else {
+ result.SetInt(v)
+ }
+ return nil
+ case token.STRING:
+ v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0)
+ if err != nil {
+ return err
+ }
+
+ if result.Kind() == reflect.Interface {
+ result.Set(reflect.ValueOf(int(v)))
+ } else {
+ result.SetInt(v)
+ }
+ return nil
+ }
+ }
+
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unknown type %T", name, node),
+ }
+}
+
+	// When the target is an ast.Node, retain the value to enable deferred
+	// decoding. This is very useful when we want to preserve ast.Node
+	// information such as Pos.
+ // like Pos
+ if result.Type() == nodeType && result.CanSet() {
+ result.Set(reflect.ValueOf(node))
+ return nil
+ }
+
+ var set reflect.Value
+ redecode := true
+
+	// For the type tests below, an ObjectType should just be treated as its
+	// inner list. Use a temporary var so the real node is still passed in.
+ testNode := node
+ if ot, ok := node.(*ast.ObjectType); ok {
+ testNode = ot.List
+ }
+
+ switch n := testNode.(type) {
+ case *ast.ObjectList:
+ // If we're at the root or we're directly within a slice, then we
+ // decode objects into map[string]interface{}, otherwise we decode
+ // them into lists.
+ if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
+ var temp map[string]interface{}
+ tempVal := reflect.ValueOf(temp)
+ result := reflect.MakeMap(
+ reflect.MapOf(
+ reflect.TypeOf(""),
+ tempVal.Type().Elem()))
+
+ set = result
+ } else {
+ var temp []map[string]interface{}
+ tempVal := reflect.ValueOf(temp)
+ result := reflect.MakeSlice(
+ reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items))
+ set = result
+ }
+ case *ast.ObjectType:
+ // If we're at the root or we're directly within a slice, then we
+ // decode objects into map[string]interface{}, otherwise we decode
+ // them into lists.
+ if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
+ var temp map[string]interface{}
+ tempVal := reflect.ValueOf(temp)
+ result := reflect.MakeMap(
+ reflect.MapOf(
+ reflect.TypeOf(""),
+ tempVal.Type().Elem()))
+
+ set = result
+ } else {
+ var temp []map[string]interface{}
+ tempVal := reflect.ValueOf(temp)
+ result := reflect.MakeSlice(
+ reflect.SliceOf(tempVal.Type().Elem()), 0, 1)
+ set = result
+ }
+ case *ast.ListType:
+ var temp []interface{}
+ tempVal := reflect.ValueOf(temp)
+ result := reflect.MakeSlice(
+ reflect.SliceOf(tempVal.Type().Elem()), 0, 0)
+ set = result
+ case *ast.LiteralType:
+ switch n.Token.Type {
+ case token.BOOL:
+ var result bool
+ set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+ case token.FLOAT:
+ var result float64
+ set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+ case token.NUMBER:
+ var result int
+ set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+ case token.STRING, token.HEREDOC:
+ set = reflect.Indirect(reflect.New(reflect.TypeOf("")))
+ default:
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node),
+ }
+ }
+ default:
+ return fmt.Errorf(
+ "%s: cannot decode into interface: %T",
+ name, node)
+ }
+
+	// Set the result to what it's supposed to be, then reset
+	// result so we don't reflect into this method anymore.
+ result.Set(set)
+
+ if redecode {
+ // Revisit the node so that we can use the newly instantiated
+ // thing and populate it.
+ if err := d.decode(name, node, result); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error {
+ if item, ok := node.(*ast.ObjectItem); ok {
+ node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
+ }
+
+ if ot, ok := node.(*ast.ObjectType); ok {
+ node = ot.List
+ }
+
+ n, ok := node.(*ast.ObjectList)
+ if !ok {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: not an object type for map (%T)", name, node),
+ }
+ }
+
+ // If we have an interface, then we can address the interface,
+ // but not the slice itself, so get the element but set the interface
+ set := result
+ if result.Kind() == reflect.Interface {
+ result = result.Elem()
+ }
+
+ resultType := result.Type()
+ resultElemType := resultType.Elem()
+ resultKeyType := resultType.Key()
+ if resultKeyType.Kind() != reflect.String {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: map must have string keys", name),
+ }
+ }
+
+ // Make a map if it is nil
+ resultMap := result
+ if result.IsNil() {
+ resultMap = reflect.MakeMap(
+ reflect.MapOf(resultKeyType, resultElemType))
+ }
+
+ // Go through each element and decode it.
+ done := make(map[string]struct{})
+ for _, item := range n.Items {
+ if item.Val == nil {
+ continue
+ }
+
+ // github.com/hashicorp/terraform/issue/5740
+ if len(item.Keys) == 0 {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: map must have string keys", name),
+ }
+ }
+
+ // Get the key we're dealing with, which is the first item
+ keyStr := item.Keys[0].Token.Value().(string)
+
+ // If we've already processed this key, then ignore it
+ if _, ok := done[keyStr]; ok {
+ continue
+ }
+
+ // Determine the value. If we have more than one key, then we
+ // get the objectlist of only these keys.
+ itemVal := item.Val
+ if len(item.Keys) > 1 {
+ itemVal = n.Filter(keyStr)
+ done[keyStr] = struct{}{}
+ }
+
+ // Make the field name
+ fieldName := fmt.Sprintf("%s.%s", name, keyStr)
+
+ // Get the key/value as reflection values
+ key := reflect.ValueOf(keyStr)
+ val := reflect.Indirect(reflect.New(resultElemType))
+
+ // If we have a pre-existing value in the map, use that
+ oldVal := resultMap.MapIndex(key)
+ if oldVal.IsValid() {
+ val.Set(oldVal)
+ }
+
+ // Decode!
+ if err := d.decode(fieldName, itemVal, val); err != nil {
+ return err
+ }
+
+ // Set the value on the map
+ resultMap.SetMapIndex(key, val)
+ }
+
+ // Set the final map if we can
+ set.Set(resultMap)
+ return nil
+}
+
+func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error {
+ // Create an element of the concrete (non pointer) type and decode
+ // into that. Then set the value of the pointer to this type.
+ resultType := result.Type()
+ resultElemType := resultType.Elem()
+ val := reflect.New(resultElemType)
+ if err := d.decode(name, node, reflect.Indirect(val)); err != nil {
+ return err
+ }
+
+ result.Set(val)
+ return nil
+}
+
+func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error {
+ // If we have an interface, then we can address the interface,
+ // but not the slice itself, so get the element but set the interface
+ set := result
+ if result.Kind() == reflect.Interface {
+ result = result.Elem()
+ }
+	// Create the slice if it is nil
+ resultType := result.Type()
+ resultElemType := resultType.Elem()
+ if result.IsNil() {
+ resultSliceType := reflect.SliceOf(resultElemType)
+ result = reflect.MakeSlice(
+ resultSliceType, 0, 0)
+ }
+
+ // Figure out the items we'll be copying into the slice
+ var items []ast.Node
+ switch n := node.(type) {
+ case *ast.ObjectList:
+ items = make([]ast.Node, len(n.Items))
+ for i, item := range n.Items {
+ items[i] = item
+ }
+ case *ast.ObjectType:
+ items = []ast.Node{n}
+ case *ast.ListType:
+ items = n.List
+ default:
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("unknown slice type: %T", node),
+ }
+ }
+
+ for i, item := range items {
+ fieldName := fmt.Sprintf("%s[%d]", name, i)
+
+ // Decode
+ val := reflect.Indirect(reflect.New(resultElemType))
+
+ // if item is an object that was decoded from ambiguous JSON and
+ // flattened, make sure it's expanded if it needs to decode into a
+ // defined structure.
+ item := expandObject(item, val)
+
+ if err := d.decode(fieldName, item, val); err != nil {
+ return err
+ }
+
+ // Append it onto the slice
+ result = reflect.Append(result, val)
+ }
+
+ set.Set(result)
+ return nil
+}
+
+// expandObject detects if an ambiguous JSON object was flattened to a List which
+// should be decoded into a struct, and expands the AST so it decodes properly.
+func expandObject(node ast.Node, result reflect.Value) ast.Node {
+ item, ok := node.(*ast.ObjectItem)
+ if !ok {
+ return node
+ }
+
+ elemType := result.Type()
+
+ // our target type must be a struct
+ switch elemType.Kind() {
+ case reflect.Ptr:
+ switch elemType.Elem().Kind() {
+ case reflect.Struct:
+ //OK
+ default:
+ return node
+ }
+ case reflect.Struct:
+ //OK
+ default:
+ return node
+ }
+
+ // A list value will have a key and field name. If it had more fields,
+ // it wouldn't have been flattened.
+ if len(item.Keys) != 2 {
+ return node
+ }
+
+ keyToken := item.Keys[0].Token
+ item.Keys = item.Keys[1:]
+
+	// we need to un-flatten the AST enough to decode correctly
+ newNode := &ast.ObjectItem{
+ Keys: []*ast.ObjectKey{
+ &ast.ObjectKey{
+ Token: keyToken,
+ },
+ },
+ Val: &ast.ObjectType{
+ List: &ast.ObjectList{
+ Items: []*ast.ObjectItem{item},
+ },
+ },
+ }
+
+ return newNode
+}
+
+func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error {
+ switch n := node.(type) {
+ case *ast.LiteralType:
+ switch n.Token.Type {
+ case token.NUMBER:
+ result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type()))
+ return nil
+ case token.STRING, token.HEREDOC:
+ result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type()))
+ return nil
+ }
+ }
+
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unknown type for string %T", name, node),
+ }
+}
+
+func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error {
+ var item *ast.ObjectItem
+ if it, ok := node.(*ast.ObjectItem); ok {
+ item = it
+ node = it.Val
+ }
+
+ if ot, ok := node.(*ast.ObjectType); ok {
+ node = ot.List
+ }
+
+ // Handle the special case where the object itself is a literal. Previously
+ // the yacc parser would always ensure top-level elements were arrays. The new
+ // parser does not make the same guarantees, thus we need to convert any
+ // top-level literal elements into a list.
+ if _, ok := node.(*ast.LiteralType); ok && item != nil {
+ node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
+ }
+
+ list, ok := node.(*ast.ObjectList)
+ if !ok {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node),
+ }
+ }
+
+ // This slice will keep track of all the structs we'll be decoding.
+ // There can be more than one struct if there are embedded structs
+ // that are squashed.
+ structs := make([]reflect.Value, 1, 5)
+ structs[0] = result
+
+ // Compile the list of all the fields that we're going to be decoding
+ // from all the structs.
+ type field struct {
+ field reflect.StructField
+ val reflect.Value
+ }
+ fields := []field{}
+ for len(structs) > 0 {
+ structVal := structs[0]
+ structs = structs[1:]
+
+ structType := structVal.Type()
+ for i := 0; i < structType.NumField(); i++ {
+ fieldType := structType.Field(i)
+ tagParts := strings.Split(fieldType.Tag.Get(tagName), ",")
+
+ // Ignore fields with tag name "-"
+ if tagParts[0] == "-" {
+ continue
+ }
+
+ if fieldType.Anonymous {
+ fieldKind := fieldType.Type.Kind()
+ if fieldKind != reflect.Struct {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: unsupported type to struct: %s",
+ fieldType.Name, fieldKind),
+ }
+ }
+
+ // We have an embedded field. We "squash" the fields down
+ // if specified in the tag.
+ squash := false
+ for _, tag := range tagParts[1:] {
+ if tag == "squash" {
+ squash = true
+ break
+ }
+ }
+
+ if squash {
+ structs = append(
+ structs, result.FieldByName(fieldType.Name))
+ continue
+ }
+ }
+
+ // Normal struct field, store it away
+ fields = append(fields, field{fieldType, structVal.Field(i)})
+ }
+ }
+
+ usedKeys := make(map[string]struct{})
+ decodedFields := make([]string, 0, len(fields))
+ decodedFieldsVal := make([]reflect.Value, 0)
+ unusedKeysVal := make([]reflect.Value, 0)
+ for _, f := range fields {
+ field, fieldValue := f.field, f.val
+ if !fieldValue.IsValid() {
+ // This should never happen
+ panic("field is not valid")
+ }
+
+ // If we can't set the field, then it is unexported or something,
+ // and we just continue onwards.
+ if !fieldValue.CanSet() {
+ continue
+ }
+
+ fieldName := field.Name
+
+ tagValue := field.Tag.Get(tagName)
+ tagParts := strings.SplitN(tagValue, ",", 2)
+ if len(tagParts) >= 2 {
+ switch tagParts[1] {
+ case "decodedFields":
+ decodedFieldsVal = append(decodedFieldsVal, fieldValue)
+ continue
+ case "key":
+ if item == nil {
+ return &parser.PosError{
+ Pos: node.Pos(),
+ Err: fmt.Errorf("%s: %s asked for 'key', impossible",
+ name, fieldName),
+ }
+ }
+
+ fieldValue.SetString(item.Keys[0].Token.Value().(string))
+ continue
+ case "unusedKeys":
+ unusedKeysVal = append(unusedKeysVal, fieldValue)
+ continue
+ }
+ }
+
+ if tagParts[0] != "" {
+ fieldName = tagParts[0]
+ }
+
+ // Determine the element we'll use to decode. If it is a single
+ // match (only object with the field), then we decode it exactly.
+ // If it is a prefix match, then we decode the matches.
+ filter := list.Filter(fieldName)
+
+ prefixMatches := filter.Children()
+ matches := filter.Elem()
+ if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 {
+ continue
+ }
+
+ // Track the used key
+ usedKeys[fieldName] = struct{}{}
+
+ // Create the field name and decode. We range over the elements
+ // because we actually want the value.
+ fieldName = fmt.Sprintf("%s.%s", name, fieldName)
+ if len(prefixMatches.Items) > 0 {
+ if err := d.decode(fieldName, prefixMatches, fieldValue); err != nil {
+ return err
+ }
+ }
+ for _, match := range matches.Items {
+ var decodeNode ast.Node = match.Val
+ if ot, ok := decodeNode.(*ast.ObjectType); ok {
+ decodeNode = &ast.ObjectList{Items: ot.List.Items}
+ }
+
+ if err := d.decode(fieldName, decodeNode, fieldValue); err != nil {
+ return err
+ }
+ }
+
+ decodedFields = append(decodedFields, field.Name)
+ }
+
+ if len(decodedFieldsVal) > 0 {
+ // Sort it so that it is deterministic
+ sort.Strings(decodedFields)
+
+ for _, v := range decodedFieldsVal {
+ v.Set(reflect.ValueOf(decodedFields))
+ }
+ }
+
+ return nil
+}
+
+// findNodeType returns the type of ast.Node
+func findNodeType() reflect.Type {
+ var nodeContainer struct {
+ Node ast.Node
+ }
+ value := reflect.ValueOf(nodeContainer).FieldByName("Node")
+ return value.Type()
+}
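
decodeStruct above recognizes a handful of struct tag forms: the first tag part renames the field (or skips it entirely with `-`), while the second part selects the special `key`, `squash`, `decodedFields`, and `unusedKeys` behaviors. A hypothetical struct exercising them (the type and field names are illustrative):

```go
// Listener sketches the hcl tag forms handled by decodeStruct.
type Listener struct {
	Name    string   `hcl:",key"`           // set from the block label, e.g. listener "tcp" { ... }
	Address string   `hcl:"address"`        // decoded from the "address" attribute
	Secret  string   `hcl:"-"`              // never touched by the decoder
	Decoded []string `hcl:",decodedFields"` // receives the names of the fields that were decoded
}
```

Embedded structs additionally honor `hcl:",squash"`, which folds their fields into the parent while decoding.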
diff --git a/vendor/github.com/hashicorp/hcl/hcl.go b/vendor/github.com/hashicorp/hcl/hcl.go
new file mode 100644
index 0000000..575a20b
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl.go
@@ -0,0 +1,11 @@
+// Package hcl decodes HCL into usable Go structures.
+//
+// HCL input can come in either pure HCL format or JSON format.
+// It can be parsed into an AST, and then decoded into a structure,
+// or it can be decoded directly from a string into a structure.
+//
+// If you choose to parse HCL into a raw AST, the benefit is that you
+// can write custom visitor implementations to implement custom
+// semantic checks. By default, HCL does not perform any semantic
+// checks.
+package hcl
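
The two entry points this doc comment describes look roughly like the sketch below; `Parse` is the string-input parser that `Decode` in decoder.go builds on:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hcl"
)

func main() {
	const input = `region = "us-east-1"`

	// Path 1: parse to a raw AST first, e.g. to run custom semantic checks.
	file, err := hcl.Parse(input)
	if err != nil {
		panic(err)
	}

	// Path 2: decode into a structure (here from the already-parsed AST;
	// hcl.Decode would go straight from the string).
	var cfg struct {
		Region string `hcl:"region"`
	}
	if err := hcl.DecodeObject(&cfg, file); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Region) // us-east-1
}
```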
diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
new file mode 100644
index 0000000..6e5ef65
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
@@ -0,0 +1,219 @@
+// Package ast declares the types used to represent syntax trees for HCL
+// (HashiCorp Configuration Language)
+package ast
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+// Node is an element in the abstract syntax tree.
+type Node interface {
+ node()
+ Pos() token.Pos
+}
+
+func (File) node() {}
+func (ObjectList) node() {}
+func (ObjectKey) node() {}
+func (ObjectItem) node() {}
+func (Comment) node() {}
+func (CommentGroup) node() {}
+func (ObjectType) node() {}
+func (LiteralType) node() {}
+func (ListType) node() {}
+
+// File represents a single HCL file
+type File struct {
+ Node Node // usually a *ObjectList
+ Comments []*CommentGroup // list of all comments in the source
+}
+
+func (f *File) Pos() token.Pos {
+ return f.Node.Pos()
+}
+
+// ObjectList represents a list of ObjectItems. An HCL file itself is an
+// ObjectList.
+type ObjectList struct {
+ Items []*ObjectItem
+}
+
+func (o *ObjectList) Add(item *ObjectItem) {
+ o.Items = append(o.Items, item)
+}
+
+// Filter filters out the objects with the given key list as a prefix.
+//
+// The returned list of objects contains ObjectItems where the keys have
+// this prefix already stripped off. This might result in objects with
+// zero-length key lists if they have no children.
+//
+// If no matches are found, an empty ObjectList (non-nil) is returned.
+func (o *ObjectList) Filter(keys ...string) *ObjectList {
+ var result ObjectList
+ for _, item := range o.Items {
+ // If there aren't enough keys, then ignore this
+ if len(item.Keys) < len(keys) {
+ continue
+ }
+
+ match := true
+ for i, key := range item.Keys[:len(keys)] {
+ key := key.Token.Value().(string)
+ if key != keys[i] && !strings.EqualFold(key, keys[i]) {
+ match = false
+ break
+ }
+ }
+ if !match {
+ continue
+ }
+
+ // Strip off the prefix from the children
+ newItem := *item
+ newItem.Keys = newItem.Keys[len(keys):]
+ result.Add(&newItem)
+ }
+
+ return &result
+}
+
+// Children returns further nested objects (key length > 0) within this
+// ObjectList. This should be used with Filter to get at child items.
+func (o *ObjectList) Children() *ObjectList {
+ var result ObjectList
+ for _, item := range o.Items {
+ if len(item.Keys) > 0 {
+ result.Add(item)
+ }
+ }
+
+ return &result
+}
+
+// Elem returns items in the list that are direct element assignments
+// (key length == 0). This should be used with Filter to get at elements.
+func (o *ObjectList) Elem() *ObjectList {
+ var result ObjectList
+ for _, item := range o.Items {
+ if len(item.Keys) == 0 {
+ result.Add(item)
+ }
+ }
+
+ return &result
+}
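+
+// Filter, Children and Elem are typically combined: Filter strips a key
+// prefix, then Children selects the still-nested items and Elem the direct
+// assignments. A minimal, illustrative sketch (variable names are
+// hypothetical):
+//
+//	list := file.Node.(*ObjectList)     // root of a parsed file
+//	filtered := list.Filter("service")  // items whose first key is "service"
+//	nested := filtered.Children()       // e.g. service "web" { ... }
+//	direct := filtered.Elem()           // e.g. service = { ... }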
+
+func (o *ObjectList) Pos() token.Pos {
+	// returns the position of the first item; panics if the list is empty
+ return o.Items[0].Pos()
+}
+
+// ObjectItem represents an HCL object item. An item is represented with a key
+// (or keys). It can be an assignment or an object (both normal and nested).
+type ObjectItem struct {
+	// keys has length one if the item is an assignment. If it's a
+	// nested object it can be longer than one. In that case "assign" is
+	// invalid, as there are no assignments for a nested object.
+ Keys []*ObjectKey
+
+ // assign contains the position of "=", if any
+ Assign token.Pos
+
+	// val is the item itself. It can be an object, list, number, bool or
+	// string. If the key length is larger than one, val can only be of
+	// type Object.
+ Val Node
+
+ LeadComment *CommentGroup // associated lead comment
+ LineComment *CommentGroup // associated line comment
+}
+
+func (o *ObjectItem) Pos() token.Pos {
+ // I'm not entirely sure what causes this, but removing this causes
+ // a test failure. We should investigate at some point.
+ if len(o.Keys) == 0 {
+ return token.Pos{}
+ }
+
+ return o.Keys[0].Pos()
+}
+
+// ObjectKey represents an object key, which is either an identifier or a string.
+type ObjectKey struct {
+ Token token.Token
+}
+
+func (o *ObjectKey) Pos() token.Pos {
+ return o.Token.Pos
+}
+
+// LiteralType represents a literal of basic type. Valid types are:
+// token.NUMBER, token.FLOAT, token.BOOL and token.STRING
+type LiteralType struct {
+ Token token.Token
+
+ // comment types, only used when in a list
+ LeadComment *CommentGroup
+ LineComment *CommentGroup
+}
+
+func (l *LiteralType) Pos() token.Pos {
+ return l.Token.Pos
+}
+
+// ListType represents an HCL list type
+type ListType struct {
+ Lbrack token.Pos // position of "["
+ Rbrack token.Pos // position of "]"
+ List []Node // the elements in lexical order
+}
+
+func (l *ListType) Pos() token.Pos {
+ return l.Lbrack
+}
+
+func (l *ListType) Add(node Node) {
+ l.List = append(l.List, node)
+}
+
+// ObjectType represents an HCL object type
+type ObjectType struct {
+ Lbrace token.Pos // position of "{"
+ Rbrace token.Pos // position of "}"
+ List *ObjectList // the nodes in lexical order
+}
+
+func (o *ObjectType) Pos() token.Pos {
+ return o.Lbrace
+}
+
+// Comment node represents a single //-style, #-style or /*-style comment
+type Comment struct {
+ Start token.Pos // position of / or #
+ Text string
+}
+
+func (c *Comment) Pos() token.Pos {
+ return c.Start
+}
+
+// CommentGroup node represents a sequence of comments with no other tokens and
+// no empty lines between.
+type CommentGroup struct {
+ List []*Comment // len(List) > 0
+}
+
+func (c *CommentGroup) Pos() token.Pos {
+ return c.List[0].Pos()
+}
+
+//-------------------------------------------------------------------
+// GoStringer
+//-------------------------------------------------------------------
+
+func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) }
+func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) }
diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
new file mode 100644
index 0000000..ba07ad4
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
@@ -0,0 +1,52 @@
+package ast
+
+import "fmt"
+
+// WalkFunc describes a function to be called for each node during a Walk. The
+// returned node can be used to rewrite the AST. Walking stops if the returned
+// bool is false.
+type WalkFunc func(Node) (Node, bool)
+
+// Walk traverses an AST in depth-first order: It starts by calling fn(node);
+// node must not be nil. If fn returns true, Walk invokes fn recursively for
+// each of the non-nil children of node, followed by a call of fn(nil). The
+// returned node of fn can be used to rewrite the passed node to fn.
+func Walk(node Node, fn WalkFunc) Node {
+ rewritten, ok := fn(node)
+ if !ok {
+ return rewritten
+ }
+
+ switch n := node.(type) {
+ case *File:
+ n.Node = Walk(n.Node, fn)
+ case *ObjectList:
+ for i, item := range n.Items {
+ n.Items[i] = Walk(item, fn).(*ObjectItem)
+ }
+ case *ObjectKey:
+ // nothing to do
+ case *ObjectItem:
+ for i, k := range n.Keys {
+ n.Keys[i] = Walk(k, fn).(*ObjectKey)
+ }
+
+ if n.Val != nil {
+ n.Val = Walk(n.Val, fn)
+ }
+ case *LiteralType:
+ // nothing to do
+ case *ListType:
+ for i, l := range n.List {
+ n.List[i] = Walk(l, fn)
+ }
+ case *ObjectType:
+ n.List = Walk(n.List, fn).(*ObjectList)
+ default:
+ // should we panic here?
+ fmt.Printf("unknown type: %T\n", n)
+ }
+
+ fn(nil)
+ return rewritten
+}
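+
+// A minimal rewriting sketch (illustrative): uppercase every literal token.
+//
+//	rewritten := Walk(file.Node, func(n Node) (Node, bool) {
+//		if lit, ok := n.(*LiteralType); ok {
+//			lit.Token.Text = strings.ToUpper(lit.Token.Text)
+//			return lit, false // literals have no children
+//		}
+//		return n, true
+//	})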
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go
new file mode 100644
index 0000000..5c99381
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go
@@ -0,0 +1,17 @@
+package parser
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+// PosError is a parse error that contains a position.
+type PosError struct {
+ Pos token.Pos
+ Err error
+}
+
+func (e *PosError) Error() string {
+ return fmt.Sprintf("At %s: %s", e.Pos, e.Err)
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
new file mode 100644
index 0000000..64c83bc
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
@@ -0,0 +1,532 @@
+// Package parser implements a parser for HCL (HashiCorp Configuration
+// Language)
+package parser
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/hashicorp/hcl/hcl/scanner"
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+type Parser struct {
+ sc *scanner.Scanner
+
+ // Last read token
+ tok token.Token
+ commaPrev token.Token
+
+ comments []*ast.CommentGroup
+ leadComment *ast.CommentGroup // last lead comment
+ lineComment *ast.CommentGroup // last line comment
+
+ enableTrace bool
+ indent int
+ n int // buffer size (max = 1)
+}
+
+func newParser(src []byte) *Parser {
+ return &Parser{
+ sc: scanner.New(src),
+ }
+}
+
+// Parse parses the given source and returns the resulting abstract syntax tree.
+func Parse(src []byte) (*ast.File, error) {
+	// normalize all line endings
+	// since the scanner and output only work with "\n" line endings, we would
+	// otherwise end up with dangling "\r" characters in the parsed data.
+ src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1)
+
+ p := newParser(src)
+ return p.Parse()
+}
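+
+// A minimal parsing sketch (illustrative):
+//
+//	f, err := Parse([]byte(`name = "example"`))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	list := f.Node.(*ast.ObjectList)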
+
+var errEofToken = errors.New("EOF token found")
+
+// Parse parses the full source and returns the abstract syntax tree.
+func (p *Parser) Parse() (*ast.File, error) {
+ f := &ast.File{}
+ var err, scerr error
+ p.sc.Error = func(pos token.Pos, msg string) {
+ scerr = &PosError{Pos: pos, Err: errors.New(msg)}
+ }
+
+ f.Node, err = p.objectList(false)
+ if scerr != nil {
+ return nil, scerr
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ f.Comments = p.comments
+ return f, nil
+}
+
+// objectList parses a list of items within an object (generally k/v pairs).
+// The parameter "obj" tells us whether we are within an object (braces:
+// '{', '}') or just at the top level. If we're within an object, we end
+// at an RBRACE.
+func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) {
+ defer un(trace(p, "ParseObjectList"))
+ node := &ast.ObjectList{}
+
+ for {
+ if obj {
+ tok := p.scan()
+ p.unscan()
+ if tok.Type == token.RBRACE {
+ break
+ }
+ }
+
+ n, err := p.objectItem()
+ if err == errEofToken {
+ break // we are finished
+ }
+
+		// we don't return a nil node, because callers might want to use the
+		// already collected items.
+ if err != nil {
+ return node, err
+ }
+
+ node.Add(n)
+
+ // object lists can be optionally comma-delimited e.g. when a list of maps
+ // is being expressed, so a comma is allowed here - it's simply consumed
+ tok := p.scan()
+ if tok.Type != token.COMMA {
+ p.unscan()
+ }
+ }
+ return node, nil
+}
+
+func (p *Parser) consumeComment() (comment *ast.Comment, endline int) {
+ endline = p.tok.Pos.Line
+
+	// count the endline if it's a multiline comment, i.e. starting with /*
+ if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' {
+ // don't use range here - no need to decode Unicode code points
+ for i := 0; i < len(p.tok.Text); i++ {
+ if p.tok.Text[i] == '\n' {
+ endline++
+ }
+ }
+ }
+
+ comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text}
+ p.tok = p.sc.Scan()
+ return
+}
+
+func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
+ var list []*ast.Comment
+ endline = p.tok.Pos.Line
+
+ for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n {
+ var comment *ast.Comment
+ comment, endline = p.consumeComment()
+ list = append(list, comment)
+ }
+
+ // add comment group to the comments list
+ comments = &ast.CommentGroup{List: list}
+ p.comments = append(p.comments, comments)
+
+ return
+}
+
+// objectItem parses a single object item
+func (p *Parser) objectItem() (*ast.ObjectItem, error) {
+ defer un(trace(p, "ParseObjectItem"))
+
+ keys, err := p.objectKey()
+ if len(keys) > 0 && err == errEofToken {
+ // We ignore eof token here since it is an error if we didn't
+ // receive a value (but we did receive a key) for the item.
+ err = nil
+ }
+ if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE {
+ // This is a strange boolean statement, but what it means is:
+ // We have keys with no value, and we're likely in an object
+ // (since RBrace ends an object). For this, we set err to nil so
+ // we continue and get the error below of having the wrong value
+ // type.
+ err = nil
+
+ // Reset the token type so we don't think it completed fine. See
+ // objectType which uses p.tok.Type to check if we're done with
+ // the object.
+ p.tok.Type = token.EOF
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ o := &ast.ObjectItem{
+ Keys: keys,
+ }
+
+ if p.leadComment != nil {
+ o.LeadComment = p.leadComment
+ p.leadComment = nil
+ }
+
+ switch p.tok.Type {
+ case token.ASSIGN:
+ o.Assign = p.tok.Pos
+ o.Val, err = p.object()
+ if err != nil {
+ return nil, err
+ }
+ case token.LBRACE:
+ o.Val, err = p.objectType()
+ if err != nil {
+ return nil, err
+ }
+ default:
+ keyStr := make([]string, 0, len(keys))
+ for _, k := range keys {
+ keyStr = append(keyStr, k.Token.Text)
+ }
+
+ return nil, &PosError{
+ Pos: p.tok.Pos,
+ Err: fmt.Errorf(
+ "key '%s' expected start of object ('{') or assignment ('=')",
+ strings.Join(keyStr, " ")),
+ }
+ }
+
+ // key=#comment
+ // val
+ if p.lineComment != nil {
+ o.LineComment, p.lineComment = p.lineComment, nil
+ }
+
+ // do a look-ahead for line comment
+ p.scan()
+ if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil {
+ o.LineComment = p.lineComment
+ p.lineComment = nil
+ }
+ p.unscan()
+ return o, nil
+}
+
+// objectKey parses an object key and returns a ObjectKey AST
+func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
+ keyCount := 0
+ keys := make([]*ast.ObjectKey, 0)
+
+ for {
+ tok := p.scan()
+ switch tok.Type {
+ case token.EOF:
+ // It is very important to also return the keys here as well as
+ // the error. This is because we need to be able to tell if we
+ // did parse keys prior to finding the EOF, or if we just found
+ // a bare EOF.
+ return keys, errEofToken
+ case token.ASSIGN:
+			// assignment or object only, but not nested objects. This is not
+			// allowed: `foo bar = {}`
+ if keyCount > 1 {
+ return nil, &PosError{
+ Pos: p.tok.Pos,
+ Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type),
+ }
+ }
+
+ if keyCount == 0 {
+ return nil, &PosError{
+ Pos: p.tok.Pos,
+ Err: errors.New("no object keys found!"),
+ }
+ }
+
+ return keys, nil
+ case token.LBRACE:
+ var err error
+
+ // If we have no keys, then it is a syntax error. i.e. {{}} is not
+ // allowed.
+ if len(keys) == 0 {
+ err = &PosError{
+ Pos: p.tok.Pos,
+ Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type),
+ }
+ }
+
+ // object
+ return keys, err
+ case token.IDENT, token.STRING:
+ keyCount++
+ keys = append(keys, &ast.ObjectKey{Token: p.tok})
+ case token.ILLEGAL:
+ return keys, &PosError{
+ Pos: p.tok.Pos,
+ Err: fmt.Errorf("illegal character"),
+ }
+ default:
+ return keys, &PosError{
+ Pos: p.tok.Pos,
+ Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type),
+ }
+ }
+ }
+}
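+
+// Key forms accepted by objectKey (illustrative):
+//
+//	foo = "bar"       // single IDENT key, assignment
+//	"foo" = "bar"     // single STRING key
+//	foo "bar" { ... } // multiple keys, must open a nested object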
+
+// object parses any type of object, such as number, bool, string, object or
+// list.
+func (p *Parser) object() (ast.Node, error) {
+ defer un(trace(p, "ParseType"))
+ tok := p.scan()
+
+ switch tok.Type {
+ case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC:
+ return p.literalType()
+ case token.LBRACE:
+ return p.objectType()
+ case token.LBRACK:
+ return p.listType()
+ case token.COMMENT:
+ // implement comment
+ case token.EOF:
+ return nil, errEofToken
+ }
+
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf("Unknown token: %+v", tok),
+ }
+}
+
+// objectType parses an object type and returns a ObjectType AST
+func (p *Parser) objectType() (*ast.ObjectType, error) {
+ defer un(trace(p, "ParseObjectType"))
+
+ // we assume that the currently scanned token is a LBRACE
+ o := &ast.ObjectType{
+ Lbrace: p.tok.Pos,
+ }
+
+ l, err := p.objectList(true)
+
+	// if we hit an RBRACE, we are good to go (it means we parsed all items); if
+	// it's not an RBRACE, it's a syntax error and we just return it.
+ if err != nil && p.tok.Type != token.RBRACE {
+ return nil, err
+ }
+
+ // No error, scan and expect the ending to be a brace
+ if tok := p.scan(); tok.Type != token.RBRACE {
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type),
+ }
+ }
+
+ o.List = l
+ o.Rbrace = p.tok.Pos // advanced via parseObjectList
+ return o, nil
+}
+
+// listType parses a list type and returns a ListType AST
+func (p *Parser) listType() (*ast.ListType, error) {
+ defer un(trace(p, "ParseListType"))
+
+ // we assume that the currently scanned token is a LBRACK
+ l := &ast.ListType{
+ Lbrack: p.tok.Pos,
+ }
+
+ needComma := false
+ for {
+ tok := p.scan()
+ if needComma {
+ switch tok.Type {
+ case token.COMMA, token.RBRACK:
+ default:
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf(
+ "error parsing list, expected comma or list end, got: %s",
+ tok.Type),
+ }
+ }
+ }
+ switch tok.Type {
+ case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
+ node, err := p.literalType()
+ if err != nil {
+ return nil, err
+ }
+
+ // If there is a lead comment, apply it
+ if p.leadComment != nil {
+ node.LeadComment = p.leadComment
+ p.leadComment = nil
+ }
+
+ l.Add(node)
+ needComma = true
+ case token.COMMA:
+ // get next list item or we are at the end
+ // do a look-ahead for line comment
+ p.scan()
+ if p.lineComment != nil && len(l.List) > 0 {
+ lit, ok := l.List[len(l.List)-1].(*ast.LiteralType)
+ if ok {
+ lit.LineComment = p.lineComment
+ l.List[len(l.List)-1] = lit
+ p.lineComment = nil
+ }
+ }
+ p.unscan()
+
+ needComma = false
+ continue
+ case token.LBRACE:
+ // Looks like a nested object, so parse it out
+ node, err := p.objectType()
+ if err != nil {
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf(
+ "error while trying to parse object within list: %s", err),
+ }
+ }
+ l.Add(node)
+ needComma = true
+ case token.LBRACK:
+ node, err := p.listType()
+ if err != nil {
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf(
+ "error while trying to parse list within list: %s", err),
+ }
+ }
+ l.Add(node)
+ case token.RBRACK:
+ // finished
+ l.Rbrack = p.tok.Pos
+ return l, nil
+ default:
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type),
+ }
+ }
+ }
+}
+
+// literalType parses a literal type and returns a LiteralType AST
+func (p *Parser) literalType() (*ast.LiteralType, error) {
+ defer un(trace(p, "ParseLiteral"))
+
+ return &ast.LiteralType{
+ Token: p.tok,
+ }, nil
+}
+
+// scan returns the next token from the underlying scanner. If a token has
+// been unscanned then read that instead. In the process, it collects any
+// comment groups encountered, and remembers the last lead and line comments.
+func (p *Parser) scan() token.Token {
+ // If we have a token on the buffer, then return it.
+ if p.n != 0 {
+ p.n = 0
+ return p.tok
+ }
+
+	// Otherwise read the next token from the scanner and save it to the buffer
+	// in case we unscan later.
+ prev := p.tok
+ p.tok = p.sc.Scan()
+
+ if p.tok.Type == token.COMMENT {
+ var comment *ast.CommentGroup
+ var endline int
+
+ // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n",
+ // p.tok.Pos.Line, prev.Pos.Line, endline)
+ if p.tok.Pos.Line == prev.Pos.Line {
+ // The comment is on same line as the previous token; it
+ // cannot be a lead comment but may be a line comment.
+ comment, endline = p.consumeCommentGroup(0)
+ if p.tok.Pos.Line != endline {
+ // The next token is on a different line, thus
+ // the last comment group is a line comment.
+ p.lineComment = comment
+ }
+ }
+
+ // consume successor comments, if any
+ endline = -1
+ for p.tok.Type == token.COMMENT {
+ comment, endline = p.consumeCommentGroup(1)
+ }
+
+ if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE {
+ switch p.tok.Type {
+ case token.RBRACE, token.RBRACK:
+ // Do not count for these cases
+ default:
+ // The next token is following on the line immediately after the
+ // comment group, thus the last comment group is a lead comment.
+ p.leadComment = comment
+ }
+ }
+
+ }
+
+ return p.tok
+}
+
+// unscan pushes the previously read token back onto the buffer.
+func (p *Parser) unscan() {
+ p.n = 1
+}
+
+// ----------------------------------------------------------------------------
+// Parsing support
+
+func (p *Parser) printTrace(a ...interface{}) {
+ if !p.enableTrace {
+ return
+ }
+
+ const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+ const n = len(dots)
+ fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)
+
+ i := 2 * p.indent
+ for i > n {
+ fmt.Print(dots)
+ i -= n
+ }
+ // i <= n
+ fmt.Print(dots[0:i])
+ fmt.Println(a...)
+}
+
+func trace(p *Parser, msg string) *Parser {
+ p.printTrace(msg, "(")
+ p.indent++
+ return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *Parser) {
+ p.indent--
+ p.printTrace(")")
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go b/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go
new file mode 100644
index 0000000..7c038d1
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go
@@ -0,0 +1,789 @@
+package printer
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+const (
+ blank = byte(' ')
+ newline = byte('\n')
+ tab = byte('\t')
+ infinity = 1 << 30 // offset or line
+)
+
+var (
+ unindent = []byte("\uE123") // in the private use space
+)
+
+type printer struct {
+ cfg Config
+ prev token.Pos
+
+ comments []*ast.CommentGroup // may be nil, contains all comments
+ standaloneComments []*ast.CommentGroup // contains all standalone comments (not assigned to any node)
+
+ enableTrace bool
+ indentTrace int
+}
+
+type ByPosition []*ast.CommentGroup
+
+func (b ByPosition) Len() int { return len(b) }
+func (b ByPosition) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+func (b ByPosition) Less(i, j int) bool { return b[i].Pos().Before(b[j].Pos()) }
+
+// collectComments collects all standalone comments, that is, comments which
+// are not lead or line comments
+func (p *printer) collectComments(node ast.Node) {
+	// first collect all comments. These are already stored in
+	// ast.File.Comments.
+ ast.Walk(node, func(nn ast.Node) (ast.Node, bool) {
+ switch t := nn.(type) {
+ case *ast.File:
+ p.comments = t.Comments
+ return nn, false
+ }
+ return nn, true
+ })
+
+	standaloneComments := make(map[token.Pos]*ast.CommentGroup)
+ for _, c := range p.comments {
+ standaloneComments[c.Pos()] = c
+ }
+
+ // next remove all lead and line comments from the overall comment map.
+ // This will give us comments which are standalone, comments which are not
+ // assigned to any kind of node.
+ ast.Walk(node, func(nn ast.Node) (ast.Node, bool) {
+ switch t := nn.(type) {
+ case *ast.LiteralType:
+ if t.LeadComment != nil {
+ for _, comment := range t.LeadComment.List {
+ if _, ok := standaloneComments[comment.Pos()]; ok {
+ delete(standaloneComments, comment.Pos())
+ }
+ }
+ }
+
+ if t.LineComment != nil {
+ for _, comment := range t.LineComment.List {
+ if _, ok := standaloneComments[comment.Pos()]; ok {
+ delete(standaloneComments, comment.Pos())
+ }
+ }
+ }
+ case *ast.ObjectItem:
+ if t.LeadComment != nil {
+ for _, comment := range t.LeadComment.List {
+ if _, ok := standaloneComments[comment.Pos()]; ok {
+ delete(standaloneComments, comment.Pos())
+ }
+ }
+ }
+
+ if t.LineComment != nil {
+ for _, comment := range t.LineComment.List {
+ if _, ok := standaloneComments[comment.Pos()]; ok {
+ delete(standaloneComments, comment.Pos())
+ }
+ }
+ }
+ }
+
+ return nn, true
+ })
+
+ for _, c := range standaloneComments {
+ p.standaloneComments = append(p.standaloneComments, c)
+ }
+
+ sort.Sort(ByPosition(p.standaloneComments))
+}
+
+// output creates printable HCL output and returns it.
+func (p *printer) output(n interface{}) []byte {
+ var buf bytes.Buffer
+
+ switch t := n.(type) {
+ case *ast.File:
+ // File doesn't trace so we add the tracing here
+ defer un(trace(p, "File"))
+ return p.output(t.Node)
+ case *ast.ObjectList:
+ defer un(trace(p, "ObjectList"))
+
+ var index int
+ for {
+ // Determine the location of the next actual non-comment
+ // item. If we're at the end, the next item is at "infinity"
+ var nextItem token.Pos
+ if index != len(t.Items) {
+ nextItem = t.Items[index].Pos()
+ } else {
+ nextItem = token.Pos{Offset: infinity, Line: infinity}
+ }
+
+			// Go through the standalone comments in the file and print out
+			// the comments that belong before this object item.
+ for _, c := range p.standaloneComments {
+ // Go through all the comments in the group. The group
+ // should be printed together, not separated by double newlines.
+ printed := false
+ newlinePrinted := false
+ for _, comment := range c.List {
+ // We only care about comments after the previous item
+ // we've printed so that comments are printed in the
+ // correct locations (between two objects for example).
+ // And before the next item.
+ if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) {
+						// If we hit the end, add newlines so we can print the comment.
+						// We don't do this if prev is invalid, which means the
+						// beginning of the file, since the first comment should
+						// be at the first line.
+ if !newlinePrinted && p.prev.IsValid() && index == len(t.Items) {
+ buf.Write([]byte{newline, newline})
+ newlinePrinted = true
+ }
+
+ // Write the actual comment.
+ buf.WriteString(comment.Text)
+ buf.WriteByte(newline)
+
+ // Set printed to true to note that we printed something
+ printed = true
+ }
+ }
+
+ // If we're not at the last item, write a new line so
+ // that there is a newline separating this comment from
+ // the next object.
+ if printed && index != len(t.Items) {
+ buf.WriteByte(newline)
+ }
+ }
+
+ if index == len(t.Items) {
+ break
+ }
+
+ buf.Write(p.output(t.Items[index]))
+ if index != len(t.Items)-1 {
+ // Always write a newline to separate us from the next item
+ buf.WriteByte(newline)
+
+ // Need to determine if we're going to separate the next item
+ // with a blank line. The logic here is simple, though there
+ // are a few conditions:
+ //
+				// 1. The next object is more than one line away anyway,
+ // so we need an empty line.
+ //
+ // 2. The next object is not a "single line" object, so
+ // we need an empty line.
+ //
+ // 3. This current object is not a single line object,
+ // so we need an empty line.
+ current := t.Items[index]
+ next := t.Items[index+1]
+ if next.Pos().Line != t.Items[index].Pos().Line+1 ||
+ !p.isSingleLineObject(next) ||
+ !p.isSingleLineObject(current) {
+ buf.WriteByte(newline)
+ }
+ }
+ index++
+ }
+ case *ast.ObjectKey:
+ buf.WriteString(t.Token.Text)
+ case *ast.ObjectItem:
+ p.prev = t.Pos()
+ buf.Write(p.objectItem(t))
+ case *ast.LiteralType:
+ buf.Write(p.literalType(t))
+ case *ast.ListType:
+ buf.Write(p.list(t))
+ case *ast.ObjectType:
+ buf.Write(p.objectType(t))
+ default:
+ fmt.Printf(" unknown type: %T\n", n)
+ }
+
+ return buf.Bytes()
+}
+
+func (p *printer) literalType(lit *ast.LiteralType) []byte {
+ result := []byte(lit.Token.Text)
+ switch lit.Token.Type {
+ case token.HEREDOC:
+ // Clear the trailing newline from heredocs
+ if result[len(result)-1] == '\n' {
+ result = result[:len(result)-1]
+ }
+
+ // Poison lines 2+ so that we don't indent them
+ result = p.heredocIndent(result)
+ case token.STRING:
+ // If this is a multiline string, poison lines 2+ so we don't
+ // indent them.
+ if bytes.IndexRune(result, '\n') >= 0 {
+ result = p.heredocIndent(result)
+ }
+ }
+
+ return result
+}
+
+// objectItem returns the printable HCL form of an object item. An object item
+// starts with one or more keys and has a value. The value might be of any
+// type.
+func (p *printer) objectItem(o *ast.ObjectItem) []byte {
+ defer un(trace(p, fmt.Sprintf("ObjectItem: %s", o.Keys[0].Token.Text)))
+ var buf bytes.Buffer
+
+ if o.LeadComment != nil {
+ for _, comment := range o.LeadComment.List {
+ buf.WriteString(comment.Text)
+ buf.WriteByte(newline)
+ }
+ }
+
+ // If key and val are on different lines, treat line comments like lead comments.
+ if o.LineComment != nil && o.Val.Pos().Line != o.Keys[0].Pos().Line {
+ for _, comment := range o.LineComment.List {
+ buf.WriteString(comment.Text)
+ buf.WriteByte(newline)
+ }
+ }
+
+ for i, k := range o.Keys {
+ buf.WriteString(k.Token.Text)
+ buf.WriteByte(blank)
+
+ // reach end of key
+ if o.Assign.IsValid() && i == len(o.Keys)-1 && len(o.Keys) == 1 {
+ buf.WriteString("=")
+ buf.WriteByte(blank)
+ }
+ }
+
+ buf.Write(p.output(o.Val))
+
+ if o.LineComment != nil && o.Val.Pos().Line == o.Keys[0].Pos().Line {
+ buf.WriteByte(blank)
+ for _, comment := range o.LineComment.List {
+ buf.WriteString(comment.Text)
+ }
+ }
+
+ return buf.Bytes()
+}
+
+// objectType returns the printable HCL form of an object type. An object type
+// begins with a brace and ends with a brace.
+func (p *printer) objectType(o *ast.ObjectType) []byte {
+ defer un(trace(p, "ObjectType"))
+ var buf bytes.Buffer
+ buf.WriteString("{")
+
+ var index int
+ var nextItem token.Pos
+ var commented, newlinePrinted bool
+ for {
+ // Determine the location of the next actual non-comment
+ // item. If we're at the end, the next item is the closing brace
+ if index != len(o.List.Items) {
+ nextItem = o.List.Items[index].Pos()
+ } else {
+ nextItem = o.Rbrace
+ }
+
+		// Go through the standalone comments in the file and print out
+		// the comments that belong before this object item.
+ for _, c := range p.standaloneComments {
+ printed := false
+ var lastCommentPos token.Pos
+ for _, comment := range c.List {
+ // We only care about comments after the previous item
+ // we've printed so that comments are printed in the
+ // correct locations (between two objects for example).
+ // And before the next item.
+ if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) {
+ // If there are standalone comments and the initial newline has not
+ // been printed yet, do it now.
+ if !newlinePrinted {
+ newlinePrinted = true
+ buf.WriteByte(newline)
+ }
+
+ // add newline if it's between other printed nodes
+ if index > 0 {
+ commented = true
+ buf.WriteByte(newline)
+ }
+
+ // Store this position
+ lastCommentPos = comment.Pos()
+
+ // output the comment itself
+ buf.Write(p.indent(p.heredocIndent([]byte(comment.Text))))
+
+ // Set printed to true to note that we printed something
+ printed = true
+
+ /*
+ if index != len(o.List.Items) {
+ buf.WriteByte(newline) // do not print on the end
+ }
+ */
+ }
+ }
+
+ // Stuff to do if we had comments
+ if printed {
+ // Always write a newline
+ buf.WriteByte(newline)
+
+ // If there is another item in the object and our comment
+ // didn't hug it directly, then make sure there is a blank
+ // line separating them.
+ if nextItem != o.Rbrace && nextItem.Line != lastCommentPos.Line+1 {
+ buf.WriteByte(newline)
+ }
+ }
+ }
+
+ if index == len(o.List.Items) {
+ p.prev = o.Rbrace
+ break
+ }
+
+ // At this point we are sure that it's not a totally empty block: print
+ // the initial newline if it hasn't been printed yet by the previous
+ // block about standalone comments.
+ if !newlinePrinted {
+ buf.WriteByte(newline)
+ newlinePrinted = true
+ }
+
+		// check if we have adjacent one-liner items. If so, we align
+		// their comments.
+ var aligned []*ast.ObjectItem
+ for _, item := range o.List.Items[index:] {
+ // we don't group one line lists
+ if len(o.List.Items) == 1 {
+ break
+ }
+
+			// one line means a one-liner without any lead comment;
+			// two lines means a one-liner with a lead comment;
+			// anything else is not a one-liner
+ cur := lines(string(p.objectItem(item)))
+ if cur > 2 {
+ break
+ }
+
+ curPos := item.Pos()
+
+ nextPos := token.Pos{}
+ if index != len(o.List.Items)-1 {
+ nextPos = o.List.Items[index+1].Pos()
+ }
+
+ prevPos := token.Pos{}
+ if index != 0 {
+ prevPos = o.List.Items[index-1].Pos()
+ }
+
+ // fmt.Println("DEBUG ----------------")
+ // fmt.Printf("prev = %+v prevPos: %s\n", prev, prevPos)
+ // fmt.Printf("cur = %+v curPos: %s\n", cur, curPos)
+ // fmt.Printf("next = %+v nextPos: %s\n", next, nextPos)
+
+ if curPos.Line+1 == nextPos.Line {
+ aligned = append(aligned, item)
+ index++
+ continue
+ }
+
+ if curPos.Line-1 == prevPos.Line {
+ aligned = append(aligned, item)
+ index++
+
+ // finish if we have a new line or comment next. This happens
+ // if the next item is not adjacent
+ if curPos.Line+1 != nextPos.Line {
+ break
+ }
+ continue
+ }
+
+ break
+ }
+
+		// put newlines if the items are between other non-aligned items.
+		// newlines are also added if there is a standalone comment already, so
+		// check for that too
+ if !commented && index != len(aligned) {
+ buf.WriteByte(newline)
+ }
+
+ if len(aligned) >= 1 {
+ p.prev = aligned[len(aligned)-1].Pos()
+
+ items := p.alignedItems(aligned)
+ buf.Write(p.indent(items))
+ } else {
+ p.prev = o.List.Items[index].Pos()
+
+ buf.Write(p.indent(p.objectItem(o.List.Items[index])))
+ index++
+ }
+
+ buf.WriteByte(newline)
+ }
+
+ buf.WriteString("}")
+ return buf.Bytes()
+}
+
+func (p *printer) alignedItems(items []*ast.ObjectItem) []byte {
+ var buf bytes.Buffer
+
+ // find the longest key and value length, needed for alignment
+ var longestKeyLen int // longest key length
+ var longestValLen int // longest value length
+ for _, item := range items {
+ key := len(item.Keys[0].Token.Text)
+ val := len(p.output(item.Val))
+
+ if key > longestKeyLen {
+ longestKeyLen = key
+ }
+
+ if val > longestValLen {
+ longestValLen = val
+ }
+ }
+
+ for i, item := range items {
+ if item.LeadComment != nil {
+ for _, comment := range item.LeadComment.List {
+ buf.WriteString(comment.Text)
+ buf.WriteByte(newline)
+ }
+ }
+
+ for i, k := range item.Keys {
+ keyLen := len(k.Token.Text)
+ buf.WriteString(k.Token.Text)
+ for i := 0; i < longestKeyLen-keyLen+1; i++ {
+ buf.WriteByte(blank)
+ }
+
+ // reach end of key
+ if i == len(item.Keys)-1 && len(item.Keys) == 1 {
+ buf.WriteString("=")
+ buf.WriteByte(blank)
+ }
+ }
+
+ val := p.output(item.Val)
+ valLen := len(val)
+ buf.Write(val)
+
+ if item.Val.Pos().Line == item.Keys[0].Pos().Line && item.LineComment != nil {
+ for i := 0; i < longestValLen-valLen+1; i++ {
+ buf.WriteByte(blank)
+ }
+
+ for _, comment := range item.LineComment.List {
+ buf.WriteString(comment.Text)
+ }
+ }
+
+ // do not print for the last item
+ if i != len(items)-1 {
+ buf.WriteByte(newline)
+ }
+ }
+
+ return buf.Bytes()
+}
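+
+// Example of the alignment produced above (illustrative): keys are padded to
+// the longest key, line comments to the longest value.
+//
+//	name    = "web" # comment
+//	replica = 3     # aligned comment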
+
+// list returns the printable HCL form of a list type.
+func (p *printer) list(l *ast.ListType) []byte {
+ if p.isSingleLineList(l) {
+ return p.singleLineList(l)
+ }
+
+ var buf bytes.Buffer
+ buf.WriteString("[")
+ buf.WriteByte(newline)
+
+ var longestLine int
+ for _, item := range l.List {
+ // for now we assume that the list only contains literal types
+ if lit, ok := item.(*ast.LiteralType); ok {
+ lineLen := len(lit.Token.Text)
+ if lineLen > longestLine {
+ longestLine = lineLen
+ }
+ }
+ }
+
+ haveEmptyLine := false
+ for i, item := range l.List {
+ // If we have a lead comment, then we want to write that first
+ leadComment := false
+ if lit, ok := item.(*ast.LiteralType); ok && lit.LeadComment != nil {
+ leadComment = true
+
+ // Ensure an empty line before every element with a
+ // lead comment (except the first item in a list).
+ if !haveEmptyLine && i != 0 {
+ buf.WriteByte(newline)
+ }
+
+ for _, comment := range lit.LeadComment.List {
+ buf.Write(p.indent([]byte(comment.Text)))
+ buf.WriteByte(newline)
+ }
+ }
+
+ // also indent each line
+ val := p.output(item)
+ curLen := len(val)
+ buf.Write(p.indent(val))
+
+ // if this item is a heredoc, then we output the comma on
+ // the next line. This is the only case this happens.
+ comma := []byte{','}
+ if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC {
+ buf.WriteByte(newline)
+ comma = p.indent(comma)
+ }
+
+ buf.Write(comma)
+
+ if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil {
+ // if the next item doesn't have any comments, do not align
+ buf.WriteByte(blank) // align one space
+ for i := 0; i < longestLine-curLen; i++ {
+ buf.WriteByte(blank)
+ }
+
+ for _, comment := range lit.LineComment.List {
+ buf.WriteString(comment.Text)
+ }
+ }
+
+ buf.WriteByte(newline)
+
+ // Ensure an empty line after every element with a
+ // lead comment (except the first item in a list).
+ haveEmptyLine = leadComment && i != len(l.List)-1
+ if haveEmptyLine {
+ buf.WriteByte(newline)
+ }
+ }
+
+ buf.WriteString("]")
+ return buf.Bytes()
+}
+
+// isSingleLineList returns true if:
+// * the list was previously formatted entirely on one line
+// * it consists entirely of literals
+// * there are either no heredoc strings or the list has exactly one element
+// * there are no line comments
+func (printer) isSingleLineList(l *ast.ListType) bool {
+ for _, item := range l.List {
+ if item.Pos().Line != l.Lbrack.Line {
+ return false
+ }
+
+ lit, ok := item.(*ast.LiteralType)
+ if !ok {
+ return false
+ }
+
+ if lit.Token.Type == token.HEREDOC && len(l.List) != 1 {
+ return false
+ }
+
+ if lit.LineComment != nil {
+ return false
+ }
+ }
+
+ return true
+}
+
+// singleLineList prints a simple single line list.
+// For a definition of "simple", see isSingleLineList above.
+func (p *printer) singleLineList(l *ast.ListType) []byte {
+ buf := &bytes.Buffer{}
+
+ buf.WriteString("[")
+ for i, item := range l.List {
+ if i != 0 {
+ buf.WriteString(", ")
+ }
+
+ // Output the item itself
+ buf.Write(p.output(item))
+
+ // The heredoc marker needs to be at the end of line.
+ if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC {
+ buf.WriteByte(newline)
+ }
+ }
+
+ buf.WriteString("]")
+ return buf.Bytes()
+}
+
+// indent indents each non-empty line of the given buffer
+func (p *printer) indent(buf []byte) []byte {
+ var prefix []byte
+ if p.cfg.SpacesWidth != 0 {
+ for i := 0; i < p.cfg.SpacesWidth; i++ {
+ prefix = append(prefix, blank)
+ }
+ } else {
+ prefix = []byte{tab}
+ }
+
+ var res []byte
+ bol := true
+ for _, c := range buf {
+ if bol && c != '\n' {
+ res = append(res, prefix...)
+ }
+
+ res = append(res, c)
+ bol = c == '\n'
+ }
+ return res
+}
+
+// unindent removes all the indentation from the tombstoned lines
+func (p *printer) unindent(buf []byte) []byte {
+ var res []byte
+ for i := 0; i < len(buf); i++ {
+ skip := len(buf)-i <= len(unindent)
+ if !skip {
+ skip = !bytes.Equal(unindent, buf[i:i+len(unindent)])
+ }
+ if skip {
+ res = append(res, buf[i])
+ continue
+ }
+
+ // We have a marker. we have to backtrace here and clean out
+ // any whitespace ahead of our tombstone up to a \n
+ for j := len(res) - 1; j >= 0; j-- {
+ if res[j] == '\n' {
+ break
+ }
+
+ res = res[:j]
+ }
+
+ // Skip the entire unindent marker
+ i += len(unindent) - 1
+ }
+
+ return res
+}
+
+// heredocIndent marks the 2nd and subsequent lines as unindentable
+func (p *printer) heredocIndent(buf []byte) []byte {
+ var res []byte
+ bol := false
+ for _, c := range buf {
+ if bol && c != '\n' {
+ res = append(res, unindent...)
+ }
+ res = append(res, c)
+ bol = c == '\n'
+ }
+ return res
+}
+
+// isSingleLineObject tells whether the given object item is a single
+// line object such as "obj {}".
+//
+// A single line object:
+//
+// * has no lead comments (these would force multiple lines)
+// * has no assignment
+// * has no values in the stanza (within {})
+//
+func (p *printer) isSingleLineObject(val *ast.ObjectItem) bool {
+ // If there is a lead comment, can't be one line
+ if val.LeadComment != nil {
+ return false
+ }
+
+ // If there is assignment, we always break by line
+ if val.Assign.IsValid() {
+ return false
+ }
+
+ // If it isn't an object type, then its not a single line object
+ ot, ok := val.Val.(*ast.ObjectType)
+ if !ok {
+ return false
+ }
+
+ // If the object has no items, it is single line!
+ return len(ot.List.Items) == 0
+}
+
+func lines(txt string) int {
+ endline := 1
+ for i := 0; i < len(txt); i++ {
+ if txt[i] == '\n' {
+ endline++
+ }
+ }
+ return endline
+}
+
+// ----------------------------------------------------------------------------
+// Tracing support
+
+func (p *printer) printTrace(a ...interface{}) {
+ if !p.enableTrace {
+ return
+ }
+
+ const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+ const n = len(dots)
+ i := 2 * p.indentTrace
+ for i > n {
+ fmt.Print(dots)
+ i -= n
+ }
+ // i <= n
+ fmt.Print(dots[0:i])
+ fmt.Println(a...)
+}
+
+func trace(p *printer, msg string) *printer {
+ p.printTrace(msg, "(")
+ p.indentTrace++
+ return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *printer) {
+ p.indentTrace--
+ p.printTrace(")")
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go b/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go
new file mode 100644
index 0000000..6617ab8
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/printer/printer.go
@@ -0,0 +1,66 @@
+// Package printer implements printing of AST nodes to HCL format.
+package printer
+
+import (
+ "bytes"
+ "io"
+ "text/tabwriter"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ "github.com/hashicorp/hcl/hcl/parser"
+)
+
+var DefaultConfig = Config{
+ SpacesWidth: 2,
+}
+
+// A Config node controls the output of Fprint.
+type Config struct {
+ SpacesWidth int // if set, it will use spaces instead of tabs for alignment
+}
+
+func (c *Config) Fprint(output io.Writer, node ast.Node) error {
+ p := &printer{
+ cfg: *c,
+ comments: make([]*ast.CommentGroup, 0),
+ standaloneComments: make([]*ast.CommentGroup, 0),
+ // enableTrace: true,
+ }
+
+ p.collectComments(node)
+
+ if _, err := output.Write(p.unindent(p.output(node))); err != nil {
+ return err
+ }
+
+ // flush tabwriter, if any
+ var err error
+ if tw, _ := output.(*tabwriter.Writer); tw != nil {
+ err = tw.Flush()
+ }
+
+ return err
+}
+
+// Fprint "pretty-prints" an HCL node to output.
+// It calls Config.Fprint with default settings.
+func Fprint(output io.Writer, node ast.Node) error {
+ return DefaultConfig.Fprint(output, node)
+}
+
+// Format formats src HCL and returns the result.
+func Format(src []byte) ([]byte, error) {
+ node, err := parser.Parse(src)
+ if err != nil {
+ return nil, err
+ }
+
+ var buf bytes.Buffer
+ if err := DefaultConfig.Fprint(&buf, node); err != nil {
+ return nil, err
+ }
+
+ // Add trailing newline to result
+ buf.WriteString("\n")
+ return buf.Bytes(), nil
+}
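+
+// A minimal formatting sketch (illustrative):
+//
+//	out, err := Format([]byte(`name="example"`))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Print(string(out)) // prints: name = "example"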
diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
new file mode 100644
index 0000000..624a18f
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
@@ -0,0 +1,652 @@
+// Package scanner implements a scanner for HCL (HashiCorp Configuration
+// Language) source text.
+package scanner
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "regexp"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/hashicorp/hcl/hcl/token"
+)
+
+// eof represents a marker rune for the end of the reader.
+const eof = rune(0)
+
+// Scanner defines a lexical scanner
+type Scanner struct {
+ buf *bytes.Buffer // Source buffer for advancing and scanning
+ src []byte // Source buffer for immutable access
+
+ // Source Position
+ srcPos token.Pos // current position
+ prevPos token.Pos // previous position, used for peek() method
+
+ lastCharLen int // length of last character in bytes
+ lastLineLen int // length of last line in characters (for correct column reporting)
+
+ tokStart int // token text start position
+ tokEnd int // token text end position
+
+ // Error is called for each error encountered. If no Error
+ // function is set, the error is reported to os.Stderr.
+ Error func(pos token.Pos, msg string)
+
+ // ErrorCount is incremented by one for each error encountered.
+ ErrorCount int
+
+ // tokPos is the start position of most recently scanned token; set by
+ // Scan. The Filename field is always left untouched by the Scanner. If
+ // an error is reported (via Error) and Position is invalid, the scanner is
+ // not inside a token.
+ tokPos token.Pos
+}
+
+// New creates and initializes a new instance of Scanner using src as
+// its source content.
+func New(src []byte) *Scanner {
+	// even though we accept a src, we read from an io.Reader-compatible type
+	// (*bytes.Buffer). So in the future we might easily change it to a
+	// streaming read.
+ b := bytes.NewBuffer(src)
+ s := &Scanner{
+ buf: b,
+ src: src,
+ }
+
+ // srcPosition always starts with 1
+ s.srcPos.Line = 1
+ return s
+}
+
+// next reads the next rune from the buffered reader. Returns rune(0) if
+// an error occurs (or io.EOF is returned).
+func (s *Scanner) next() rune {
+ ch, size, err := s.buf.ReadRune()
+ if err != nil {
+ // advance for error reporting
+ s.srcPos.Column++
+ s.srcPos.Offset += size
+ s.lastCharLen = size
+ return eof
+ }
+
+ // remember last position
+ s.prevPos = s.srcPos
+
+ s.srcPos.Column++
+ s.lastCharLen = size
+ s.srcPos.Offset += size
+
+ if ch == utf8.RuneError && size == 1 {
+ s.err("illegal UTF-8 encoding")
+ return ch
+ }
+
+ if ch == '\n' {
+ s.srcPos.Line++
+ s.lastLineLen = s.srcPos.Column
+ s.srcPos.Column = 0
+ }
+
+ if ch == '\x00' {
+ s.err("unexpected null character (0x00)")
+ return eof
+ }
+
+ if ch == '\uE123' {
+ s.err("unicode code point U+E123 reserved for internal use")
+ return utf8.RuneError
+ }
+
+ // debug
+ // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
+ return ch
+}
+
+// unread unreads the previous read Rune and updates the source position
+func (s *Scanner) unread() {
+ if err := s.buf.UnreadRune(); err != nil {
+ panic(err) // this is user fault, we should catch it
+ }
+ s.srcPos = s.prevPos // put back last position
+}
+
+// peek returns the next rune without advancing the reader.
+func (s *Scanner) peek() rune {
+ peek, _, err := s.buf.ReadRune()
+ if err != nil {
+ return eof
+ }
+
+ s.buf.UnreadRune()
+ return peek
+}
+
+// Scan scans the next token and returns the token.
+func (s *Scanner) Scan() token.Token {
+ ch := s.next()
+
+ // skip white space
+ for isWhitespace(ch) {
+ ch = s.next()
+ }
+
+ var tok token.Type
+
+ // token text markings
+ s.tokStart = s.srcPos.Offset - s.lastCharLen
+
+	// token position: the initial next() moves the offset by one (the size of
+	// the rune, actually), though we are interested in the starting point
+ s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
+ if s.srcPos.Column > 0 {
+ // common case: last character was not a '\n'
+ s.tokPos.Line = s.srcPos.Line
+ s.tokPos.Column = s.srcPos.Column
+ } else {
+ // last character was a '\n'
+ // (we cannot be at the beginning of the source
+ // since we have called next() at least once)
+ s.tokPos.Line = s.srcPos.Line - 1
+ s.tokPos.Column = s.lastLineLen
+ }
+
+ switch {
+ case isLetter(ch):
+ tok = token.IDENT
+ lit := s.scanIdentifier()
+ if lit == "true" || lit == "false" {
+ tok = token.BOOL
+ }
+ case isDecimal(ch):
+ tok = s.scanNumber(ch)
+ default:
+ switch ch {
+ case eof:
+ tok = token.EOF
+ case '"':
+ tok = token.STRING
+ s.scanString()
+ case '#', '/':
+ tok = token.COMMENT
+ s.scanComment(ch)
+ case '.':
+ tok = token.PERIOD
+ ch = s.peek()
+ if isDecimal(ch) {
+ tok = token.FLOAT
+ ch = s.scanMantissa(ch)
+ ch = s.scanExponent(ch)
+ }
+ case '<':
+ tok = token.HEREDOC
+ s.scanHeredoc()
+ case '[':
+ tok = token.LBRACK
+ case ']':
+ tok = token.RBRACK
+ case '{':
+ tok = token.LBRACE
+ case '}':
+ tok = token.RBRACE
+ case ',':
+ tok = token.COMMA
+ case '=':
+ tok = token.ASSIGN
+ case '+':
+ tok = token.ADD
+ case '-':
+ if isDecimal(s.peek()) {
+ ch := s.next()
+ tok = s.scanNumber(ch)
+ } else {
+ tok = token.SUB
+ }
+ default:
+ s.err("illegal char")
+ }
+ }
+
+ // finish token ending
+ s.tokEnd = s.srcPos.Offset
+
+ // create token literal
+ var tokenText string
+ if s.tokStart >= 0 {
+ tokenText = string(s.src[s.tokStart:s.tokEnd])
+ }
+ s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
+
+ return token.Token{
+ Type: tok,
+ Pos: s.tokPos,
+ Text: tokenText,
+ }
+}
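+
+// A minimal scanning loop (illustrative):
+//
+//	s := New([]byte(`name = "example"`))
+//	for {
+//		tok := s.Scan()
+//		if tok.Type == token.EOF {
+//			break
+//		}
+//		fmt.Println(tok.Type, tok.Text)
+//	}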
+
+func (s *Scanner) scanComment(ch rune) {
+ // single line comments
+ if ch == '#' || (ch == '/' && s.peek() != '*') {
+ if ch == '/' && s.peek() != '/' {
+ s.err("expected '/' for comment")
+ return
+ }
+
+ ch = s.next()
+ for ch != '\n' && ch >= 0 && ch != eof {
+ ch = s.next()
+ }
+ if ch != eof && ch >= 0 {
+ s.unread()
+ }
+ return
+ }
+
+	// be sure we get the character after /*. This allows us to find comments
+	// that are not terminated
+ if ch == '/' {
+ s.next()
+ ch = s.next() // read character after "/*"
+ }
+
+ // look for /* - style comments
+ for {
+ if ch < 0 || ch == eof {
+ s.err("comment not terminated")
+ break
+ }
+
+ ch0 := ch
+ ch = s.next()
+ if ch0 == '*' && ch == '/' {
+ break
+ }
+ }
+}
+
+// scanNumber scans an HCL number definition starting with the given rune
+func (s *Scanner) scanNumber(ch rune) token.Type {
+ if ch == '0' {
+ // check for hexadecimal, octal or float
+ ch = s.next()
+ if ch == 'x' || ch == 'X' {
+ // hexadecimal
+ ch = s.next()
+ found := false
+ for isHexadecimal(ch) {
+ ch = s.next()
+ found = true
+ }
+
+ if !found {
+ s.err("illegal hexadecimal number")
+ }
+
+ if ch != eof {
+ s.unread()
+ }
+
+ return token.NUMBER
+ }
+
+		// now it's either something like: 0421 (octal) or 0.1231 (float)
+ illegalOctal := false
+ for isDecimal(ch) {
+ ch = s.next()
+ if ch == '8' || ch == '9' {
+ // this is just a possibility. For example 0159 is illegal, but
+ // 0159.23 is valid. So we mark a possible illegal octal. If
+ // the next character is not a period, we'll print the error.
+ illegalOctal = true
+ }
+ }
+
+ if ch == 'e' || ch == 'E' {
+ ch = s.scanExponent(ch)
+ return token.FLOAT
+ }
+
+ if ch == '.' {
+ ch = s.scanFraction(ch)
+
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ ch = s.scanExponent(ch)
+ }
+ return token.FLOAT
+ }
+
+ if illegalOctal {
+ s.err("illegal octal number")
+ }
+
+ if ch != eof {
+ s.unread()
+ }
+ return token.NUMBER
+ }
+
+ s.scanMantissa(ch)
+ ch = s.next() // seek forward
+ if ch == 'e' || ch == 'E' {
+ ch = s.scanExponent(ch)
+ return token.FLOAT
+ }
+
+ if ch == '.' {
+ ch = s.scanFraction(ch)
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ ch = s.scanExponent(ch)
+ }
+ return token.FLOAT
+ }
+
+ if ch != eof {
+ s.unread()
+ }
+ return token.NUMBER
+}
+
+// scanMantissa scans the mantissa beginning from the rune. It returns the next
+// non-decimal rune. It's used to determine whether it's a fraction or exponent.
+func (s *Scanner) scanMantissa(ch rune) rune {
+ scanned := false
+ for isDecimal(ch) {
+ ch = s.next()
+ scanned = true
+ }
+
+ if scanned && ch != eof {
+ s.unread()
+ }
+ return ch
+}
+
+// scanFraction scans the fraction after the '.' rune
+func (s *Scanner) scanFraction(ch rune) rune {
+ if ch == '.' {
+ ch = s.peek() // we peek just to see if we can move forward
+ ch = s.scanMantissa(ch)
+ }
+ return ch
+}
+
+// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
+// rune.
+func (s *Scanner) scanExponent(ch rune) rune {
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ if ch == '-' || ch == '+' {
+ ch = s.next()
+ }
+ ch = s.scanMantissa(ch)
+ }
+ return ch
+}
+
+// scanHeredoc scans a heredoc string
+func (s *Scanner) scanHeredoc() {
+ // Scan the second '<' in example: '<<EOF'
+ if s.next() != '<' {
+ s.err("heredoc expected second '<', didn't see it")
+ return
+ }
+
+ // Get the original offset so we can read just the heredoc ident
+ offs := s.srcPos.Offset
+
+ // Scan the identifier
+ ch := s.next()
+
+ // Indented heredoc syntax
+ if ch == '-' {
+ ch = s.next()
+ }
+
+ for isLetter(ch) || isDigit(ch) {
+ ch = s.next()
+ }
+
+ // If we reached an EOF then that is not good
+ if ch == eof {
+ s.err("heredoc not terminated")
+ return
+ }
+
+ // Ignore the '\r' in Windows line endings
+ if ch == '\r' {
+ if s.peek() == '\n' {
+ ch = s.next()
+ }
+ }
+
+ // If we didn't reach a newline then that is also not good
+ if ch != '\n' {
+ s.err("invalid characters in heredoc anchor")
+ return
+ }
+
+ // Read the identifier
+ identBytes := s.src[offs : s.srcPos.Offset-s.lastCharLen]
+ if len(identBytes) == 0 || (len(identBytes) == 1 && identBytes[0] == '-') {
+ s.err("zero-length heredoc anchor")
+ return
+ }
+
+ var identRegexp *regexp.Regexp
+ if identBytes[0] == '-' {
+ identRegexp = regexp.MustCompile(fmt.Sprintf(`^[[:space:]]*%s\r*\z`, identBytes[1:]))
+ } else {
+ identRegexp = regexp.MustCompile(fmt.Sprintf(`^[[:space:]]*%s\r*\z`, identBytes))
+ }
+
+ // Read the actual string value
+ lineStart := s.srcPos.Offset
+ for {
+ ch := s.next()
+
+ // Special newline handling.
+ if ch == '\n' {
+ // Math is fast, so we first compare the byte counts to see if we have a chance
+ // of seeing the same identifier - if the length is less than the number of bytes
+ // in the identifier, this cannot be a valid terminator.
+ lineBytesLen := s.srcPos.Offset - s.lastCharLen - lineStart
+ if lineBytesLen >= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) {
+ break
+ }
+
+ // Not an anchor match, record the start of a new line
+ lineStart = s.srcPos.Offset
+ }
+
+ if ch == eof {
+ s.err("heredoc not terminated")
+ return
+ }
+ }
+
+ return
+}
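+
+// Heredoc forms accepted above (illustrative):
+//
+//	value = <<EOF
+//	plain heredoc, terminated by the anchor on its own line
+//	EOF
+//
+//	value = <<-EOF
+//		indented heredoc; whitespace may precede the closing anchor
+//		EOF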
+
+// scanString scans a quoted string
+func (s *Scanner) scanString() {
+ braces := 0
+ for {
+ // '"' opening already consumed
+ // read character after quote
+ ch := s.next()
+
+ if (ch == '\n' && braces == 0) || ch < 0 || ch == eof {
+ s.err("literal not terminated")
+ return
+ }
+
+ if ch == '"' && braces == 0 {
+ break
+ }
+
+		// If we're going into a ${} then we can ignore quotes for a while
+ if braces == 0 && ch == '$' && s.peek() == '{' {
+ braces++
+ s.next()
+ } else if braces > 0 && ch == '{' {
+ braces++
+ }
+ if braces > 0 && ch == '}' {
+ braces--
+ }
+
+ if ch == '\\' {
+ s.scanEscape()
+ }
+ }
+
+ return
+}
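+
+// Interpolation handling above (illustrative): once inside ${...}, quotes
+// and newlines no longer terminate the literal, so nested quoting such as
+// the following scans as a single string:
+//
+//	value = "prefix ${lookup(var.map, "key")} suffix"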
+
+// scanEscape scans an escape sequence
+func (s *Scanner) scanEscape() rune {
+ // http://en.cppreference.com/w/cpp/language/escape
+	ch := s.next() // read character after '\'
+ switch ch {
+ case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
+ // nothing to do
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ // octal notation
+ ch = s.scanDigits(ch, 8, 3)
+ case 'x':
+		// hexadecimal notation
+ ch = s.scanDigits(s.next(), 16, 2)
+ case 'u':
+ // universal character name
+ ch = s.scanDigits(s.next(), 16, 4)
+ case 'U':
+ // universal character name
+ ch = s.scanDigits(s.next(), 16, 8)
+ default:
+ s.err("illegal char escape")
+ }
+ return ch
+}
+
+// scanDigits scans a rune with the given base up to n times. For example, an
+// octal escape such as \123 results in scanDigits(ch, 8, 3)
+func (s *Scanner) scanDigits(ch rune, base, n int) rune {
+ start := n
+ for n > 0 && digitVal(ch) < base {
+ ch = s.next()
+ if ch == eof {
+ // If we see an EOF, we halt any more scanning of digits
+ // immediately.
+ break
+ }
+
+ n--
+ }
+ if n > 0 {
+ s.err("illegal char escape")
+ }
+
+ if n != start && ch != eof {
+ // we scanned all digits, put the last non digit char back,
+ // only if we read anything at all
+ s.unread()
+ }
+
+ return ch
+}
+
+// scanIdentifier scans an identifier and returns the literal string
+func (s *Scanner) scanIdentifier() string {
+ offs := s.srcPos.Offset - s.lastCharLen
+ ch := s.next()
+ for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' {
+ ch = s.next()
+ }
+
+ if ch != eof {
+ s.unread() // we got identifier, put back latest char
+ }
+
+ return string(s.src[offs:s.srcPos.Offset])
+}
+
+// recentPosition returns the position of the character immediately after the
+// character or token returned by the last call to Scan.
+func (s *Scanner) recentPosition() (pos token.Pos) {
+ pos.Offset = s.srcPos.Offset - s.lastCharLen
+ switch {
+ case s.srcPos.Column > 0:
+ // common case: last character was not a '\n'
+ pos.Line = s.srcPos.Line
+ pos.Column = s.srcPos.Column
+ case s.lastLineLen > 0:
+ // last character was a '\n'
+ // (we cannot be at the beginning of the source
+ // since we have called next() at least once)
+ pos.Line = s.srcPos.Line - 1
+ pos.Column = s.lastLineLen
+ default:
+ // at the beginning of the source
+ pos.Line = 1
+ pos.Column = 1
+ }
+ return
+}
+
+// err reports a scanning error to the s.Error function. If the function is
+// not defined, it prints the error to os.Stderr by default
+func (s *Scanner) err(msg string) {
+ s.ErrorCount++
+ pos := s.recentPosition()
+
+ if s.Error != nil {
+ s.Error(pos, msg)
+ return
+ }
+
+ fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
+}
+
+// isLetter returns true if the given rune is a letter
+func isLetter(ch rune) bool {
+ return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
+}
+
+// isDigit returns true if the given rune is a decimal digit
+func isDigit(ch rune) bool {
+ return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
+}
+
+// isDecimal returns true if the given rune is a decimal digit
+func isDecimal(ch rune) bool {
+ return '0' <= ch && ch <= '9'
+}
+
+// isHexadecimal returns true if the given rune is a hexadecimal digit
+func isHexadecimal(ch rune) bool {
+ return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
+}
+
+// isWhitespace returns true if the rune is a space, tab, newline or carriage return
+func isWhitespace(ch rune) bool {
+ return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
+}
+
+// digitVal returns the integer value of a given octal, decimal or hexadecimal rune
+func digitVal(ch rune) int {
+ switch {
+ case '0' <= ch && ch <= '9':
+ return int(ch - '0')
+ case 'a' <= ch && ch <= 'f':
+ return int(ch - 'a' + 10)
+ case 'A' <= ch && ch <= 'F':
+ return int(ch - 'A' + 10)
+ }
+ return 16 // larger than any legal digit val
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
new file mode 100644
index 0000000..5f981ea
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
@@ -0,0 +1,241 @@
+package strconv
+
+import (
+ "errors"
+ "unicode/utf8"
+)
+
+// ErrSyntax indicates that a value does not have the right syntax for the target type.
+var ErrSyntax = errors.New("invalid syntax")
+
+// Unquote interprets s as a double-quoted HCL string literal, returning
+// the string value that s quotes. Unlike the standard library's
+// strconv.Unquote, it leaves ${...} interpolation sequences intact and
+// rejects single-quoted and backquoted literals.
+func Unquote(s string) (t string, err error) {
+ n := len(s)
+ if n < 2 {
+ return "", ErrSyntax
+ }
+ quote := s[0]
+ if quote != s[n-1] {
+ return "", ErrSyntax
+ }
+ s = s[1 : n-1]
+
+ if quote != '"' {
+ return "", ErrSyntax
+ }
+ if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') {
+ return "", ErrSyntax
+ }
+
+ // Is it trivial? Avoid allocation.
+ if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') {
+ switch quote {
+ case '"':
+ return s, nil
+ case '\'':
+ r, size := utf8.DecodeRuneInString(s)
+ if size == len(s) && (r != utf8.RuneError || size != 1) {
+ return s, nil
+ }
+ }
+ }
+
+ var runeTmp [utf8.UTFMax]byte
+ buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations.
+ for len(s) > 0 {
+ // If we're starting a '${}' then let it through un-unquoted.
+ // Specifically: we don't unquote any characters within the `${}`
+ // section.
+ if s[0] == '$' && len(s) > 1 && s[1] == '{' {
+ buf = append(buf, '$', '{')
+ s = s[2:]
+
+ // Continue reading until we find the closing brace, copying as-is
+ braces := 1
+ for len(s) > 0 && braces > 0 {
+ r, size := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError {
+ return "", ErrSyntax
+ }
+
+ s = s[size:]
+
+ n := utf8.EncodeRune(runeTmp[:], r)
+ buf = append(buf, runeTmp[:n]...)
+
+ switch r {
+ case '{':
+ braces++
+ case '}':
+ braces--
+ }
+ }
+ if braces != 0 {
+ return "", ErrSyntax
+ }
+ if len(s) == 0 {
+ // If there's no string left, we're done!
+ break
+ } else {
+ // If there's more left, we need to pop back up to the top of the loop
+ // in case there's another interpolation in this string.
+ continue
+ }
+ }
+
+ if s[0] == '\n' {
+ return "", ErrSyntax
+ }
+
+ c, multibyte, ss, err := unquoteChar(s, quote)
+ if err != nil {
+ return "", err
+ }
+ s = ss
+ if c < utf8.RuneSelf || !multibyte {
+ buf = append(buf, byte(c))
+ } else {
+ n := utf8.EncodeRune(runeTmp[:], c)
+ buf = append(buf, runeTmp[:n]...)
+ }
+ if quote == '\'' && len(s) != 0 {
+ // single-quoted must be single character
+ return "", ErrSyntax
+ }
+ }
+ return string(buf), nil
+}
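+
+// unquoteExample is an illustrative sketch, not part of the upstream
+// source: it demonstrates the behavior documented on Unquote above.
+// Ordinary escapes are decoded, ${...} interpolations pass through
+// verbatim, and anything that is not a double-quoted literal fails.
+// The inputs are hypothetical.
+func unquoteExample() {
+	decoded, _ := Unquote(`"a\tb"`)       // escapes decoded: "a<TAB>b"
+	interp, _ := Unquote(`"${var.x} ok"`) // interpolation kept verbatim
+	_, err := Unquote(`'x'`)              // single quotes: ErrSyntax
+	_, _, _ = decoded, interp, err
+}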
+
+// contains reports whether the string contains the byte c.
+func contains(s string, c byte) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] == c {
+ return true
+ }
+ }
+ return false
+}
+
+func unhex(b byte) (v rune, ok bool) {
+ c := rune(b)
+ switch {
+ case '0' <= c && c <= '9':
+ return c - '0', true
+ case 'a' <= c && c <= 'f':
+ return c - 'a' + 10, true
+ case 'A' <= c && c <= 'F':
+ return c - 'A' + 10, true
+ }
+ return
+}
+
+func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) {
+ // easy cases
+ switch c := s[0]; {
+ case c == quote && (quote == '\'' || quote == '"'):
+ err = ErrSyntax
+ return
+ case c >= utf8.RuneSelf:
+ r, size := utf8.DecodeRuneInString(s)
+ return r, true, s[size:], nil
+ case c != '\\':
+ return rune(s[0]), false, s[1:], nil
+ }
+
+ // hard case: c is backslash
+ if len(s) <= 1 {
+ err = ErrSyntax
+ return
+ }
+ c := s[1]
+ s = s[2:]
+
+ switch c {
+ case 'a':
+ value = '\a'
+ case 'b':
+ value = '\b'
+ case 'f':
+ value = '\f'
+ case 'n':
+ value = '\n'
+ case 'r':
+ value = '\r'
+ case 't':
+ value = '\t'
+ case 'v':
+ value = '\v'
+ case 'x', 'u', 'U':
+ n := 0
+ switch c {
+ case 'x':
+ n = 2
+ case 'u':
+ n = 4
+ case 'U':
+ n = 8
+ }
+ var v rune
+ if len(s) < n {
+ err = ErrSyntax
+ return
+ }
+ for j := 0; j < n; j++ {
+ x, ok := unhex(s[j])
+ if !ok {
+ err = ErrSyntax
+ return
+ }
+ v = v<<4 | x
+ }
+ s = s[n:]
+ if c == 'x' {
+ // single-byte string, possibly not UTF-8
+ value = v
+ break
+ }
+ if v > utf8.MaxRune {
+ err = ErrSyntax
+ return
+ }
+ value = v
+ multibyte = true
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ v := rune(c) - '0'
+ if len(s) < 2 {
+ err = ErrSyntax
+ return
+ }
+ for j := 0; j < 2; j++ { // one digit already; two more
+ x := rune(s[j]) - '0'
+ if x < 0 || x > 7 {
+ err = ErrSyntax
+ return
+ }
+ v = (v << 3) | x
+ }
+ s = s[2:]
+ if v > 255 {
+ err = ErrSyntax
+ return
+ }
+ value = v
+ case '\\':
+ value = '\\'
+ case '\'', '"':
+ if c != quote {
+ err = ErrSyntax
+ return
+ }
+ value = rune(c)
+ default:
+ err = ErrSyntax
+ return
+ }
+ tail = s
+ return
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/position.go b/vendor/github.com/hashicorp/hcl/hcl/token/position.go
new file mode 100644
index 0000000..59c1bb7
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/token/position.go
@@ -0,0 +1,46 @@
+package token
+
+import "fmt"
+
+// Pos describes an arbitrary source position
+// including the file, line, and column location.
+// A Position is valid if the line number is > 0.
+type Pos struct {
+ Filename string // filename, if any
+ Offset int // offset, starting at 0
+ Line int // line number, starting at 1
+ Column int // column number, starting at 1 (character count)
+}
+
+// IsValid returns true if the position is valid.
+func (p *Pos) IsValid() bool { return p.Line > 0 }
+
+// String returns a string in one of several forms:
+//
+// file:line:column valid position with file name
+// line:column valid position without file name
+// file invalid position with file name
+// - invalid position without file name
+func (p Pos) String() string {
+ s := p.Filename
+ if p.IsValid() {
+ if s != "" {
+ s += ":"
+ }
+ s += fmt.Sprintf("%d:%d", p.Line, p.Column)
+ }
+ if s == "" {
+ s = "-"
+ }
+ return s
+}
+
+// Before reports whether the position p is before u.
+func (p Pos) Before(u Pos) bool {
+ return u.Offset > p.Offset || u.Line > p.Line
+}
+
+// After reports whether the position p is after u.
+func (p Pos) After(u Pos) bool {
+ return u.Offset < p.Offset || u.Line < p.Line
+}
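+
+// posExample is an illustrative sketch, not part of the upstream source:
+// it shows the String forms documented above.
+func posExample() {
+	fmt.Println(Pos{Filename: "main.hcl", Line: 3, Column: 7}) // main.hcl:3:7
+	fmt.Println(Pos{Line: 1, Column: 1})                       // 1:1 (no file name)
+	fmt.Println(Pos{})                                         // - (invalid position)
+}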
diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/token.go b/vendor/github.com/hashicorp/hcl/hcl/token/token.go
new file mode 100644
index 0000000..e37c066
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/hcl/token/token.go
@@ -0,0 +1,219 @@
+// Package token defines constants representing the lexical tokens for HCL
+// (HashiCorp Configuration Language)
+package token
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ hclstrconv "github.com/hashicorp/hcl/hcl/strconv"
+)
+
+// Token defines a single HCL token which can be obtained via the Scanner
+type Token struct {
+ Type Type
+ Pos Pos
+ Text string
+ JSON bool
+}
+
+// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
+type Type int
+
+const (
+ // Special tokens
+ ILLEGAL Type = iota
+ EOF
+ COMMENT
+
+ identifier_beg
+ IDENT // literals
+ literal_beg
+ NUMBER // 12345
+ FLOAT // 123.45
+ BOOL // true,false
+ STRING // "abc"
+ HEREDOC // <<FOO\nbar\nFOO
+ literal_end
+ identifier_end
+
+ operator_beg
+ LBRACK // [
+ LBRACE // {
+ COMMA // ,
+ PERIOD // .
+
+ RBRACK // ]
+ RBRACE // }
+
+ ASSIGN // =
+ ADD // +
+ SUB // -
+ operator_end
+)
+
+var tokens = [...]string{
+ ILLEGAL: "ILLEGAL",
+
+ EOF: "EOF",
+ COMMENT: "COMMENT",
+
+ IDENT: "IDENT",
+ NUMBER: "NUMBER",
+ FLOAT: "FLOAT",
+ BOOL: "BOOL",
+ STRING: "STRING",
+
+ LBRACK: "LBRACK",
+ LBRACE: "LBRACE",
+ COMMA: "COMMA",
+ PERIOD: "PERIOD",
+ HEREDOC: "HEREDOC",
+
+ RBRACK: "RBRACK",
+ RBRACE: "RBRACE",
+
+ ASSIGN: "ASSIGN",
+ ADD: "ADD",
+ SUB: "SUB",
+}
+
+// String returns the string corresponding to the token tok.
+func (t Type) String() string {
+ s := ""
+ if 0 <= t && t < Type(len(tokens)) {
+ s = tokens[t]
+ }
+ if s == "" {
+ s = "token(" + strconv.Itoa(int(t)) + ")"
+ }
+ return s
+}
+
+// IsIdentifier returns true for tokens corresponding to identifiers and basic
+// type literals; it returns false otherwise.
+func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end }
+
+// IsLiteral returns true for tokens corresponding to basic type literals; it
+// returns false otherwise.
+func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end }
+
+// IsOperator returns true for tokens corresponding to operators and
+// delimiters; it returns false otherwise.
+func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end }
+
+// String returns a human-readable representation of the token,
+// containing its position, type, and literal text.
+func (t Token) String() string {
+ return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text)
+}
+
+// Value returns the properly typed value for this token. The type of
+// the returned interface{} is guaranteed based on the Type field.
+//
+// This can only be called for literal types. If it is called for any other
+// type, this will panic.
+func (t Token) Value() interface{} {
+ switch t.Type {
+ case BOOL:
+ if t.Text == "true" {
+ return true
+ } else if t.Text == "false" {
+ return false
+ }
+
+ panic("unknown bool value: " + t.Text)
+ case FLOAT:
+ v, err := strconv.ParseFloat(t.Text, 64)
+ if err != nil {
+ panic(err)
+ }
+
+ return float64(v)
+ case NUMBER:
+ v, err := strconv.ParseInt(t.Text, 0, 64)
+ if err != nil {
+ panic(err)
+ }
+
+ return int64(v)
+ case IDENT:
+ return t.Text
+ case HEREDOC:
+ return unindentHeredoc(t.Text)
+ case STRING:
+ // Determine the Unquote method to use. If it came from JSON,
+ // then we need to use the built-in unquote since we have to
+ // escape interpolations there.
+ f := hclstrconv.Unquote
+ if t.JSON {
+ f = strconv.Unquote
+ }
+
+ // This case occurs if json null is used
+ if t.Text == "" {
+ return ""
+ }
+
+ v, err := f(t.Text)
+ if err != nil {
+ panic(fmt.Sprintf("unquote %s err: %s", t.Text, err))
+ }
+
+ return v
+ default:
+ panic(fmt.Sprintf("unimplemented Value for type: %s", t.Type))
+ }
+}
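+
+// valueExample is an illustrative sketch, not part of the upstream
+// source: it shows the concrete Go types Value returns for a few
+// literal tokens.
+func valueExample() {
+	fmt.Println(Token{Type: NUMBER, Text: "0x10"}.Value())   // int64(16); base inferred by ParseInt
+	fmt.Println(Token{Type: FLOAT, Text: "1.5"}.Value())     // float64(1.5)
+	fmt.Println(Token{Type: BOOL, Text: "true"}.Value())     // true
+	fmt.Println(Token{Type: STRING, Text: `"a\nb"`}.Value()) // "a\nb" decoded via hclstrconv.Unquote
+}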
+
+// unindentHeredoc returns the content of a heredoc. For a plain <<
+// heredoc the content is returned as is. For a <<- heredoc, the leading
+// indentation shared with the terminating marker is stripped from every
+// line, provided each line is at least as indented as that marker.
+func unindentHeredoc(heredoc string) string {
+ // We need to find the end of the marker
+ idx := strings.IndexByte(heredoc, '\n')
+ if idx == -1 {
+ panic("heredoc doesn't contain newline")
+ }
+
+ unindent := heredoc[2] == '-'
+
+ // We can optimize if the heredoc isn't marked for indentation
+ if !unindent {
+ return string(heredoc[idx+1 : len(heredoc)-idx+1])
+ }
+
+ // We need to unindent each line based on the indentation level of the marker
+ lines := strings.Split(string(heredoc[idx+1:len(heredoc)-idx+2]), "\n")
+ whitespacePrefix := lines[len(lines)-1]
+
+ isIndented := true
+ for _, v := range lines {
+ if strings.HasPrefix(v, whitespacePrefix) {
+ continue
+ }
+
+ isIndented = false
+ break
+ }
+
+ // If all lines are not at least as indented as the terminating mark, return the
+ // heredoc as is, but trim the leading space from the marker on the final line.
+ if !isIndented {
+ return strings.TrimRight(string(heredoc[idx+1:len(heredoc)-idx+1]), " \t")
+ }
+
+ unindentedLines := make([]string, len(lines))
+ for k, v := range lines {
+ if k == len(lines)-1 {
+ unindentedLines[k] = ""
+ break
+ }
+
+ unindentedLines[k] = strings.TrimPrefix(v, whitespacePrefix)
+ }
+
+ return strings.Join(unindentedLines, "\n")
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/flatten.go b/vendor/github.com/hashicorp/hcl/json/parser/flatten.go
new file mode 100644
index 0000000..f652d6f
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/parser/flatten.go
@@ -0,0 +1,117 @@
+package parser
+
+import "github.com/hashicorp/hcl/hcl/ast"
+
+// flattenObjects takes an AST node, walks it, and flattens nested
+// objects into HCL-style object items with merged keys.
+func flattenObjects(node ast.Node) {
+ ast.Walk(node, func(n ast.Node) (ast.Node, bool) {
+ // We only care about lists, because this is what we modify
+ list, ok := n.(*ast.ObjectList)
+ if !ok {
+ return n, true
+ }
+
+ // Rebuild the item list
+ items := make([]*ast.ObjectItem, 0, len(list.Items))
+ frontier := make([]*ast.ObjectItem, len(list.Items))
+ copy(frontier, list.Items)
+ for len(frontier) > 0 {
+ // Pop the current item
+ n := len(frontier)
+ item := frontier[n-1]
+ frontier = frontier[:n-1]
+
+ switch v := item.Val.(type) {
+ case *ast.ObjectType:
+ items, frontier = flattenObjectType(v, item, items, frontier)
+ case *ast.ListType:
+ items, frontier = flattenListType(v, item, items, frontier)
+ default:
+ items = append(items, item)
+ }
+ }
+
+ // Reverse the list since the frontier model runs things backwards
+ for i := len(items)/2 - 1; i >= 0; i-- {
+ opp := len(items) - 1 - i
+ items[i], items[opp] = items[opp], items[i]
+ }
+
+ // Done! Set the original items
+ list.Items = items
+ return n, true
+ })
+}
+
+func flattenListType(
+ ot *ast.ListType,
+ item *ast.ObjectItem,
+ items []*ast.ObjectItem,
+ frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
+ // If the list is empty, keep the original list
+ if len(ot.List) == 0 {
+ items = append(items, item)
+ return items, frontier
+ }
+
+	// All the elements of this list must also be objects!
+ for _, subitem := range ot.List {
+ if _, ok := subitem.(*ast.ObjectType); !ok {
+ items = append(items, item)
+ return items, frontier
+ }
+ }
+
+	// Great! We have a match. Go through all the elements and flatten.
+ for _, elem := range ot.List {
+ // Add it to the frontier so that we can recurse
+ frontier = append(frontier, &ast.ObjectItem{
+ Keys: item.Keys,
+ Assign: item.Assign,
+ Val: elem,
+ LeadComment: item.LeadComment,
+ LineComment: item.LineComment,
+ })
+ }
+
+ return items, frontier
+}
+
+func flattenObjectType(
+ ot *ast.ObjectType,
+ item *ast.ObjectItem,
+ items []*ast.ObjectItem,
+ frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
+ // If the list has no items we do not have to flatten anything
+ if ot.List.Items == nil {
+ items = append(items, item)
+ return items, frontier
+ }
+
+ // All the elements of this object must also be objects!
+ for _, subitem := range ot.List.Items {
+ if _, ok := subitem.Val.(*ast.ObjectType); !ok {
+ items = append(items, item)
+ return items, frontier
+ }
+ }
+
+	// Great! We have a match. Go through all the items and flatten.
+ for _, subitem := range ot.List.Items {
+ // Copy the new key
+ keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys))
+ copy(keys, item.Keys)
+ copy(keys[len(item.Keys):], subitem.Keys)
+
+ // Add it to the frontier so that we can recurse
+ frontier = append(frontier, &ast.ObjectItem{
+ Keys: keys,
+ Assign: item.Assign,
+ Val: subitem.Val,
+ LeadComment: item.LeadComment,
+ LineComment: item.LineComment,
+ })
+ }
+
+ return items, frontier
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go
new file mode 100644
index 0000000..125a5f0
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/parser/parser.go
@@ -0,0 +1,313 @@
+package parser
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ hcltoken "github.com/hashicorp/hcl/hcl/token"
+ "github.com/hashicorp/hcl/json/scanner"
+ "github.com/hashicorp/hcl/json/token"
+)
+
+type Parser struct {
+ sc *scanner.Scanner
+
+ // Last read token
+ tok token.Token
+ commaPrev token.Token
+
+ enableTrace bool
+ indent int
+ n int // buffer size (max = 1)
+}
+
+func newParser(src []byte) *Parser {
+ return &Parser{
+ sc: scanner.New(src),
+ }
+}
+
+// Parse parses the given source and returns its abstract syntax tree.
+func Parse(src []byte) (*ast.File, error) {
+ p := newParser(src)
+ return p.Parse()
+}
+
+var errEofToken = errors.New("EOF token found")
+
+// Parse parses the parser's source and returns its abstract syntax tree.
+func (p *Parser) Parse() (*ast.File, error) {
+ f := &ast.File{}
+ var err, scerr error
+ p.sc.Error = func(pos token.Pos, msg string) {
+ scerr = fmt.Errorf("%s: %s", pos, msg)
+ }
+
+ // The root must be an object in JSON
+ object, err := p.object()
+ if scerr != nil {
+ return nil, scerr
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ // We make our final node an object list so it is more HCL compatible
+ f.Node = object.List
+
+ // Flatten it, which finds patterns and turns them into more HCL-like
+ // AST trees.
+ flattenObjects(f.Node)
+
+ return f, nil
+}
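+
+// parseExample is an illustrative sketch, not part of the upstream
+// source: when every value along a path is an object, the flattening
+// pass merges the nested keys into a single HCL-style item, so the
+// document below yields one item carrying the two keys "a" and "b".
+func parseExample() {
+	f, err := Parse([]byte(`{"a": {"b": {"x": 1}}}`))
+	if err != nil {
+		panic(err)
+	}
+	item := f.Node.(*ast.ObjectList).Items[0]
+	fmt.Println(len(item.Keys)) // 2
+}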
+
+func (p *Parser) objectList() (*ast.ObjectList, error) {
+ defer un(trace(p, "ParseObjectList"))
+ node := &ast.ObjectList{}
+
+ for {
+ n, err := p.objectItem()
+ if err == errEofToken {
+ break // we are finished
+ }
+
+		// we don't return a nil node, because the caller might want to
+		// use the already collected items.
+ if err != nil {
+ return node, err
+ }
+
+ node.Add(n)
+
+ // Check for a followup comma. If it isn't a comma, then we're done
+ if tok := p.scan(); tok.Type != token.COMMA {
+ break
+ }
+ }
+
+ return node, nil
+}
+
+// objectItem parses a single object item
+func (p *Parser) objectItem() (*ast.ObjectItem, error) {
+ defer un(trace(p, "ParseObjectItem"))
+
+ keys, err := p.objectKey()
+ if err != nil {
+ return nil, err
+ }
+
+ o := &ast.ObjectItem{
+ Keys: keys,
+ }
+
+ switch p.tok.Type {
+ case token.COLON:
+ pos := p.tok.Pos
+ o.Assign = hcltoken.Pos{
+ Filename: pos.Filename,
+ Offset: pos.Offset,
+ Line: pos.Line,
+ Column: pos.Column,
+ }
+
+ o.Val, err = p.objectValue()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return o, nil
+}
+
+// objectKey parses an object key and returns an ObjectKey AST
+func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
+ keyCount := 0
+ keys := make([]*ast.ObjectKey, 0)
+
+ for {
+ tok := p.scan()
+ switch tok.Type {
+ case token.EOF:
+ return nil, errEofToken
+ case token.STRING:
+ keyCount++
+ keys = append(keys, &ast.ObjectKey{
+ Token: p.tok.HCLToken(),
+ })
+ case token.COLON:
+ // If we have a zero keycount it means that we never got
+ // an object key, i.e. `{ :`. This is a syntax error.
+ if keyCount == 0 {
+ return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
+ }
+
+ // Done
+ return keys, nil
+ case token.ILLEGAL:
+ return nil, errors.New("illegal")
+ default:
+ return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
+ }
+ }
+}
+
+// objectValue parses any type of value, such as a number, bool, string,
+// object, or list.
+func (p *Parser) objectValue() (ast.Node, error) {
+ defer un(trace(p, "ParseObjectValue"))
+ tok := p.scan()
+
+ switch tok.Type {
+ case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING:
+ return p.literalType()
+ case token.LBRACE:
+ return p.objectType()
+ case token.LBRACK:
+ return p.listType()
+ case token.EOF:
+ return nil, errEofToken
+ }
+
+ return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok)
+}
+
+// object parses the root document object, which in JSON must be an
+// object type.
+func (p *Parser) object() (*ast.ObjectType, error) {
+ defer un(trace(p, "ParseType"))
+ tok := p.scan()
+
+ switch tok.Type {
+ case token.LBRACE:
+ return p.objectType()
+ case token.EOF:
+ return nil, errEofToken
+ }
+
+ return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok)
+}
+
+// objectType parses an object type and returns an ObjectType AST
+func (p *Parser) objectType() (*ast.ObjectType, error) {
+ defer un(trace(p, "ParseObjectType"))
+
+ // we assume that the currently scanned token is a LBRACE
+ o := &ast.ObjectType{}
+
+ l, err := p.objectList()
+
+	// if we hit RBRACE, we are good to go (it means we parsed all items);
+	// if it's not a RBRACE, it's a syntax error and we just return it.
+ if err != nil && p.tok.Type != token.RBRACE {
+ return nil, err
+ }
+
+ o.List = l
+ return o, nil
+}
+
+// listType parses a list type and returns a ListType AST
+func (p *Parser) listType() (*ast.ListType, error) {
+ defer un(trace(p, "ParseListType"))
+
+ // we assume that the currently scanned token is a LBRACK
+ l := &ast.ListType{}
+
+ for {
+ tok := p.scan()
+ switch tok.Type {
+ case token.NUMBER, token.FLOAT, token.STRING:
+ node, err := p.literalType()
+ if err != nil {
+ return nil, err
+ }
+
+ l.Add(node)
+ case token.COMMA:
+ continue
+ case token.LBRACE:
+ node, err := p.objectType()
+ if err != nil {
+ return nil, err
+ }
+
+ l.Add(node)
+ case token.BOOL:
+ // TODO(arslan) should we support? not supported by HCL yet
+ case token.LBRACK:
+ // TODO(arslan) should we support nested lists? Even though it's
+ // written in README of HCL, it's not a part of the grammar
+ // (not defined in parse.y)
+ case token.RBRACK:
+ // finished
+ return l, nil
+ default:
+ return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type)
+ }
+
+ }
+}
+
+// literalType parses a literal type and returns a LiteralType AST
+func (p *Parser) literalType() (*ast.LiteralType, error) {
+ defer un(trace(p, "ParseLiteral"))
+
+ return &ast.LiteralType{
+ Token: p.tok.HCLToken(),
+ }, nil
+}
+
+// scan returns the next token from the underlying scanner. If a token has
+// been unscanned then read that instead.
+func (p *Parser) scan() token.Token {
+ // If we have a token on the buffer, then return it.
+ if p.n != 0 {
+ p.n = 0
+ return p.tok
+ }
+
+ p.tok = p.sc.Scan()
+ return p.tok
+}
+
+// unscan pushes the previously read token back onto the buffer.
+func (p *Parser) unscan() {
+ p.n = 1
+}
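+
+// peekType is an illustrative sketch, not part of the upstream source:
+// the single-token buffer above gives one token of lookahead by reading
+// a token and immediately pushing it back.
+func (p *Parser) peekType() token.Type {
+	typ := p.scan().Type
+	p.unscan()
+	return typ
+}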
+
+// ----------------------------------------------------------------------------
+// Parsing support
+
+func (p *Parser) printTrace(a ...interface{}) {
+ if !p.enableTrace {
+ return
+ }
+
+ const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+ const n = len(dots)
+ fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)
+
+ i := 2 * p.indent
+ for i > n {
+ fmt.Print(dots)
+ i -= n
+ }
+ // i <= n
+ fmt.Print(dots[0:i])
+ fmt.Println(a...)
+}
+
+func trace(p *Parser, msg string) *Parser {
+ p.printTrace(msg, "(")
+ p.indent++
+ return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *Parser) {
+ p.indent--
+ p.printTrace(")")
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
new file mode 100644
index 0000000..fe3f0f0
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
@@ -0,0 +1,451 @@
+package scanner
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/hashicorp/hcl/json/token"
+)
+
+// eof represents a marker rune for the end of the reader.
+const eof = rune(0)
+
+// Scanner defines a lexical scanner
+type Scanner struct {
+ buf *bytes.Buffer // Source buffer for advancing and scanning
+ src []byte // Source buffer for immutable access
+
+ // Source Position
+ srcPos token.Pos // current position
+ prevPos token.Pos // previous position, used for peek() method
+
+ lastCharLen int // length of last character in bytes
+ lastLineLen int // length of last line in characters (for correct column reporting)
+
+ tokStart int // token text start position
+ tokEnd int // token text end position
+
+ // Error is called for each error encountered. If no Error
+ // function is set, the error is reported to os.Stderr.
+ Error func(pos token.Pos, msg string)
+
+ // ErrorCount is incremented by one for each error encountered.
+ ErrorCount int
+
+	// tokPos is the start position of the most recently scanned token; set by
+ // Scan. The Filename field is always left untouched by the Scanner. If
+ // an error is reported (via Error) and Position is invalid, the scanner is
+ // not inside a token.
+ tokPos token.Pos
+}
+
+// New creates and initializes a new instance of Scanner using src as
+// its source content.
+func New(src []byte) *Scanner {
+	// even though we accept a src, we read from an io.Reader compatible
+	// type (*bytes.Buffer). So in the future we could easily switch to a
+	// streaming read.
+ b := bytes.NewBuffer(src)
+ s := &Scanner{
+ buf: b,
+ src: src,
+ }
+
+ // srcPosition always starts with 1
+ s.srcPos.Line = 1
+ return s
+}
+
+// next reads the next rune from the buffered reader. It returns rune(0)
+// if an error occurs (or io.EOF is reached).
+func (s *Scanner) next() rune {
+ ch, size, err := s.buf.ReadRune()
+ if err != nil {
+ // advance for error reporting
+ s.srcPos.Column++
+ s.srcPos.Offset += size
+ s.lastCharLen = size
+ return eof
+ }
+
+ if ch == utf8.RuneError && size == 1 {
+ s.srcPos.Column++
+ s.srcPos.Offset += size
+ s.lastCharLen = size
+ s.err("illegal UTF-8 encoding")
+ return ch
+ }
+
+ // remember last position
+ s.prevPos = s.srcPos
+
+ s.srcPos.Column++
+ s.lastCharLen = size
+ s.srcPos.Offset += size
+
+ if ch == '\n' {
+ s.srcPos.Line++
+ s.lastLineLen = s.srcPos.Column
+ s.srcPos.Column = 0
+ }
+
+ // debug
+ // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
+ return ch
+}
+
+// unread unreads the previously read rune and restores the source position
+func (s *Scanner) unread() {
+ if err := s.buf.UnreadRune(); err != nil {
+ panic(err) // this is user fault, we should catch it
+ }
+ s.srcPos = s.prevPos // put back last position
+}
+
+// peek returns the next rune without advancing the reader.
+func (s *Scanner) peek() rune {
+ peek, _, err := s.buf.ReadRune()
+ if err != nil {
+ return eof
+ }
+
+ s.buf.UnreadRune()
+ return peek
+}
+
+// Scan scans the next token and returns the token.
+func (s *Scanner) Scan() token.Token {
+ ch := s.next()
+
+ // skip white space
+ for isWhitespace(ch) {
+ ch = s.next()
+ }
+
+ var tok token.Type
+
+ // token text markings
+ s.tokStart = s.srcPos.Offset - s.lastCharLen
+
+	// token position: the initial next() moved the offset by one rune
+	// (by the rune's size in bytes, actually), but we are interested in
+	// the starting point
+ s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
+ if s.srcPos.Column > 0 {
+ // common case: last character was not a '\n'
+ s.tokPos.Line = s.srcPos.Line
+ s.tokPos.Column = s.srcPos.Column
+ } else {
+ // last character was a '\n'
+ // (we cannot be at the beginning of the source
+ // since we have called next() at least once)
+ s.tokPos.Line = s.srcPos.Line - 1
+ s.tokPos.Column = s.lastLineLen
+ }
+
+ switch {
+ case isLetter(ch):
+ lit := s.scanIdentifier()
+ if lit == "true" || lit == "false" {
+ tok = token.BOOL
+ } else if lit == "null" {
+ tok = token.NULL
+ } else {
+ s.err("illegal char")
+ }
+ case isDecimal(ch):
+ tok = s.scanNumber(ch)
+ default:
+ switch ch {
+ case eof:
+ tok = token.EOF
+ case '"':
+ tok = token.STRING
+ s.scanString()
+ case '.':
+ tok = token.PERIOD
+ ch = s.peek()
+ if isDecimal(ch) {
+ tok = token.FLOAT
+ ch = s.scanMantissa(ch)
+ ch = s.scanExponent(ch)
+ }
+ case '[':
+ tok = token.LBRACK
+ case ']':
+ tok = token.RBRACK
+ case '{':
+ tok = token.LBRACE
+ case '}':
+ tok = token.RBRACE
+ case ',':
+ tok = token.COMMA
+ case ':':
+ tok = token.COLON
+ case '-':
+ if isDecimal(s.peek()) {
+ ch := s.next()
+ tok = s.scanNumber(ch)
+ } else {
+ s.err("illegal char")
+ }
+ default:
+ s.err("illegal char: " + string(ch))
+ }
+ }
+
+ // finish token ending
+ s.tokEnd = s.srcPos.Offset
+
+ // create token literal
+ var tokenText string
+ if s.tokStart >= 0 {
+ tokenText = string(s.src[s.tokStart:s.tokEnd])
+ }
+ s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
+
+ return token.Token{
+ Type: tok,
+ Pos: s.tokPos,
+ Text: tokenText,
+ }
+}
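+
+// scanExample is an illustrative sketch, not part of the upstream
+// source: it drives the scanner over a small JSON document and prints
+// every token until EOF.
+func scanExample() {
+	s := New([]byte(`{"port": 8080}`))
+	for {
+		tok := s.Scan()
+		if tok.Type == token.EOF {
+			break
+		}
+		fmt.Printf("%s %s %q\n", tok.Pos, tok.Type, tok.Text)
+	}
+}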
+
+// scanNumber scans a JSON number definition starting with the given rune
+func (s *Scanner) scanNumber(ch rune) token.Type {
+ zero := ch == '0'
+ pos := s.srcPos
+
+ s.scanMantissa(ch)
+ ch = s.next() // seek forward
+ if ch == 'e' || ch == 'E' {
+ ch = s.scanExponent(ch)
+ return token.FLOAT
+ }
+
+ if ch == '.' {
+ ch = s.scanFraction(ch)
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ ch = s.scanExponent(ch)
+ }
+ return token.FLOAT
+ }
+
+ if ch != eof {
+ s.unread()
+ }
+
+	// If the number started with 0 and more digits followed, it's an error
+ if zero && pos != s.srcPos {
+ s.err("numbers cannot start with 0")
+ }
+
+ return token.NUMBER
+}
+
+// scanMantissa scans the mantissa beginning from the given rune. It
+// returns the next non-decimal rune, which is used to determine whether
+// a fraction or exponent follows.
+func (s *Scanner) scanMantissa(ch rune) rune {
+ scanned := false
+ for isDecimal(ch) {
+ ch = s.next()
+ scanned = true
+ }
+
+ if scanned && ch != eof {
+ s.unread()
+ }
+ return ch
+}
+
+// scanFraction scans the fraction after the '.' rune
+func (s *Scanner) scanFraction(ch rune) rune {
+ if ch == '.' {
+ ch = s.peek() // we peek just to see if we can move forward
+ ch = s.scanMantissa(ch)
+ }
+ return ch
+}
+
+// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
+// rune.
+func (s *Scanner) scanExponent(ch rune) rune {
+ if ch == 'e' || ch == 'E' {
+ ch = s.next()
+ if ch == '-' || ch == '+' {
+ ch = s.next()
+ }
+ ch = s.scanMantissa(ch)
+ }
+ return ch
+}
+
+// scanString scans a quoted string
+func (s *Scanner) scanString() {
+ braces := 0
+ for {
+ // '"' opening already consumed
+ // read character after quote
+ ch := s.next()
+
+ if ch == '\n' || ch < 0 || ch == eof {
+ s.err("literal not terminated")
+ return
+ }
+
+ if ch == '"' {
+ break
+ }
+
+		// If we're going into a ${} then we can ignore quotes for a while
+ if braces == 0 && ch == '$' && s.peek() == '{' {
+ braces++
+ s.next()
+ } else if braces > 0 && ch == '{' {
+ braces++
+ }
+ if braces > 0 && ch == '}' {
+ braces--
+ }
+
+ if ch == '\\' {
+ s.scanEscape()
+ }
+ }
+
+ return
+}
+
+// scanEscape scans an escape sequence
+func (s *Scanner) scanEscape() rune {
+ // http://en.cppreference.com/w/cpp/language/escape
+	ch := s.next() // read character after '\'
+ switch ch {
+ case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
+ // nothing to do
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ // octal notation
+ ch = s.scanDigits(ch, 8, 3)
+ case 'x':
+		// hexadecimal notation
+ ch = s.scanDigits(s.next(), 16, 2)
+ case 'u':
+ // universal character name
+ ch = s.scanDigits(s.next(), 16, 4)
+ case 'U':
+ // universal character name
+ ch = s.scanDigits(s.next(), 16, 8)
+ default:
+ s.err("illegal char escape")
+ }
+ return ch
+}
+
+// scanDigits scans up to n digit runes in the given base, starting with
+// ch. For example, the octal escape \123 results in a call like
+// scanDigits(ch, 8, 3).
+func (s *Scanner) scanDigits(ch rune, base, n int) rune {
+ for n > 0 && digitVal(ch) < base {
+ ch = s.next()
+ n--
+ }
+ if n > 0 {
+ s.err("illegal char escape")
+ }
+
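+	// NOTE: unlike the HCL scanner's scanDigits, there is no EOF guard
+	// here: if the input ends in the middle of an escape sequence, the
+	// unconditional unread below follows a failed read and will panic.
+	// The HCL variant breaks out of the loop on EOF and only unreads
+	// when it actually consumed a character.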
+ // we scanned all digits, put the last non digit char back
+ s.unread()
+ return ch
+}
+
+// scanIdentifier scans an identifier and returns the literal string
+func (s *Scanner) scanIdentifier() string {
+ offs := s.srcPos.Offset - s.lastCharLen
+ ch := s.next()
+ for isLetter(ch) || isDigit(ch) || ch == '-' {
+ ch = s.next()
+ }
+
+ if ch != eof {
+ s.unread() // we got identifier, put back latest char
+ }
+
+ return string(s.src[offs:s.srcPos.Offset])
+}
+
+// recentPosition returns the position of the character immediately after the
+// character or token returned by the last call to Scan.
+func (s *Scanner) recentPosition() (pos token.Pos) {
+ pos.Offset = s.srcPos.Offset - s.lastCharLen
+ switch {
+ case s.srcPos.Column > 0:
+ // common case: last character was not a '\n'
+ pos.Line = s.srcPos.Line
+ pos.Column = s.srcPos.Column
+ case s.lastLineLen > 0:
+ // last character was a '\n'
+ // (we cannot be at the beginning of the source
+ // since we have called next() at least once)
+ pos.Line = s.srcPos.Line - 1
+ pos.Column = s.lastLineLen
+ default:
+ // at the beginning of the source
+ pos.Line = 1
+ pos.Column = 1
+ }
+ return
+}
+
+// err reports a scanning error to the s.Error function. If no Error
+// function is set, the error is printed to os.Stderr instead.
+func (s *Scanner) err(msg string) {
+ s.ErrorCount++
+ pos := s.recentPosition()
+
+ if s.Error != nil {
+ s.Error(pos, msg)
+ return
+ }
+
+ fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
+}
+
+// isLetter returns true if the given rune is a letter
+func isLetter(ch rune) bool {
+ return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
+}
+
+// isDigit returns true if the given rune is a decimal digit
+func isDigit(ch rune) bool {
+ return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
+}
+
+// isDecimal returns true if the given rune is a decimal digit
+func isDecimal(ch rune) bool {
+ return '0' <= ch && ch <= '9'
+}
+
+// isHexadecimal returns true if the given rune is a hexadecimal digit
+func isHexadecimal(ch rune) bool {
+ return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
+}
+
+// isWhitespace returns true if the rune is a space, tab, newline or carriage return
+func isWhitespace(ch rune) bool {
+ return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
+}
+
+// digitVal returns the integer value of a given octal, decimal, or hexadecimal rune
+func digitVal(ch rune) int {
+ switch {
+ case '0' <= ch && ch <= '9':
+ return int(ch - '0')
+ case 'a' <= ch && ch <= 'f':
+ return int(ch - 'a' + 10)
+ case 'A' <= ch && ch <= 'F':
+ return int(ch - 'A' + 10)
+ }
+ return 16 // larger than any legal digit val
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/token/position.go b/vendor/github.com/hashicorp/hcl/json/token/position.go
new file mode 100644
index 0000000..59c1bb7
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/token/position.go
@@ -0,0 +1,46 @@
+package token
+
+import "fmt"
+
+// Pos describes an arbitrary source position
+// including the file, line, and column location.
+// A Position is valid if the line number is > 0.
+type Pos struct {
+ Filename string // filename, if any
+ Offset int // offset, starting at 0
+ Line int // line number, starting at 1
+ Column int // column number, starting at 1 (character count)
+}
+
+// IsValid returns true if the position is valid.
+func (p *Pos) IsValid() bool { return p.Line > 0 }
+
+// String returns a string in one of several forms:
+//
+// file:line:column valid position with file name
+// line:column valid position without file name
+// file invalid position with file name
+// - invalid position without file name
+func (p Pos) String() string {
+ s := p.Filename
+ if p.IsValid() {
+ if s != "" {
+ s += ":"
+ }
+ s += fmt.Sprintf("%d:%d", p.Line, p.Column)
+ }
+ if s == "" {
+ s = "-"
+ }
+ return s
+}
+
+// Before reports whether the position p is before u.
+func (p Pos) Before(u Pos) bool {
+ return u.Offset > p.Offset || u.Line > p.Line
+}
+
+// After reports whether the position p is after u.
+func (p Pos) After(u Pos) bool {
+ return u.Offset < p.Offset || u.Line < p.Line
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/token/token.go b/vendor/github.com/hashicorp/hcl/json/token/token.go
new file mode 100644
index 0000000..95a0c3e
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/json/token/token.go
@@ -0,0 +1,118 @@
+package token
+
+import (
+ "fmt"
+ "strconv"
+
+ hcltoken "github.com/hashicorp/hcl/hcl/token"
+)
+
+// Token defines a single HCL token which can be obtained via the Scanner
+type Token struct {
+ Type Type
+ Pos Pos
+ Text string
+}
+
+// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
+type Type int
+
+const (
+ // Special tokens
+ ILLEGAL Type = iota
+ EOF
+
+ identifier_beg
+ literal_beg
+ NUMBER // 12345
+ FLOAT // 123.45
+ BOOL // true,false
+ STRING // "abc"
+ NULL // null
+ literal_end
+ identifier_end
+
+ operator_beg
+ LBRACK // [
+ LBRACE // {
+ COMMA // ,
+ PERIOD // .
+ COLON // :
+
+ RBRACK // ]
+ RBRACE // }
+
+ operator_end
+)
+
+var tokens = [...]string{
+ ILLEGAL: "ILLEGAL",
+
+ EOF: "EOF",
+
+ NUMBER: "NUMBER",
+ FLOAT: "FLOAT",
+ BOOL: "BOOL",
+ STRING: "STRING",
+ NULL: "NULL",
+
+ LBRACK: "LBRACK",
+ LBRACE: "LBRACE",
+ COMMA: "COMMA",
+ PERIOD: "PERIOD",
+ COLON: "COLON",
+
+ RBRACK: "RBRACK",
+ RBRACE: "RBRACE",
+}
+
+// String returns the string corresponding to the token tok.
+func (t Type) String() string {
+ s := ""
+ if 0 <= t && t < Type(len(tokens)) {
+ s = tokens[t]
+ }
+ if s == "" {
+ s = "token(" + strconv.Itoa(int(t)) + ")"
+ }
+ return s
+}
+
+// IsIdentifier returns true for tokens corresponding to identifiers and basic
+// type literals; it returns false otherwise.
+func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end }
+
+// IsLiteral returns true for tokens corresponding to basic type literals; it
+// returns false otherwise.
+func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end }
+
+// IsOperator returns true for tokens corresponding to operators and
+// delimiters; it returns false otherwise.
+func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end }
+
+// String returns a human-readable representation of the token,
+// containing its position, type, and literal text.
+func (t Token) String() string {
+ return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text)
+}
+
+// HCLToken converts this token to an HCL token.
+//
+// The token type must be a literal type or this will panic.
+func (t Token) HCLToken() hcltoken.Token {
+ switch t.Type {
+ case BOOL:
+ return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text}
+ case FLOAT:
+ return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text}
+ case NULL:
+ return hcltoken.Token{Type: hcltoken.STRING, Text: ""}
+ case NUMBER:
+ return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text}
+ case STRING:
+ return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true}
+ default:
+ panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type))
+ }
+}
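+
+// hclTokenExample is an illustrative sketch, not part of the upstream
+// source: JSON strings carry JSON: true so the HCL layer unquotes them
+// with the standard library rather than HCL's interpolation-aware
+// Unquote, and JSON null maps to an empty HCL string.
+func hclTokenExample() {
+	str := Token{Type: STRING, Text: `"a"`}.HCLToken()
+	fmt.Println(str.Type, str.JSON) // STRING true
+	null := Token{Type: NULL, Text: "null"}.HCLToken()
+	fmt.Println(null.Type, null.Text == "") // STRING true
+}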
diff --git a/vendor/github.com/hashicorp/hcl/lex.go b/vendor/github.com/hashicorp/hcl/lex.go
new file mode 100644
index 0000000..d9993c2
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/lex.go
@@ -0,0 +1,38 @@
+package hcl
+
+import (
+ "unicode"
+ "unicode/utf8"
+)
+
+type lexModeValue byte
+
+const (
+ lexModeUnknown lexModeValue = iota
+ lexModeHcl
+ lexModeJson
+)
+
+// lexMode returns whether we're going to be parsing in JSON
+// mode or HCL mode.
+func lexMode(v []byte) lexModeValue {
+ var (
+ r rune
+ w int
+ offset int
+ )
+
+ for {
+ r, w = utf8.DecodeRune(v[offset:])
+ offset += w
+ if unicode.IsSpace(r) {
+ continue
+ }
+ if r == '{' {
+ return lexModeJson
+ }
+ break
+ }
+
+ return lexModeHcl
+}
diff --git a/vendor/github.com/hashicorp/hcl/parse.go b/vendor/github.com/hashicorp/hcl/parse.go
new file mode 100644
index 0000000..1fca53c
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl/parse.go
@@ -0,0 +1,39 @@
+package hcl
+
+import (
+ "fmt"
+
+ "github.com/hashicorp/hcl/hcl/ast"
+ hclParser "github.com/hashicorp/hcl/hcl/parser"
+ jsonParser "github.com/hashicorp/hcl/json/parser"
+)
+
+// ParseBytes accepts a byte slice as input and returns its AST.
+//
+// The input can be either JSON or HCL.
+func ParseBytes(in []byte) (*ast.File, error) {
+ return parse(in)
+}
+
+// ParseString accepts a string as input and returns its AST.
+func ParseString(input string) (*ast.File, error) {
+ return parse([]byte(input))
+}
+
+func parse(in []byte) (*ast.File, error) {
+ switch lexMode(in) {
+ case lexModeHcl:
+ return hclParser.Parse(in)
+ case lexModeJson:
+ return jsonParser.Parse(in)
+ }
+
+ return nil, fmt.Errorf("unknown config format")
+}
+
+// Parse parses the given input and returns the root object.
+//
+// The input format can be either HCL or JSON.
+func Parse(input string) (*ast.File, error) {
+ return parse([]byte(input))
+}
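+
+// parseExample is an illustrative sketch, not part of the upstream
+// source: the same entry point accepts both syntaxes, dispatching on
+// whether the first non-space character is '{'.
+func parseExample() (*ast.File, *ast.File, error) {
+	hclFile, err := Parse("port = 8080")
+	if err != nil {
+		return nil, nil, err
+	}
+	jsonFile, err := Parse(`{"port": 8080}`)
+	return hclFile, jsonFile, err
+}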