author     mo khan <mo@mokhan.ca>    2025-05-11 21:12:57 -0600
committer  mo khan <mo@mokhan.ca>    2025-05-11 21:12:57 -0600
commit     60440f90dca28e99a31dd328c5f6d5dc0f9b6a2e (patch)
tree       2f54adf55086516f162f0a55a5347e6b25f7f176 /vendor/github.com/google
parent     05ca9b8d3a9c7203a3a3b590beaa400900bd9007 (diff)
chore: vendor go dependencies
Diffstat (limited to 'vendor/github.com/google')
-rw-r--r--  vendor/github.com/google/go-cmp/LICENSE  27
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go  185
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go  206
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go  171
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/cmpopts/struct_filter.go  189
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/cmpopts/xform.go  36
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/compare.go  671
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/export.go  31
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go  18
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go  123
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go  402
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go  9
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/internal/function/func.go  106
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/internal/value/name.go  164
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go  34
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/internal/value/sort.go  106
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/options.go  562
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/path.go  390
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/report.go  54
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/report_compare.go  433
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/report_references.go  264
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/report_reflect.go  414
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/report_slices.go  614
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/report_text.go  432
-rw-r--r--  vendor/github.com/google/go-cmp/cmp/report_value.go  121
-rw-r--r--  vendor/github.com/google/jsonapi/.gitignore  1
-rw-r--r--  vendor/github.com/google/jsonapi/.travis.yml  13
-rw-r--r--  vendor/github.com/google/jsonapi/LICENSE  21
-rw-r--r--  vendor/github.com/google/jsonapi/README.md  477
-rw-r--r--  vendor/github.com/google/jsonapi/constants.go  56
-rw-r--r--  vendor/github.com/google/jsonapi/doc.go  70
-rw-r--r--  vendor/github.com/google/jsonapi/errors.go  52
-rw-r--r--  vendor/github.com/google/jsonapi/node.go  121
-rw-r--r--  vendor/github.com/google/jsonapi/request.go  656
-rw-r--r--  vendor/github.com/google/jsonapi/response.go  538
-rw-r--r--  vendor/github.com/google/jsonapi/runtime.go  129
-rw-r--r--  vendor/github.com/google/uuid/CHANGELOG.md  41
-rw-r--r--  vendor/github.com/google/uuid/CONTRIBUTING.md  26
-rw-r--r--  vendor/github.com/google/uuid/CONTRIBUTORS  9
-rw-r--r--  vendor/github.com/google/uuid/LICENSE  27
-rw-r--r--  vendor/github.com/google/uuid/README.md  21
-rw-r--r--  vendor/github.com/google/uuid/dce.go  80
-rw-r--r--  vendor/github.com/google/uuid/doc.go  12
-rw-r--r--  vendor/github.com/google/uuid/hash.go  59
-rw-r--r--  vendor/github.com/google/uuid/marshal.go  38
-rw-r--r--  vendor/github.com/google/uuid/node.go  90
-rw-r--r--  vendor/github.com/google/uuid/node_js.go  12
-rw-r--r--  vendor/github.com/google/uuid/node_net.go  33
-rw-r--r--  vendor/github.com/google/uuid/null.go  118
-rw-r--r--  vendor/github.com/google/uuid/sql.go  59
-rw-r--r--  vendor/github.com/google/uuid/time.go  134
-rw-r--r--  vendor/github.com/google/uuid/util.go  43
-rw-r--r--  vendor/github.com/google/uuid/uuid.go  365
-rw-r--r--  vendor/github.com/google/uuid/version1.go  44
-rw-r--r--  vendor/github.com/google/uuid/version4.go  76
-rw-r--r--  vendor/github.com/google/uuid/version6.go  56
-rw-r--r--  vendor/github.com/google/uuid/version7.go  104
-rw-r--r--  vendor/github.com/google/yamlfmt/.gitignore  14
-rw-r--r--  vendor/github.com/google/yamlfmt/.goreleaser.yaml  63
-rw-r--r--  vendor/github.com/google/yamlfmt/.pre-commit-hooks.yaml  20
-rw-r--r--  vendor/github.com/google/yamlfmt/CONTRIBUTING.md  29
-rw-r--r--  vendor/github.com/google/yamlfmt/Dockerfile  25
-rw-r--r--  vendor/github.com/google/yamlfmt/LICENSE  202
-rw-r--r--  vendor/github.com/google/yamlfmt/Makefile  62
-rw-r--r--  vendor/github.com/google/yamlfmt/README.md  112
-rw-r--r--  vendor/github.com/google/yamlfmt/command/command.go  258
-rw-r--r--  vendor/github.com/google/yamlfmt/content_analyzer.go  90
-rw-r--r--  vendor/github.com/google/yamlfmt/engine.go  127
-rw-r--r--  vendor/github.com/google/yamlfmt/engine/consecutive_engine.go  103
-rw-r--r--  vendor/github.com/google/yamlfmt/engine/errors.go  40
-rw-r--r--  vendor/github.com/google/yamlfmt/engine/output.go  138
-rw-r--r--  vendor/github.com/google/yamlfmt/feature.go  78
-rw-r--r--  vendor/github.com/google/yamlfmt/formatter.go  65
-rw-r--r--  vendor/github.com/google/yamlfmt/formatters/basic/README.md  3
-rw-r--r--  vendor/github.com/google/yamlfmt/formatters/basic/anchors/check.go  37
-rw-r--r--  vendor/github.com/google/yamlfmt/formatters/basic/config.go  52
-rw-r--r--  vendor/github.com/google/yamlfmt/formatters/basic/errors.go  33
-rw-r--r--  vendor/github.com/google/yamlfmt/formatters/basic/factory.go  45
-rw-r--r--  vendor/github.com/google/yamlfmt/formatters/basic/features.go  78
-rw-r--r--  vendor/github.com/google/yamlfmt/formatters/basic/formatter.go  133
-rw-r--r--  vendor/github.com/google/yamlfmt/internal/collections/errors.go  34
-rw-r--r--  vendor/github.com/google/yamlfmt/internal/collections/set.go  71
-rw-r--r--  vendor/github.com/google/yamlfmt/internal/collections/slice.go  24
-rw-r--r--  vendor/github.com/google/yamlfmt/internal/features/eof_newline.go  39
-rw-r--r--  vendor/github.com/google/yamlfmt/internal/features/trim_whitespace.go  43
-rw-r--r--  vendor/github.com/google/yamlfmt/internal/gitlab/codequality.go  79
-rw-r--r--  vendor/github.com/google/yamlfmt/internal/hotfix/retain_line_break.go  104
-rw-r--r--  vendor/github.com/google/yamlfmt/internal/hotfix/strip_directives.go  101
-rw-r--r--  vendor/github.com/google/yamlfmt/internal/logger/debug.go  51
-rw-r--r--  vendor/github.com/google/yamlfmt/internal/multilinediff/multilinediff.go  130
-rw-r--r--  vendor/github.com/google/yamlfmt/linebreak.go  42
-rw-r--r--  vendor/github.com/google/yamlfmt/metadata.go  114
-rw-r--r--  vendor/github.com/google/yamlfmt/path_collector.go  336
-rw-r--r--  vendor/github.com/google/yamlfmt/schema.json  93
94 files changed, 12411 insertions, 0 deletions
diff --git a/vendor/github.com/google/go-cmp/LICENSE b/vendor/github.com/google/go-cmp/LICENSE
new file mode 100644
index 0000000..32017f8
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2017 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go
new file mode 100644
index 0000000..3d8d0cd
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/equate.go
@@ -0,0 +1,185 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cmpopts provides common options for the cmp package.
+package cmpopts
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "time"
+
+ "github.com/google/go-cmp/cmp"
+)
+
+func equateAlways(_, _ interface{}) bool { return true }
+
+// EquateEmpty returns a [cmp.Comparer] option that determines all maps and slices
+// with a length of zero to be equal, regardless of whether they are nil.
+//
+// EquateEmpty can be used in conjunction with [SortSlices] and [SortMaps].
+func EquateEmpty() cmp.Option {
+ return cmp.FilterValues(isEmpty, cmp.Comparer(equateAlways))
+}
+
+func isEmpty(x, y interface{}) bool {
+ vx, vy := reflect.ValueOf(x), reflect.ValueOf(y)
+ return (x != nil && y != nil && vx.Type() == vy.Type()) &&
+ (vx.Kind() == reflect.Slice || vx.Kind() == reflect.Map) &&
+ (vx.Len() == 0 && vy.Len() == 0)
+}
+
+// EquateApprox returns a [cmp.Comparer] option that determines float32 or float64
+// values to be equal if they are within a relative fraction or absolute margin.
+// This option is not used when either x or y is NaN or infinite.
+//
+// The fraction determines that the difference of two values must be within the
+// smaller fraction of the two values, while the margin determines that the two
+// values must be within some absolute margin.
+// To express only a fraction or only a margin, use 0 for the other parameter.
+// The fraction and margin must be non-negative.
+//
+// The mathematical expression used is equivalent to:
+//
+// |x-y| ≤ max(fraction*min(|x|, |y|), margin)
+//
+// EquateApprox can be used in conjunction with [EquateNaNs].
+func EquateApprox(fraction, margin float64) cmp.Option {
+ if margin < 0 || fraction < 0 || math.IsNaN(margin) || math.IsNaN(fraction) {
+ panic("margin or fraction must be a non-negative number")
+ }
+ a := approximator{fraction, margin}
+ return cmp.Options{
+ cmp.FilterValues(areRealF64s, cmp.Comparer(a.compareF64)),
+ cmp.FilterValues(areRealF32s, cmp.Comparer(a.compareF32)),
+ }
+}
+
+type approximator struct{ frac, marg float64 }
+
+func areRealF64s(x, y float64) bool {
+ return !math.IsNaN(x) && !math.IsNaN(y) && !math.IsInf(x, 0) && !math.IsInf(y, 0)
+}
+func areRealF32s(x, y float32) bool {
+ return areRealF64s(float64(x), float64(y))
+}
+func (a approximator) compareF64(x, y float64) bool {
+ relMarg := a.frac * math.Min(math.Abs(x), math.Abs(y))
+ return math.Abs(x-y) <= math.Max(a.marg, relMarg)
+}
+func (a approximator) compareF32(x, y float32) bool {
+ return a.compareF64(float64(x), float64(y))
+}
+
+// EquateNaNs returns a [cmp.Comparer] option that determines float32 and float64
+// NaN values to be equal.
+//
+// EquateNaNs can be used in conjunction with [EquateApprox].
+func EquateNaNs() cmp.Option {
+ return cmp.Options{
+ cmp.FilterValues(areNaNsF64s, cmp.Comparer(equateAlways)),
+ cmp.FilterValues(areNaNsF32s, cmp.Comparer(equateAlways)),
+ }
+}
+
+func areNaNsF64s(x, y float64) bool {
+ return math.IsNaN(x) && math.IsNaN(y)
+}
+func areNaNsF32s(x, y float32) bool {
+ return areNaNsF64s(float64(x), float64(y))
+}
+
+// EquateApproxTime returns a [cmp.Comparer] option that determines two non-zero
+// [time.Time] values to be equal if they are within some margin of one another.
+// If both times have a monotonic clock reading, then the monotonic time
+// difference will be used. The margin must be non-negative.
+func EquateApproxTime(margin time.Duration) cmp.Option {
+ if margin < 0 {
+ panic("margin must be a non-negative number")
+ }
+ a := timeApproximator{margin}
+ return cmp.FilterValues(areNonZeroTimes, cmp.Comparer(a.compare))
+}
+
+func areNonZeroTimes(x, y time.Time) bool {
+ return !x.IsZero() && !y.IsZero()
+}
+
+type timeApproximator struct {
+ margin time.Duration
+}
+
+func (a timeApproximator) compare(x, y time.Time) bool {
+ // Avoid subtracting times to avoid overflow when the
+ // difference is larger than the largest representable duration.
+ if x.After(y) {
+ // Ensure x is always before y
+ x, y = y, x
+ }
+ // We're within the margin if x+margin >= y.
+ // Note: time.Time doesn't have an AfterOrEqual method, hence the negation.
+ return !x.Add(a.margin).Before(y)
+}
+
+// AnyError is an error that matches any non-nil error.
+var AnyError anyError
+
+type anyError struct{}
+
+func (anyError) Error() string { return "any error" }
+func (anyError) Is(err error) bool { return err != nil }
+
+// EquateErrors returns a [cmp.Comparer] option that determines errors to be equal
+// if [errors.Is] reports them to match. The [AnyError] error can be used to
+// match any non-nil error.
+func EquateErrors() cmp.Option {
+ return cmp.FilterValues(areConcreteErrors, cmp.Comparer(compareErrors))
+}
+
+// areConcreteErrors reports whether x and y are types that implement error.
+// The input types are deliberately of the interface{} type rather than the
+// error type so that we can handle situations where the current type is an
+// interface{}, but the underlying concrete types both happen to implement
+// the error interface.
+func areConcreteErrors(x, y interface{}) bool {
+ _, ok1 := x.(error)
+ _, ok2 := y.(error)
+ return ok1 && ok2
+}
+
+func compareErrors(x, y interface{}) bool {
+ xe := x.(error)
+ ye := y.(error)
+ return errors.Is(xe, ye) || errors.Is(ye, xe)
+}
+
+// EquateComparable returns a [cmp.Option] that determines equality
+// of comparable types by directly comparing them using the == operator in Go.
+// The types to compare are specified by passing a value of that type.
+// This option should only be used on types that are documented as being
+// safe for direct == comparison. For example, [net/netip.Addr] is documented
+// as being semantically safe to use with ==, while [time.Time] is documented
+// to discourage the use of == on time values.
+func EquateComparable(typs ...interface{}) cmp.Option {
+ types := make(typesFilter)
+ for _, typ := range typs {
+ switch t := reflect.TypeOf(typ); {
+ case !t.Comparable():
+ panic(fmt.Sprintf("%T is not a comparable Go type", typ))
+ case types[t]:
+ panic(fmt.Sprintf("%T is already specified", typ))
+ default:
+ types[t] = true
+ }
+ }
+ return cmp.FilterPath(types.filter, cmp.Comparer(equateAny))
+}
+
+type typesFilter map[reflect.Type]bool
+
+func (tf typesFilter) filter(p cmp.Path) bool { return tf[p.Last().Type()] }
+
+func equateAny(x, y interface{}) bool { return x == y }
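The equate options above are typically stacked in a single test. A minimal usage sketch, not part of the vendored tree; the Reading struct, errSensor, and TestReadings are invented for illustration:

package example

import (
	"errors"
	"math"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

var errSensor = errors.New("sensor failure")

// Reading is a hypothetical measurement type used only for this sketch.
type Reading struct {
	Values []float64
	Taken  time.Time
	Err    error
}

func TestReadings(t *testing.T) {
	now := time.Now()
	got := Reading{Values: []float64{1.0001, math.NaN()}, Taken: now, Err: errSensor}
	want := Reading{Values: []float64{1.0, math.NaN()}, Taken: now.Add(5 * time.Millisecond), Err: cmpopts.AnyError}

	opts := cmp.Options{
		cmpopts.EquateEmpty(),                           // nil and empty slices/maps compare equal
		cmpopts.EquateApprox(0, 0.001),                  // |x-y| <= max(fraction*min(|x|,|y|), margin)
		cmpopts.EquateNaNs(),                            // NaN compares equal to NaN
		cmpopts.EquateApproxTime(10 * time.Millisecond), // times within 10ms compare equal
		cmpopts.EquateErrors(),                          // errors.Is matching; AnyError matches any non-nil error
	}
	if diff := cmp.Diff(want, got, opts); diff != "" {
		t.Errorf("Reading mismatch (-want +got):\n%s", diff)
	}
}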
diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go
new file mode 100644
index 0000000..fb84d11
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/ignore.go
@@ -0,0 +1,206 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmpopts
+
+import (
+ "fmt"
+ "reflect"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/google/go-cmp/cmp/internal/function"
+)
+
+// IgnoreFields returns an [cmp.Option] that ignores fields of the
+// given names on a single struct type. It respects the names of exported fields
+// that are forwarded due to struct embedding.
+// The struct type is specified by passing in a value of that type.
+//
+// The name may be a dot-delimited string (e.g., "Foo.Bar") to ignore a
+// specific sub-field that is embedded or nested within the parent struct.
+func IgnoreFields(typ interface{}, names ...string) cmp.Option {
+ sf := newStructFilter(typ, names...)
+ return cmp.FilterPath(sf.filter, cmp.Ignore())
+}
+
+// IgnoreTypes returns an [cmp.Option] that ignores all values assignable to
+// certain types, which are specified by passing in a value of each type.
+func IgnoreTypes(typs ...interface{}) cmp.Option {
+ tf := newTypeFilter(typs...)
+ return cmp.FilterPath(tf.filter, cmp.Ignore())
+}
+
+type typeFilter []reflect.Type
+
+func newTypeFilter(typs ...interface{}) (tf typeFilter) {
+ for _, typ := range typs {
+ t := reflect.TypeOf(typ)
+ if t == nil {
+ // This occurs if someone tries to pass in sync.Locker(nil)
+ panic("cannot determine type; consider using IgnoreInterfaces")
+ }
+ tf = append(tf, t)
+ }
+ return tf
+}
+func (tf typeFilter) filter(p cmp.Path) bool {
+ if len(p) < 1 {
+ return false
+ }
+ t := p.Last().Type()
+ for _, ti := range tf {
+ if t.AssignableTo(ti) {
+ return true
+ }
+ }
+ return false
+}
+
+// IgnoreInterfaces returns an [cmp.Option] that ignores all values or references of
+// values assignable to certain interface types. These interfaces are specified
+// by passing in an anonymous struct with the interface types embedded in it.
+// For example, to ignore [sync.Locker], pass in struct{sync.Locker}{}.
+func IgnoreInterfaces(ifaces interface{}) cmp.Option {
+ tf := newIfaceFilter(ifaces)
+ return cmp.FilterPath(tf.filter, cmp.Ignore())
+}
+
+type ifaceFilter []reflect.Type
+
+func newIfaceFilter(ifaces interface{}) (tf ifaceFilter) {
+ t := reflect.TypeOf(ifaces)
+ if ifaces == nil || t.Name() != "" || t.Kind() != reflect.Struct {
+ panic("input must be an anonymous struct")
+ }
+ for i := 0; i < t.NumField(); i++ {
+ fi := t.Field(i)
+ switch {
+ case !fi.Anonymous:
+ panic("struct cannot have named fields")
+ case fi.Type.Kind() != reflect.Interface:
+ panic("embedded field must be an interface type")
+ case fi.Type.NumMethod() == 0:
+ // This matches everything; why would you ever want this?
+ panic("cannot ignore empty interface")
+ default:
+ tf = append(tf, fi.Type)
+ }
+ }
+ return tf
+}
+func (tf ifaceFilter) filter(p cmp.Path) bool {
+ if len(p) < 1 {
+ return false
+ }
+ t := p.Last().Type()
+ for _, ti := range tf {
+ if t.AssignableTo(ti) {
+ return true
+ }
+ if t.Kind() != reflect.Ptr && reflect.PtrTo(t).AssignableTo(ti) {
+ return true
+ }
+ }
+ return false
+}
+
+// IgnoreUnexported returns an [cmp.Option] that only ignores the immediate unexported
+// fields of a struct, including anonymous fields of unexported types.
+// In particular, unexported fields within the struct's exported fields
+// of struct types, including anonymous fields, will not be ignored unless the
+// type of the field itself is also passed to IgnoreUnexported.
+//
+// Avoid ignoring unexported fields of a type which you do not control (i.e. a
+// type from another repository), as changes to the implementation of such types
+// may change how the comparison behaves. Prefer a custom [cmp.Comparer] instead.
+func IgnoreUnexported(typs ...interface{}) cmp.Option {
+ ux := newUnexportedFilter(typs...)
+ return cmp.FilterPath(ux.filter, cmp.Ignore())
+}
+
+type unexportedFilter struct{ m map[reflect.Type]bool }
+
+func newUnexportedFilter(typs ...interface{}) unexportedFilter {
+ ux := unexportedFilter{m: make(map[reflect.Type]bool)}
+ for _, typ := range typs {
+ t := reflect.TypeOf(typ)
+ if t == nil || t.Kind() != reflect.Struct {
+ panic(fmt.Sprintf("%T must be a non-pointer struct", typ))
+ }
+ ux.m[t] = true
+ }
+ return ux
+}
+func (xf unexportedFilter) filter(p cmp.Path) bool {
+ sf, ok := p.Index(-1).(cmp.StructField)
+ if !ok {
+ return false
+ }
+ return xf.m[p.Index(-2).Type()] && !isExported(sf.Name())
+}
+
+// isExported reports whether the identifier is exported.
+func isExported(id string) bool {
+ r, _ := utf8.DecodeRuneInString(id)
+ return unicode.IsUpper(r)
+}
+
+// IgnoreSliceElements returns an [cmp.Option] that ignores elements of []V.
+// The discard function must be of the form "func(T) bool" which is used to
+// ignore slice elements of type V, where V is assignable to T.
+// Elements are ignored if the function reports true.
+func IgnoreSliceElements(discardFunc interface{}) cmp.Option {
+ vf := reflect.ValueOf(discardFunc)
+ if !function.IsType(vf.Type(), function.ValuePredicate) || vf.IsNil() {
+ panic(fmt.Sprintf("invalid discard function: %T", discardFunc))
+ }
+ return cmp.FilterPath(func(p cmp.Path) bool {
+ si, ok := p.Index(-1).(cmp.SliceIndex)
+ if !ok {
+ return false
+ }
+ if !si.Type().AssignableTo(vf.Type().In(0)) {
+ return false
+ }
+ vx, vy := si.Values()
+ if vx.IsValid() && vf.Call([]reflect.Value{vx})[0].Bool() {
+ return true
+ }
+ if vy.IsValid() && vf.Call([]reflect.Value{vy})[0].Bool() {
+ return true
+ }
+ return false
+ }, cmp.Ignore())
+}
+
+// IgnoreMapEntries returns an [cmp.Option] that ignores entries of map[K]V.
+// The discard function must be of the form "func(T, R) bool" which is used to
+// ignore map entries of type K and V, where K and V are assignable to T and R.
+// Entries are ignored if the function reports true.
+func IgnoreMapEntries(discardFunc interface{}) cmp.Option {
+ vf := reflect.ValueOf(discardFunc)
+ if !function.IsType(vf.Type(), function.KeyValuePredicate) || vf.IsNil() {
+ panic(fmt.Sprintf("invalid discard function: %T", discardFunc))
+ }
+ return cmp.FilterPath(func(p cmp.Path) bool {
+ mi, ok := p.Index(-1).(cmp.MapIndex)
+ if !ok {
+ return false
+ }
+ if !mi.Key().Type().AssignableTo(vf.Type().In(0)) || !mi.Type().AssignableTo(vf.Type().In(1)) {
+ return false
+ }
+ k := mi.Key()
+ vx, vy := mi.Values()
+ if vx.IsValid() && vf.Call([]reflect.Value{k, vx})[0].Bool() {
+ return true
+ }
+ if vy.IsValid() && vf.Call([]reflect.Value{k, vy})[0].Bool() {
+ return true
+ }
+ return false
+ }, cmp.Ignore())
+}
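A minimal sketch of how the ignore options compose, not part of the vendored tree; the User and Profile types are invented for illustration:

package example

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

// Profile and User are hypothetical types used only for this sketch.
type Profile struct {
	Bio     string
	Updated int64
}

type User struct {
	Name    string
	Profile Profile
	Labels  map[string]string
	Scores  []int

	cache string // unexported; cmp.Equal would panic without IgnoreUnexported
}

func main() {
	x := User{Name: "a", Profile: Profile{Bio: "hi", Updated: 1},
		Labels: map[string]string{"tmp": "x", "env": "prod"}, Scores: []int{0, 7}, cache: "x"}
	y := User{Name: "a", Profile: Profile{Bio: "hi", Updated: 2},
		Labels: map[string]string{"tmp": "y", "env": "prod"}, Scores: []int{7}, cache: "y"}

	equal := cmp.Equal(x, y,
		cmpopts.IgnoreFields(User{}, "Profile.Updated"),                        // dot-delimited nested field
		cmpopts.IgnoreUnexported(User{}),                                       // skip the unexported cache field
		cmpopts.IgnoreMapEntries(func(k, _ string) bool { return k == "tmp" }), // drop volatile map entries
		cmpopts.IgnoreSliceElements(func(v int) bool { return v == 0 }),        // drop zero-valued scores
	)
	fmt.Println(equal) // true
}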
diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go
new file mode 100644
index 0000000..720f3cd
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go
@@ -0,0 +1,171 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmpopts
+
+import (
+ "fmt"
+ "reflect"
+ "sort"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/google/go-cmp/cmp/internal/function"
+)
+
+// SortSlices returns a [cmp.Transformer] option that sorts all []V.
+// The lessOrCompareFunc function must be either
+// a less function of the form "func(T, T) bool" or
+// a compare function of the format "func(T, T) int"
+// which is used to sort any slice with element type V that is assignable to T.
+//
+// A less function must be:
+// - Deterministic: less(x, y) == less(x, y)
+// - Irreflexive: !less(x, x)
+// - Transitive: if !less(x, y) and !less(y, z), then !less(x, z)
+//
+// A compare function must be:
+// - Deterministic: compare(x, y) == compare(x, y)
+// - Irreflexive: compare(x, x) == 0
+// - Transitive: if compare(x, y) < 0 and compare(y, z) < 0, then compare(x, z) < 0
+//
+// The function does not have to be "total". That is, if x != y but less or
+// compare reports them as equal, their relative order is maintained.
+//
+// SortSlices can be used in conjunction with [EquateEmpty].
+func SortSlices(lessOrCompareFunc interface{}) cmp.Option {
+ vf := reflect.ValueOf(lessOrCompareFunc)
+ if (!function.IsType(vf.Type(), function.Less) && !function.IsType(vf.Type(), function.Compare)) || vf.IsNil() {
+ panic(fmt.Sprintf("invalid less or compare function: %T", lessOrCompareFunc))
+ }
+ ss := sliceSorter{vf.Type().In(0), vf}
+ return cmp.FilterValues(ss.filter, cmp.Transformer("cmpopts.SortSlices", ss.sort))
+}
+
+type sliceSorter struct {
+ in reflect.Type // T
+ fnc reflect.Value // func(T, T) bool
+}
+
+func (ss sliceSorter) filter(x, y interface{}) bool {
+ vx, vy := reflect.ValueOf(x), reflect.ValueOf(y)
+ if !(x != nil && y != nil && vx.Type() == vy.Type()) ||
+ !(vx.Kind() == reflect.Slice && vx.Type().Elem().AssignableTo(ss.in)) ||
+ (vx.Len() <= 1 && vy.Len() <= 1) {
+ return false
+ }
+ // Check whether the slices are already sorted to avoid an infinite
+ // recursion cycle applying the same transform to itself.
+ ok1 := sort.SliceIsSorted(x, func(i, j int) bool { return ss.less(vx, i, j) })
+ ok2 := sort.SliceIsSorted(y, func(i, j int) bool { return ss.less(vy, i, j) })
+ return !ok1 || !ok2
+}
+func (ss sliceSorter) sort(x interface{}) interface{} {
+ src := reflect.ValueOf(x)
+ dst := reflect.MakeSlice(src.Type(), src.Len(), src.Len())
+ for i := 0; i < src.Len(); i++ {
+ dst.Index(i).Set(src.Index(i))
+ }
+ sort.SliceStable(dst.Interface(), func(i, j int) bool { return ss.less(dst, i, j) })
+ ss.checkSort(dst)
+ return dst.Interface()
+}
+func (ss sliceSorter) checkSort(v reflect.Value) {
+ start := -1 // Start of a sequence of equal elements.
+ for i := 1; i < v.Len(); i++ {
+ if ss.less(v, i-1, i) {
+ // Check that first and last elements in v[start:i] are equal.
+ if start >= 0 && (ss.less(v, start, i-1) || ss.less(v, i-1, start)) {
+ panic(fmt.Sprintf("incomparable values detected: want equal elements: %v", v.Slice(start, i)))
+ }
+ start = -1
+ } else if start == -1 {
+ start = i
+ }
+ }
+}
+func (ss sliceSorter) less(v reflect.Value, i, j int) bool {
+ vx, vy := v.Index(i), v.Index(j)
+ vo := ss.fnc.Call([]reflect.Value{vx, vy})[0]
+ if vo.Kind() == reflect.Bool {
+ return vo.Bool()
+ } else {
+ return vo.Int() < 0
+ }
+}
+
+// SortMaps returns a [cmp.Transformer] option that flattens map[K]V types to be
+// a sorted []struct{K, V}. The lessOrCompareFunc function must be either
+// a less function of the form "func(T, T) bool" or
+// a compare function of the format "func(T, T) int"
+// which is used to sort any map with key K that is assignable to T.
+//
+// Flattening the map into a slice has the property that [cmp.Equal] is able to
+// use [cmp.Comparer] options on K or the K.Equal method if it exists.
+//
+// A less function must be:
+// - Deterministic: less(x, y) == less(x, y)
+// - Irreflexive: !less(x, x)
+// - Transitive: if !less(x, y) and !less(y, z), then !less(x, z)
+// - Total: if x != y, then either less(x, y) or less(y, x)
+//
+// A compare function must be:
+// - Deterministic: compare(x, y) == compare(x, y)
+// - Irreflexive: compare(x, x) == 0
+// - Transitive: if compare(x, y) < 0 and compare(y, z) < 0, then compare(x, z) < 0
+// - Total: if x != y, then compare(x, y) != 0
+//
+// SortMaps can be used in conjunction with [EquateEmpty].
+func SortMaps(lessOrCompareFunc interface{}) cmp.Option {
+ vf := reflect.ValueOf(lessOrCompareFunc)
+ if (!function.IsType(vf.Type(), function.Less) && !function.IsType(vf.Type(), function.Compare)) || vf.IsNil() {
+ panic(fmt.Sprintf("invalid less or compare function: %T", lessOrCompareFunc))
+ }
+ ms := mapSorter{vf.Type().In(0), vf}
+ return cmp.FilterValues(ms.filter, cmp.Transformer("cmpopts.SortMaps", ms.sort))
+}
+
+type mapSorter struct {
+ in reflect.Type // T
+ fnc reflect.Value // func(T, T) bool
+}
+
+func (ms mapSorter) filter(x, y interface{}) bool {
+ vx, vy := reflect.ValueOf(x), reflect.ValueOf(y)
+ return (x != nil && y != nil && vx.Type() == vy.Type()) &&
+ (vx.Kind() == reflect.Map && vx.Type().Key().AssignableTo(ms.in)) &&
+ (vx.Len() != 0 || vy.Len() != 0)
+}
+func (ms mapSorter) sort(x interface{}) interface{} {
+ src := reflect.ValueOf(x)
+ outType := reflect.StructOf([]reflect.StructField{
+ {Name: "K", Type: src.Type().Key()},
+ {Name: "V", Type: src.Type().Elem()},
+ })
+ dst := reflect.MakeSlice(reflect.SliceOf(outType), src.Len(), src.Len())
+ for i, k := range src.MapKeys() {
+ v := reflect.New(outType).Elem()
+ v.Field(0).Set(k)
+ v.Field(1).Set(src.MapIndex(k))
+ dst.Index(i).Set(v)
+ }
+ sort.Slice(dst.Interface(), func(i, j int) bool { return ms.less(dst, i, j) })
+ ms.checkSort(dst)
+ return dst.Interface()
+}
+func (ms mapSorter) checkSort(v reflect.Value) {
+ for i := 1; i < v.Len(); i++ {
+ if !ms.less(v, i-1, i) {
+ panic(fmt.Sprintf("partial order detected: want %v < %v", v.Index(i-1), v.Index(i)))
+ }
+ }
+}
+func (ms mapSorter) less(v reflect.Value, i, j int) bool {
+ vx, vy := v.Index(i).Field(0), v.Index(j).Field(0)
+ vo := ms.fnc.Call([]reflect.Value{vx, vy})[0]
+ if vo.Kind() == reflect.Bool {
+ return vo.Bool()
+ } else {
+ return vo.Int() < 0
+ }
+}
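A minimal sketch of order-insensitive comparison with SortSlices and SortMaps, not part of the vendored tree; both the less form and the compare form are shown:

package example

import (
	"fmt"
	"strings"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

func main() {
	// Compare the slices as sets: both sides are sorted before diffing.
	gotTags := []string{"beta", "alpha"}
	wantTags := []string{"alpha", "beta"}
	fmt.Println(cmp.Equal(wantTags, gotTags,
		cmpopts.SortSlices(func(a, b string) bool { return a < b }))) // less function, func(T, T) bool

	// A compare function of the form func(T, T) int is accepted as well.
	gotIDs := []int{3, 1, 2}
	wantIDs := []int{1, 2, 3}
	fmt.Println(cmp.Equal(wantIDs, gotIDs,
		cmpopts.SortSlices(func(a, b int) int { return a - b })))

	// SortMaps flattens map[K]V into a sorted []struct{K; V}, which also lets
	// Comparer options or an Equal method apply to the keys.
	gotCounts := map[string]int{"b": 2, "a": 1}
	wantCounts := map[string]int{"a": 1, "b": 2}
	fmt.Println(cmp.Equal(wantCounts, gotCounts,
		cmpopts.SortMaps(func(a, b string) int { return strings.Compare(a, b) })))
}

All three comparisons report true.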
diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/struct_filter.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/struct_filter.go
new file mode 100644
index 0000000..ca11a40
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/struct_filter.go
@@ -0,0 +1,189 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmpopts
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/google/go-cmp/cmp"
+)
+
+// filterField returns a new Option where opt is only evaluated on paths that
+// include a specific exported field on a single struct type.
+// The struct type is specified by passing in a value of that type.
+//
+// The name may be a dot-delimited string (e.g., "Foo.Bar") to select a
+// specific sub-field that is embedded or nested within the parent struct.
+func filterField(typ interface{}, name string, opt cmp.Option) cmp.Option {
+ // TODO: This is currently unexported over concerns of how helper filters
+ // can be composed together easily.
+ // TODO: Add tests for FilterField.
+
+ sf := newStructFilter(typ, name)
+ return cmp.FilterPath(sf.filter, opt)
+}
+
+type structFilter struct {
+ t reflect.Type // The root struct type to match on
+ ft fieldTree // Tree of fields to match on
+}
+
+func newStructFilter(typ interface{}, names ...string) structFilter {
+ // TODO: Perhaps allow * as a special identifier to allow ignoring any
+ // number of path steps until the next field match?
+ // This could be useful when a concrete struct gets transformed into
+ // an anonymous struct where it is not possible to specify that by type,
+ // but the transformer happens to provide guarantees about the names of
+ // the transformed fields.
+
+ t := reflect.TypeOf(typ)
+ if t == nil || t.Kind() != reflect.Struct {
+ panic(fmt.Sprintf("%T must be a non-pointer struct", typ))
+ }
+ var ft fieldTree
+ for _, name := range names {
+ cname, err := canonicalName(t, name)
+ if err != nil {
+ panic(fmt.Sprintf("%s: %v", strings.Join(cname, "."), err))
+ }
+ ft.insert(cname)
+ }
+ return structFilter{t, ft}
+}
+
+func (sf structFilter) filter(p cmp.Path) bool {
+ for i, ps := range p {
+ if ps.Type().AssignableTo(sf.t) && sf.ft.matchPrefix(p[i+1:]) {
+ return true
+ }
+ }
+ return false
+}
+
+// fieldTree represents a set of dot-separated identifiers.
+//
+// For example, inserting the following selectors:
+//
+// Foo
+// Foo.Bar.Baz
+// Foo.Buzz
+// Nuka.Cola.Quantum
+//
+// Results in a tree of the form:
+//
+// {sub: {
+// "Foo": {ok: true, sub: {
+// "Bar": {sub: {
+// "Baz": {ok: true},
+// }},
+// "Buzz": {ok: true},
+// }},
+// "Nuka": {sub: {
+// "Cola": {sub: {
+// "Quantum": {ok: true},
+// }},
+// }},
+// }}
+type fieldTree struct {
+ ok bool // Whether this is a specified node
+ sub map[string]fieldTree // The sub-tree of fields under this node
+}
+
+// insert inserts a sequence of field accesses into the tree.
+func (ft *fieldTree) insert(cname []string) {
+ if ft.sub == nil {
+ ft.sub = make(map[string]fieldTree)
+ }
+ if len(cname) == 0 {
+ ft.ok = true
+ return
+ }
+ sub := ft.sub[cname[0]]
+ sub.insert(cname[1:])
+ ft.sub[cname[0]] = sub
+}
+
+// matchPrefix reports whether any selector in the fieldTree matches
+// the start of path p.
+func (ft fieldTree) matchPrefix(p cmp.Path) bool {
+ for _, ps := range p {
+ switch ps := ps.(type) {
+ case cmp.StructField:
+ ft = ft.sub[ps.Name()]
+ if ft.ok {
+ return true
+ }
+ if len(ft.sub) == 0 {
+ return false
+ }
+ case cmp.Indirect:
+ default:
+ return false
+ }
+ }
+ return false
+}
+
+// canonicalName returns a list of identifiers where any struct field access
+// through an embedded field is expanded to include the names of the embedded
+// types themselves.
+//
+// For example, suppose field "Foo" is not directly in the parent struct,
+// but actually from an embedded struct of type "Bar". Then, the canonical name
+// of "Foo" is actually "Bar.Foo".
+//
+// Suppose field "Foo" is not directly in the parent struct, but actually
+// a field in two different embedded structs of types "Bar" and "Baz".
+// Then the selector "Foo" causes a panic since it is ambiguous which one it
+// refers to. The user must specify either "Bar.Foo" or "Baz.Foo".
+func canonicalName(t reflect.Type, sel string) ([]string, error) {
+ var name string
+ sel = strings.TrimPrefix(sel, ".")
+ if sel == "" {
+ return nil, fmt.Errorf("name must not be empty")
+ }
+ if i := strings.IndexByte(sel, '.'); i < 0 {
+ name, sel = sel, ""
+ } else {
+ name, sel = sel[:i], sel[i:]
+ }
+
+ // Type must be a struct or pointer to struct.
+ if t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ if t.Kind() != reflect.Struct {
+ return nil, fmt.Errorf("%v must be a struct", t)
+ }
+
+ // Find the canonical name for this current field name.
+ // If the field exists in an embedded struct, then it will be expanded.
+ sf, _ := t.FieldByName(name)
+ if !isExported(name) {
+ // Avoid using reflect.Type.FieldByName for unexported fields due to
+ // buggy behavior with regard to embedding and unexported fields.
+ // See https://golang.org/issue/4876 for details.
+ sf = reflect.StructField{}
+ for i := 0; i < t.NumField() && sf.Name == ""; i++ {
+ if t.Field(i).Name == name {
+ sf = t.Field(i)
+ }
+ }
+ }
+ if sf.Name == "" {
+ return []string{name}, fmt.Errorf("does not exist")
+ }
+ var ss []string
+ for i := range sf.Index {
+ ss = append(ss, t.FieldByIndex(sf.Index[:i+1]).Name)
+ }
+ if sel == "" {
+ return ss, nil
+ }
+ ssPost, err := canonicalName(sf.Type, sel)
+ return append(ss, ssPost...), err
+}
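A minimal sketch of the canonical-name expansion that the field tree above implements, not part of the vendored tree; the Meta and Record types are invented for illustration:

package example

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

// Meta and Record are hypothetical types used only for this sketch.
type Meta struct {
	Revision int
}

type Record struct {
	Meta        // embedded: Revision is promoted onto Record
	Name string
}

func main() {
	x := Record{Meta: Meta{Revision: 1}, Name: "n"}
	y := Record{Meta: Meta{Revision: 2}, Name: "n"}

	// "Revision" reaches Record only through the embedded Meta, so its
	// canonical name is "Meta.Revision"; either selector ignores the field.
	fmt.Println(cmp.Equal(x, y, cmpopts.IgnoreFields(Record{}, "Revision")))      // true
	fmt.Println(cmp.Equal(x, y, cmpopts.IgnoreFields(Record{}, "Meta.Revision"))) // true
}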
diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/xform.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/xform.go
new file mode 100644
index 0000000..25b4bd0
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/xform.go
@@ -0,0 +1,36 @@
+// Copyright 2018, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmpopts
+
+import (
+ "github.com/google/go-cmp/cmp"
+)
+
+type xformFilter struct{ xform cmp.Option }
+
+func (xf xformFilter) filter(p cmp.Path) bool {
+ for _, ps := range p {
+ if t, ok := ps.(cmp.Transform); ok && t.Option() == xf.xform {
+ return false
+ }
+ }
+ return true
+}
+
+// AcyclicTransformer returns a [cmp.Transformer] with a filter applied that ensures
+// that the transformer cannot be recursively applied upon its own output.
+//
+// An example use case is a transformer that splits a string by lines:
+//
+// AcyclicTransformer("SplitLines", func(s string) []string{
+// return strings.Split(s, "\n")
+// })
+//
+// Had this been an unfiltered [cmp.Transformer] instead, this would result in an
+// infinite cycle converting a string to []string to [][]string and so on.
+func AcyclicTransformer(name string, xformFunc interface{}) cmp.Option {
+ xf := xformFilter{cmp.Transformer(name, xformFunc)}
+ return cmp.FilterPath(xf.filter, xf.xform)
+}
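A minimal sketch of the SplitLines transformer documented above, not part of the vendored tree, showing that the acyclic filter stops the transform from firing again on its own []string output:

package example

import (
	"fmt"
	"strings"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

func main() {
	splitLines := cmpopts.AcyclicTransformer("SplitLines", func(s string) []string {
		return strings.Split(s, "\n")
	})

	x := "alpha\nbeta\ngamma"
	y := "alpha\nBETA\ngamma"

	// The strings are compared line by line; without the acyclic filter the
	// transformer would also apply to each element of the resulting []string.
	fmt.Println(cmp.Diff(x, y, splitLines))
}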
diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go
new file mode 100644
index 0000000..0f5b8a4
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/compare.go
@@ -0,0 +1,671 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cmp determines equality of values.
+//
+// This package is intended to be a more powerful and safer alternative to
+// [reflect.DeepEqual] for comparing whether two values are semantically equal.
+// It is intended to only be used in tests, as performance is not a goal and
+// it may panic if it cannot compare the values. Its propensity towards
+// panicking means that it is unsuitable for production environments where a
+// spurious panic may be fatal.
+//
+// The primary features of cmp are:
+//
+// - When the default behavior of equality does not suit the test's needs,
+// custom equality functions can override the equality operation.
+// For example, an equality function may report floats as equal so long as
+// they are within some tolerance of each other.
+//
+// - Types with an Equal method (e.g., [time.Time.Equal]) may use that method
+// to determine equality. This allows package authors to determine
+// the equality operation for the types that they define.
+//
+// - If no custom equality functions are used and no Equal method is defined,
+// equality is determined by recursively comparing the primitive kinds on
+// both values, much like [reflect.DeepEqual]. Unlike [reflect.DeepEqual],
+// unexported fields are not compared by default; they result in panics
+// unless suppressed by using an [Ignore] option
+// (see [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported])
+// or explicitly compared using the [Exporter] option.
+package cmp
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/google/go-cmp/cmp/internal/diff"
+ "github.com/google/go-cmp/cmp/internal/function"
+ "github.com/google/go-cmp/cmp/internal/value"
+)
+
+// TODO(≥go1.18): Use any instead of interface{}.
+
+// Equal reports whether x and y are equal by recursively applying the
+// following rules in the given order to x and y and all of their sub-values:
+//
+// - Let S be the set of all [Ignore], [Transformer], and [Comparer] options that
+// remain after applying all path filters, value filters, and type filters.
+// If at least one [Ignore] exists in S, then the comparison is ignored.
+// If the number of [Transformer] and [Comparer] options in S is non-zero,
+// then Equal panics because it is ambiguous which option to use.
+// If S contains a single [Transformer], then use that to transform
+// the current values and recursively call Equal on the output values.
+// If S contains a single [Comparer], then use that to compare the current values.
+// Otherwise, evaluation proceeds to the next rule.
+//
+// - If the values have an Equal method of the form "(T) Equal(T) bool" or
+// "(T) Equal(I) bool" where T is assignable to I, then use the result of
+// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and
+// evaluation proceeds to the next rule.
+//
+// - Lastly, try to compare x and y based on their basic kinds.
+// Simple kinds like booleans, integers, floats, complex numbers, strings,
+// and channels are compared using the equivalent of the == operator in Go.
+// Functions are only equal if they are both nil, otherwise they are unequal.
+//
+// Structs are equal if recursively calling Equal on all fields reports equal.
+// If a struct contains unexported fields, Equal panics unless an [Ignore] option
+// (e.g., [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]) ignores that field
+// or the [Exporter] option explicitly permits comparing the unexported field.
+//
+// Slices are equal if they are both nil or both non-nil, where recursively
+// calling Equal on all non-ignored slice or array elements reports equal.
+// Empty non-nil slices and nil slices are not equal; to equate empty slices,
+// consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty].
+//
+// Maps are equal if they are both nil or both non-nil, where recursively
+// calling Equal on all non-ignored map entries reports equal.
+// Map keys are equal according to the == operator.
+// To use custom comparisons for map keys, consider using
+// [github.com/google/go-cmp/cmp/cmpopts.SortMaps].
+// Empty non-nil maps and nil maps are not equal; to equate empty maps,
+// consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty].
+//
+// Pointers and interfaces are equal if they are both nil or both non-nil,
+// where they have the same underlying concrete type and recursively
+// calling Equal on the underlying values reports equal.
+//
+// Before recursing into a pointer, slice element, or map, the current path
+// is checked to detect whether the address has already been visited.
+// If there is a cycle, then the pointed at values are considered equal
+// only if both addresses were previously visited in the same path step.
+func Equal(x, y interface{}, opts ...Option) bool {
+ s := newState(opts)
+ s.compareAny(rootStep(x, y))
+ return s.result.Equal()
+}
+
+// Diff returns a human-readable report of the differences between two values:
+// y - x. It returns an empty string if and only if Equal returns true for the
+// same input values and options.
+//
+// The output is displayed as a literal in pseudo-Go syntax.
+// At the start of each line, a "-" prefix indicates an element removed from x,
+// a "+" prefix to indicates an element added from y, and the lack of a prefix
+// indicates an element common to both x and y. If possible, the output
+// uses fmt.Stringer.String or error.Error methods to produce more humanly
+// readable outputs. In such cases, the string is prefixed with either an
+// 's' or 'e' character, respectively, to indicate that the method was called.
+//
+// Do not depend on this output being stable. If you need the ability to
+// programmatically interpret the difference, consider using a custom Reporter.
+func Diff(x, y interface{}, opts ...Option) string {
+ s := newState(opts)
+
+ // Optimization: If there are no other reporters, we can optimize for the
+ // common case where the result is equal (and thus no reported difference).
+ // This avoids the expensive construction of a difference tree.
+ if len(s.reporters) == 0 {
+ s.compareAny(rootStep(x, y))
+ if s.result.Equal() {
+ return ""
+ }
+ s.result = diff.Result{} // Reset results
+ }
+
+ r := new(defaultReporter)
+ s.reporters = append(s.reporters, reporter{r})
+ s.compareAny(rootStep(x, y))
+ d := r.String()
+ if (d == "") != s.result.Equal() {
+ panic("inconsistent difference and equality results")
+ }
+ return d
+}
+
+// rootStep constructs the first path step. If x and y have differing types,
+// then they are stored within an empty interface type.
+func rootStep(x, y interface{}) PathStep {
+ vx := reflect.ValueOf(x)
+ vy := reflect.ValueOf(y)
+
+ // If the inputs are different types, auto-wrap them in an empty interface
+ // so that they have the same parent type.
+ var t reflect.Type
+ if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() {
+ t = anyType
+ if vx.IsValid() {
+ vvx := reflect.New(t).Elem()
+ vvx.Set(vx)
+ vx = vvx
+ }
+ if vy.IsValid() {
+ vvy := reflect.New(t).Elem()
+ vvy.Set(vy)
+ vy = vvy
+ }
+ } else {
+ t = vx.Type()
+ }
+
+ return &pathStep{t, vx, vy}
+}
+
+type state struct {
+ // These fields represent the "comparison state".
+ // Calling statelessCompare must not result in observable changes to these.
+ result diff.Result // The current result of comparison
+ curPath Path // The current path in the value tree
+ curPtrs pointerPath // The current set of visited pointers
+ reporters []reporter // Optional reporters
+
+ // recChecker checks for infinite cycles applying the same set of
+ // transformers upon the output of itself.
+ recChecker recChecker
+
+ // dynChecker triggers pseudo-random checks for option correctness.
+ // It is safe for statelessCompare to mutate this value.
+ dynChecker dynChecker
+
+ // These fields, once set by processOption, will not change.
+ exporters []exporter // List of exporters for structs with unexported fields
+ opts Options // List of all fundamental and filter options
+}
+
+func newState(opts []Option) *state {
+ // Always ensure a validator option exists to validate the inputs.
+ s := &state{opts: Options{validator{}}}
+ s.curPtrs.Init()
+ s.processOption(Options(opts))
+ return s
+}
+
+func (s *state) processOption(opt Option) {
+ switch opt := opt.(type) {
+ case nil:
+ case Options:
+ for _, o := range opt {
+ s.processOption(o)
+ }
+ case coreOption:
+ type filtered interface {
+ isFiltered() bool
+ }
+ if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() {
+ panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt))
+ }
+ s.opts = append(s.opts, opt)
+ case exporter:
+ s.exporters = append(s.exporters, opt)
+ case reporter:
+ s.reporters = append(s.reporters, opt)
+ default:
+ panic(fmt.Sprintf("unknown option %T", opt))
+ }
+}
+
+// statelessCompare compares two values and returns the result.
+// This function is stateless in that it does not alter the current result,
+// or output to any registered reporters.
+func (s *state) statelessCompare(step PathStep) diff.Result {
+ // We do not save and restore curPath and curPtrs because all of the
+ // compareX methods should properly push and pop from them.
+ // It is an implementation bug if the contents of the paths differ from
+ // when calling this function to when returning from it.
+
+ oldResult, oldReporters := s.result, s.reporters
+ s.result = diff.Result{} // Reset result
+ s.reporters = nil // Remove reporters to avoid spurious printouts
+ s.compareAny(step)
+ res := s.result
+ s.result, s.reporters = oldResult, oldReporters
+ return res
+}
+
+func (s *state) compareAny(step PathStep) {
+ // Update the path stack.
+ s.curPath.push(step)
+ defer s.curPath.pop()
+ for _, r := range s.reporters {
+ r.PushStep(step)
+ defer r.PopStep()
+ }
+ s.recChecker.Check(s.curPath)
+
+ // Cycle-detection for slice elements (see NOTE in compareSlice).
+ t := step.Type()
+ vx, vy := step.Values()
+ if si, ok := step.(SliceIndex); ok && si.isSlice && vx.IsValid() && vy.IsValid() {
+ px, py := vx.Addr(), vy.Addr()
+ if eq, visited := s.curPtrs.Push(px, py); visited {
+ s.report(eq, reportByCycle)
+ return
+ }
+ defer s.curPtrs.Pop(px, py)
+ }
+
+ // Rule 1: Check whether an option applies on this node in the value tree.
+ if s.tryOptions(t, vx, vy) {
+ return
+ }
+
+ // Rule 2: Check whether the type has a valid Equal method.
+ if s.tryMethod(t, vx, vy) {
+ return
+ }
+
+ // Rule 3: Compare based on the underlying kind.
+ switch t.Kind() {
+ case reflect.Bool:
+ s.report(vx.Bool() == vy.Bool(), 0)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ s.report(vx.Int() == vy.Int(), 0)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ s.report(vx.Uint() == vy.Uint(), 0)
+ case reflect.Float32, reflect.Float64:
+ s.report(vx.Float() == vy.Float(), 0)
+ case reflect.Complex64, reflect.Complex128:
+ s.report(vx.Complex() == vy.Complex(), 0)
+ case reflect.String:
+ s.report(vx.String() == vy.String(), 0)
+ case reflect.Chan, reflect.UnsafePointer:
+ s.report(vx.Pointer() == vy.Pointer(), 0)
+ case reflect.Func:
+ s.report(vx.IsNil() && vy.IsNil(), 0)
+ case reflect.Struct:
+ s.compareStruct(t, vx, vy)
+ case reflect.Slice, reflect.Array:
+ s.compareSlice(t, vx, vy)
+ case reflect.Map:
+ s.compareMap(t, vx, vy)
+ case reflect.Ptr:
+ s.comparePtr(t, vx, vy)
+ case reflect.Interface:
+ s.compareInterface(t, vx, vy)
+ default:
+ panic(fmt.Sprintf("%v kind not handled", t.Kind()))
+ }
+}
+
+func (s *state) tryOptions(t reflect.Type, vx, vy reflect.Value) bool {
+ // Evaluate all filters and apply the remaining options.
+ if opt := s.opts.filter(s, t, vx, vy); opt != nil {
+ opt.apply(s, vx, vy)
+ return true
+ }
+ return false
+}
+
+func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool {
+ // Check if this type even has an Equal method.
+ m, ok := t.MethodByName("Equal")
+ if !ok || !function.IsType(m.Type, function.EqualAssignable) {
+ return false
+ }
+
+ eq := s.callTTBFunc(m.Func, vx, vy)
+ s.report(eq, reportByMethod)
+ return true
+}
+
+func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value {
+ if !s.dynChecker.Next() {
+ return f.Call([]reflect.Value{v})[0]
+ }
+
+ // Run the function twice and ensure that we get the same results back.
+ // We run in goroutines so that the race detector (if enabled) can detect
+ // unsafe mutations to the input.
+ c := make(chan reflect.Value)
+ go detectRaces(c, f, v)
+ got := <-c
+ want := f.Call([]reflect.Value{v})[0]
+ if step.vx, step.vy = got, want; !s.statelessCompare(step).Equal() {
+ // To avoid false-positives with non-reflexive equality operations,
+ // we sanity check whether a value is equal to itself.
+ if step.vx, step.vy = want, want; !s.statelessCompare(step).Equal() {
+ return want
+ }
+ panic(fmt.Sprintf("non-deterministic function detected: %s", function.NameOf(f)))
+ }
+ return want
+}
+
+func (s *state) callTTBFunc(f, x, y reflect.Value) bool {
+ if !s.dynChecker.Next() {
+ return f.Call([]reflect.Value{x, y})[0].Bool()
+ }
+
+ // Swapping the input arguments is sufficient to check that
+ // f is symmetric and deterministic.
+ // We run in goroutines so that the race detector (if enabled) can detect
+ // unsafe mutations to the input.
+ c := make(chan reflect.Value)
+ go detectRaces(c, f, y, x)
+ got := <-c
+ want := f.Call([]reflect.Value{x, y})[0].Bool()
+ if !got.IsValid() || got.Bool() != want {
+ panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", function.NameOf(f)))
+ }
+ return want
+}
+
+func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) {
+ var ret reflect.Value
+ defer func() {
+ recover() // Ignore panics, let the other call to f panic instead
+ c <- ret
+ }()
+ ret = f.Call(vs)[0]
+}
+
+func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) {
+ var addr bool
+ var vax, vay reflect.Value // Addressable versions of vx and vy
+
+ var mayForce, mayForceInit bool
+ step := StructField{&structField{}}
+ for i := 0; i < t.NumField(); i++ {
+ step.typ = t.Field(i).Type
+ step.vx = vx.Field(i)
+ step.vy = vy.Field(i)
+ step.name = t.Field(i).Name
+ step.idx = i
+ step.unexported = !isExported(step.name)
+ if step.unexported {
+ if step.name == "_" {
+ continue
+ }
+ // Defer checking of unexported fields until later to give an
+ // Ignore a chance to ignore the field.
+ if !vax.IsValid() || !vay.IsValid() {
+ // For retrieveUnexportedField to work, the parent struct must
+ // be addressable. Create a new copy of the values if
+ // necessary to make them addressable.
+ addr = vx.CanAddr() || vy.CanAddr()
+ vax = makeAddressable(vx)
+ vay = makeAddressable(vy)
+ }
+ if !mayForceInit {
+ for _, xf := range s.exporters {
+ mayForce = mayForce || xf(t)
+ }
+ mayForceInit = true
+ }
+ step.mayForce = mayForce
+ step.paddr = addr
+ step.pvx = vax
+ step.pvy = vay
+ step.field = t.Field(i)
+ }
+ s.compareAny(step)
+ }
+}
+
+func (s *state) compareSlice(t reflect.Type, vx, vy reflect.Value) {
+ isSlice := t.Kind() == reflect.Slice
+ if isSlice && (vx.IsNil() || vy.IsNil()) {
+ s.report(vx.IsNil() && vy.IsNil(), 0)
+ return
+ }
+
+ // NOTE: It is incorrect to call curPtrs.Push on the slice header pointer
+ // since a slice represents a list of pointers, rather than a single pointer.
+ // The pointer checking logic must be handled on a per-element basis
+ // in compareAny.
+ //
+ // A slice header (see reflect.SliceHeader) in Go is a tuple of a starting
+ // pointer P, a length N, and a capacity C. Supposing each slice element has
+ // a memory size of M, then the slice is equivalent to the list of pointers:
+ // [P+i*M for i in range(N)]
+ //
+ // For example, v[:0] and v[:1] are slices with the same starting pointer,
+ // but they are clearly different values. Using the slice pointer alone
+ // violates the assumption that equal pointers implies equal values.
+
+ step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}, isSlice: isSlice}}
+ withIndexes := func(ix, iy int) SliceIndex {
+ if ix >= 0 {
+ step.vx, step.xkey = vx.Index(ix), ix
+ } else {
+ step.vx, step.xkey = reflect.Value{}, -1
+ }
+ if iy >= 0 {
+ step.vy, step.ykey = vy.Index(iy), iy
+ } else {
+ step.vy, step.ykey = reflect.Value{}, -1
+ }
+ return step
+ }
+
+ // Ignore options are able to ignore missing elements in a slice.
+ // However, detecting these reliably requires an optimal differencing
+ // algorithm, which diff.Difference is not.
+ //
+ // Instead, we first iterate through both slices to detect which elements
+ // would be ignored if standing alone. The index of non-discarded elements
+ // are stored in a separate slice, which diffing is then performed on.
+ var indexesX, indexesY []int
+ var ignoredX, ignoredY []bool
+ for ix := 0; ix < vx.Len(); ix++ {
+ ignored := s.statelessCompare(withIndexes(ix, -1)).NumDiff == 0
+ if !ignored {
+ indexesX = append(indexesX, ix)
+ }
+ ignoredX = append(ignoredX, ignored)
+ }
+ for iy := 0; iy < vy.Len(); iy++ {
+ ignored := s.statelessCompare(withIndexes(-1, iy)).NumDiff == 0
+ if !ignored {
+ indexesY = append(indexesY, iy)
+ }
+ ignoredY = append(ignoredY, ignored)
+ }
+
+ // Compute an edit-script for slices vx and vy (excluding ignored elements).
+ edits := diff.Difference(len(indexesX), len(indexesY), func(ix, iy int) diff.Result {
+ return s.statelessCompare(withIndexes(indexesX[ix], indexesY[iy]))
+ })
+
+ // Replay the ignore-scripts and the edit-script.
+ var ix, iy int
+ for ix < vx.Len() || iy < vy.Len() {
+ var e diff.EditType
+ switch {
+ case ix < len(ignoredX) && ignoredX[ix]:
+ e = diff.UniqueX
+ case iy < len(ignoredY) && ignoredY[iy]:
+ e = diff.UniqueY
+ default:
+ e, edits = edits[0], edits[1:]
+ }
+ switch e {
+ case diff.UniqueX:
+ s.compareAny(withIndexes(ix, -1))
+ ix++
+ case diff.UniqueY:
+ s.compareAny(withIndexes(-1, iy))
+ iy++
+ default:
+ s.compareAny(withIndexes(ix, iy))
+ ix++
+ iy++
+ }
+ }
+}
+
+func (s *state) compareMap(t reflect.Type, vx, vy reflect.Value) {
+ if vx.IsNil() || vy.IsNil() {
+ s.report(vx.IsNil() && vy.IsNil(), 0)
+ return
+ }
+
+ // Cycle-detection for maps.
+ if eq, visited := s.curPtrs.Push(vx, vy); visited {
+ s.report(eq, reportByCycle)
+ return
+ }
+ defer s.curPtrs.Pop(vx, vy)
+
+ // We combine and sort the two map keys so that we can perform the
+ // comparisons in a deterministic order.
+ step := MapIndex{&mapIndex{pathStep: pathStep{typ: t.Elem()}}}
+ for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) {
+ step.vx = vx.MapIndex(k)
+ step.vy = vy.MapIndex(k)
+ step.key = k
+ if !step.vx.IsValid() && !step.vy.IsValid() {
+ // It is possible for both vx and vy to be invalid if the
+ // key contained a NaN value in it.
+ //
+ // Even with the ability to retrieve NaN keys in Go 1.12,
+ // there still isn't a sensible way to compare the values since
+ // a NaN key may map to multiple unordered values.
+ // The most reasonable way to compare NaNs would be to compare the
+ // set of values. However, this is impossible to do efficiently
+ // since set equality is provably an O(n^2) operation given only
+ // an Equal function. If we had a Less function or Hash function,
+ // this could be done in O(n*log(n)) or O(n), respectively.
+ //
+ // Rather than adding complex logic to deal with NaNs, make it
+ // the user's responsibility to compare such obscure maps.
+ const help = "consider providing a Comparer to compare the map"
+ panic(fmt.Sprintf("%#v has map key with NaNs\n%s", s.curPath, help))
+ }
+ s.compareAny(step)
+ }
+}
+
+func (s *state) comparePtr(t reflect.Type, vx, vy reflect.Value) {
+ if vx.IsNil() || vy.IsNil() {
+ s.report(vx.IsNil() && vy.IsNil(), 0)
+ return
+ }
+
+ // Cycle-detection for pointers.
+ if eq, visited := s.curPtrs.Push(vx, vy); visited {
+ s.report(eq, reportByCycle)
+ return
+ }
+ defer s.curPtrs.Pop(vx, vy)
+
+ vx, vy = vx.Elem(), vy.Elem()
+ s.compareAny(Indirect{&indirect{pathStep{t.Elem(), vx, vy}}})
+}
+
+func (s *state) compareInterface(t reflect.Type, vx, vy reflect.Value) {
+ if vx.IsNil() || vy.IsNil() {
+ s.report(vx.IsNil() && vy.IsNil(), 0)
+ return
+ }
+ vx, vy = vx.Elem(), vy.Elem()
+ if vx.Type() != vy.Type() {
+ s.report(false, 0)
+ return
+ }
+ s.compareAny(TypeAssertion{&typeAssertion{pathStep{vx.Type(), vx, vy}}})
+}
+
+func (s *state) report(eq bool, rf resultFlags) {
+ if rf&reportByIgnore == 0 {
+ if eq {
+ s.result.NumSame++
+ rf |= reportEqual
+ } else {
+ s.result.NumDiff++
+ rf |= reportUnequal
+ }
+ }
+ for _, r := range s.reporters {
+ r.Report(Result{flags: rf})
+ }
+}
+
+// recChecker tracks the state needed to periodically perform checks that
+// user provided transformers are not stuck in an infinitely recursive cycle.
+type recChecker struct{ next int }
+
+// Check scans the Path for any recursive transformers and panics when any
+// recursive transformers are detected. Note that the presence of a
+// recursive Transformer does not necessarily imply an infinite cycle.
+// As such, this check only activates after some minimal number of path steps.
+func (rc *recChecker) Check(p Path) {
+ const minLen = 1 << 16
+ if rc.next == 0 {
+ rc.next = minLen
+ }
+ if len(p) < rc.next {
+ return
+ }
+ rc.next <<= 1
+
+ // Check whether the same transformer has appeared at least twice.
+ var ss []string
+ m := map[Option]int{}
+ for _, ps := range p {
+ if t, ok := ps.(Transform); ok {
+ t := t.Option()
+ if m[t] == 1 { // Transformer was used exactly once before
+ tf := t.(*transformer).fnc.Type()
+ ss = append(ss, fmt.Sprintf("%v: %v => %v", t, tf.In(0), tf.Out(0)))
+ }
+ m[t]++
+ }
+ }
+ if len(ss) > 0 {
+ const warning = "recursive set of Transformers detected"
+ const help = "consider using cmpopts.AcyclicTransformer"
+ set := strings.Join(ss, "\n\t")
+ panic(fmt.Sprintf("%s:\n\t%s\n%s", warning, set, help))
+ }
+}
+
+// dynChecker tracks the state needed to periodically perform checks that
+// user provided functions are symmetric and deterministic.
+// The zero value is safe for immediate use.
+type dynChecker struct{ curr, next int }
+
+// Next increments the state and reports whether a check should be performed.
+//
+// Checks occur every Nth function call, where N is a triangular number:
+//
+// 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ...
+//
+// See https://en.wikipedia.org/wiki/Triangular_number
+//
+// This sequence ensures that the cost of checks drops significantly as
+// the number of function calls grows larger.
+func (dc *dynChecker) Next() bool {
+ ok := dc.curr == dc.next
+ if ok {
+ dc.curr = 0
+ dc.next++
+ }
+ dc.curr++
+ return ok
+}
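+
+// For example, the first few calls to Next would report the following
+// (an illustrative trace, derived from the triangular sequence above):
+//
+//	call (0-indexed): 0 1 2 3 4 5 6 7 8 9 10
+//	reports true:     y y n y n n y n n n y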
+
+// makeAddressable returns a value that is always addressable.
+// It returns the input verbatim if it is already addressable,
+// otherwise it creates a new value and returns an addressable copy.
+func makeAddressable(v reflect.Value) reflect.Value {
+ if v.CanAddr() {
+ return v
+ }
+ vc := reflect.New(v.Type()).Elem()
+ vc.Set(v)
+ return vc
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/export.go b/vendor/github.com/google/go-cmp/cmp/export.go
new file mode 100644
index 0000000..29f82fe
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/export.go
@@ -0,0 +1,31 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// retrieveUnexportedField uses unsafe to forcibly retrieve any field from
+// a struct such that the value has read-write permissions.
+//
+// The parent struct, v, must be addressable, while f must be a StructField
+// describing the field to retrieve. If addr is false,
+// then the returned value will be shallow copied to be non-addressable.
+func retrieveUnexportedField(v reflect.Value, f reflect.StructField, addr bool) reflect.Value {
+ ve := reflect.NewAt(f.Type, unsafe.Pointer(uintptr(unsafe.Pointer(v.UnsafeAddr()))+f.Offset)).Elem()
+ if !addr {
+ // A field is addressable if and only if the struct is addressable.
+ // If the original parent value was not addressable, shallow copy the
+ // value to make it non-addressable to avoid leaking an implementation
+ // detail of how forcibly exporting a field works.
+ if ve.Kind() == reflect.Interface && ve.IsNil() {
+ return reflect.Zero(f.Type)
+ }
+ return reflect.ValueOf(ve.Interface()).Convert(f.Type)
+ }
+ return ve
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go
new file mode 100644
index 0000000..36062a6
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go
@@ -0,0 +1,18 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !cmp_debug
+// +build !cmp_debug
+
+package diff
+
+var debug debugger
+
+type debugger struct{}
+
+func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc {
+ return f
+}
+func (debugger) Update() {}
+func (debugger) Finish() {}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go
new file mode 100644
index 0000000..a3b97a1
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go
@@ -0,0 +1,123 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build cmp_debug
+// +build cmp_debug
+
+package diff
+
+import (
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+)
+
+// The algorithm can be seen running in real-time by enabling debugging:
+// go test -tags=cmp_debug -v
+//
+// Example output:
+// === RUN TestDifference/#34
+// ┌───────────────────────────────┐
+// │ \ · · · · · · · · · · · · · · │
+// │ · # · · · · · · · · · · · · · │
+// │ · \ · · · · · · · · · · · · · │
+// │ · · \ · · · · · · · · · · · · │
+// │ · · · X # · · · · · · · · · · │
+// │ · · · # \ · · · · · · · · · · │
+// │ · · · · · # # · · · · · · · · │
+// │ · · · · · # \ · · · · · · · · │
+// │ · · · · · · · \ · · · · · · · │
+// │ · · · · · · · · \ · · · · · · │
+// │ · · · · · · · · · \ · · · · · │
+// │ · · · · · · · · · · \ · · # · │
+// │ · · · · · · · · · · · \ # # · │
+// │ · · · · · · · · · · · # # # · │
+// │ · · · · · · · · · · # # # # · │
+// │ · · · · · · · · · # # # # # · │
+// │ · · · · · · · · · · · · · · \ │
+// └───────────────────────────────┘
+// [.Y..M.XY......YXYXY.|]
+//
+// The grid represents the edit-graph where the horizontal axis represents
+// list X and the vertical axis represents list Y. The start of the two lists
+// is the top-left, while the ends are the bottom-right. The '·' represents
+// an unexplored node in the graph. The '\' indicates that the two symbols
+// from list X and Y are equal. The 'X' indicates that two symbols are similar
+// (but not exactly equal) to each other. The '#' indicates that the two symbols
+// are different (and not similar). The algorithm traverses this graph trying to
+// make the paths starting in the top-left and the bottom-right connect.
+//
+// The series of '.', 'X', 'Y', and 'M' characters at the bottom represents
+// the currently established path from the forward and reverse searches,
+// separated by a '|' character.
+
+const (
+ updateDelay = 100 * time.Millisecond
+ finishDelay = 500 * time.Millisecond
+ ansiTerminal = true // ANSI escape codes used to move terminal cursor
+)
+
+var debug debugger
+
+type debugger struct {
+ sync.Mutex
+ p1, p2 EditScript
+ fwdPath, revPath *EditScript
+ grid []byte
+ lines int
+}
+
+func (dbg *debugger) Begin(nx, ny int, f EqualFunc, p1, p2 *EditScript) EqualFunc {
+ dbg.Lock()
+ dbg.fwdPath, dbg.revPath = p1, p2
+ top := "┌─" + strings.Repeat("──", nx) + "┐\n"
+ row := "│ " + strings.Repeat("· ", nx) + "│\n"
+ btm := "└─" + strings.Repeat("──", nx) + "┘\n"
+ dbg.grid = []byte(top + strings.Repeat(row, ny) + btm)
+ dbg.lines = strings.Count(dbg.String(), "\n")
+ fmt.Print(dbg)
+
+ // Wrap the EqualFunc so that we can intercept each result.
+ return func(ix, iy int) (r Result) {
+ cell := dbg.grid[len(top)+iy*len(row):][len("│ ")+len("· ")*ix:][:len("·")]
+ for i := range cell {
+ cell[i] = 0 // Zero out the multiple bytes of UTF-8 middle-dot
+ }
+ switch r = f(ix, iy); {
+ case r.Equal():
+ cell[0] = '\\'
+ case r.Similar():
+ cell[0] = 'X'
+ default:
+ cell[0] = '#'
+ }
+ return
+ }
+}
+
+func (dbg *debugger) Update() {
+ dbg.print(updateDelay)
+}
+
+func (dbg *debugger) Finish() {
+ dbg.print(finishDelay)
+ dbg.Unlock()
+}
+
+func (dbg *debugger) String() string {
+ dbg.p1, dbg.p2 = *dbg.fwdPath, dbg.p2[:0]
+ for i := len(*dbg.revPath) - 1; i >= 0; i-- {
+ dbg.p2 = append(dbg.p2, (*dbg.revPath)[i])
+ }
+ return fmt.Sprintf("%s[%v|%v]\n\n", dbg.grid, dbg.p1, dbg.p2)
+}
+
+func (dbg *debugger) print(d time.Duration) {
+ if ansiTerminal {
+ fmt.Printf("\x1b[%dA", dbg.lines) // Reset terminal cursor
+ }
+ fmt.Print(dbg)
+ time.Sleep(d)
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
new file mode 100644
index 0000000..a248e54
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
@@ -0,0 +1,402 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package diff implements an algorithm for producing edit-scripts.
+// The edit-script is a sequence of operations needed to transform one list
+// of symbols into another (or vice-versa). The edits allowed are insertions,
+// deletions, and modifications. The summation of all edits is called the
+// Levenshtein distance as this problem is well-known in computer science.
+//
+// This package prioritizes performance over accuracy. That is, the run time
+// is more important than obtaining a minimal Levenshtein distance.
+package diff
+
+import (
+ "math/rand"
+ "time"
+
+ "github.com/google/go-cmp/cmp/internal/flags"
+)
+
+// EditType represents a single operation within an edit-script.
+type EditType uint8
+
+const (
+ // Identity indicates that a symbol pair is identical in both list X and Y.
+ Identity EditType = iota
+ // UniqueX indicates that a symbol only exists in X and not Y.
+ UniqueX
+ // UniqueY indicates that a symbol only exists in Y and not X.
+ UniqueY
+ // Modified indicates that a symbol pair is a modification of each other.
+ Modified
+)
+
+// EditScript represents the series of differences between two lists.
+type EditScript []EditType
+
+// String returns a human-readable string representing the edit-script where
+// Identity, UniqueX, UniqueY, and Modified are represented by the
+// '.', 'X', 'Y', and 'M' characters, respectively.
+func (es EditScript) String() string {
+ b := make([]byte, len(es))
+ for i, e := range es {
+ switch e {
+ case Identity:
+ b[i] = '.'
+ case UniqueX:
+ b[i] = 'X'
+ case UniqueY:
+ b[i] = 'Y'
+ case Modified:
+ b[i] = 'M'
+ default:
+ panic("invalid edit-type")
+ }
+ }
+ return string(b)
+}
+
+// stats returns a histogram of the number of each type of edit operation.
+func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) {
+ for _, e := range es {
+ switch e {
+ case Identity:
+ s.NI++
+ case UniqueX:
+ s.NX++
+ case UniqueY:
+ s.NY++
+ case Modified:
+ s.NM++
+ default:
+ panic("invalid edit-type")
+ }
+ }
+ return
+}
+
+// Dist is the Levenshtein distance and is guaranteed to be 0 if and only if
+// lists X and Y are equal.
+func (es EditScript) Dist() int { return len(es) - es.stats().NI }
+
+// LenX is the length of the X list.
+func (es EditScript) LenX() int { return len(es) - es.stats().NY }
+
+// LenY is the length of the Y list.
+func (es EditScript) LenY() int { return len(es) - es.stats().NX }
+
+// EqualFunc reports whether the symbols at indexes ix and iy are equal.
+// When called by Difference, the index is guaranteed to be within nx and ny.
+type EqualFunc func(ix int, iy int) Result
+
+// Result is the result of comparison.
+// NumSame is the number of sub-elements that are equal.
+// NumDiff is the number of sub-elements that are not equal.
+type Result struct{ NumSame, NumDiff int }
+
+// BoolResult returns a Result that is either Equal or not Equal.
+func BoolResult(b bool) Result {
+ if b {
+ return Result{NumSame: 1} // Equal, Similar
+ } else {
+ return Result{NumDiff: 2} // Not Equal, not Similar
+ }
+}
+
+// Equal indicates whether the symbols are equal. Two symbols are equal
+// if and only if NumDiff == 0. If Equal, then they are also Similar.
+func (r Result) Equal() bool { return r.NumDiff == 0 }
+
+// Similar indicates whether two symbols are similar and may be represented
+// by using the Modified type. As a special case, we consider binary comparisons
+// (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar.
+//
+// The exact ratio of NumSame to NumDiff to determine similarity may change.
+func (r Result) Similar() bool {
+ // Use NumSame+1 to offset NumSame so that binary comparisons are similar.
+ return r.NumSame+1 >= r.NumDiff
+}
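+
+// For example (illustrative), Result{NumSame: 1, NumDiff: 2}.Similar() reports
+// true since 1+1 >= 2, while Result{NumSame: 0, NumDiff: 2}.Similar() reports false.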
+
+var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
+
+// Difference reports whether two lists of lengths nx and ny are equal
+// given the definition of equality provided as f.
+//
+// This function returns an edit-script, which is a sequence of operations
+// needed to convert one list into the other. The following invariants for
+// the edit-script are maintained:
+// - eq == (es.Dist()==0)
+// - nx == es.LenX()
+// - ny == es.LenY()
+//
+// This algorithm is not guaranteed to be an optimal solution (i.e., one that
+// produces an edit-script with a minimal Levenshtein distance). This algorithm
+// favors performance over optimality. The exact output is not guaranteed to
+// be stable and may change over time.
+func Difference(nx, ny int, f EqualFunc) (es EditScript) {
+ // This algorithm is based on traversing what is known as an "edit-graph".
+ // See Figure 1 from "An O(ND) Difference Algorithm and Its Variations"
+ // by Eugene W. Myers. Since D can be as large as N itself, this is
+ // effectively O(N^2). Unlike the algorithm from that paper, we are not
+ // interested in the optimal path, but at least some "decent" path.
+ //
+ // For example, let X and Y be lists of symbols:
+ // X = [A B C A B B A]
+ // Y = [C B A B A C]
+ //
+ // The edit-graph can be drawn as the following:
+ // A B C A B B A
+ // ┌─────────────┐
+ // C │_|_|\|_|_|_|_│ 0
+ // B │_|\|_|_|\|\|_│ 1
+ // A │\|_|_|\|_|_|\│ 2
+ // B │_|\|_|_|\|\|_│ 3
+ // A │\|_|_|\|_|_|\│ 4
+ // C │ | |\| | | | │ 5
+ // └─────────────┘ 6
+ // 0 1 2 3 4 5 6 7
+ //
+ // List X is written along the horizontal axis, while list Y is written
+ // along the vertical axis. At any point on this grid, if the symbol in
+ // list X matches the corresponding symbol in list Y, then a '\' is drawn.
+ // The goal of any minimal edit-script algorithm is to find a path from the
+ // top-left corner to the bottom-right corner, while traveling through the
+ // fewest horizontal or vertical edges.
+ // A horizontal edge is equivalent to inserting a symbol from list X.
+ // A vertical edge is equivalent to inserting a symbol from list Y.
+ // A diagonal edge is equivalent to a matching symbol between both X and Y.
+
+ // Invariants:
+ // - 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
+ // - 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
+ //
+ // In general:
+ // - fwdFrontier.X < revFrontier.X
+ // - fwdFrontier.Y < revFrontier.Y
+ //
+	// Unless it is time for the algorithm to terminate.
+ fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)}
+ revPath := path{-1, point{nx, ny}, make(EditScript, 0)}
+ fwdFrontier := fwdPath.point // Forward search frontier
+ revFrontier := revPath.point // Reverse search frontier
+
+ // Search budget bounds the cost of searching for better paths.
+ // The longest sequence of non-matching symbols that can be tolerated is
+ // approximately the square-root of the search budget.
+ searchBudget := 4 * (nx + ny) // O(n)
+
+ // Running the tests with the "cmp_debug" build tag prints a visualization
+ // of the algorithm running in real-time. This is educational for
+ // understanding how the algorithm works. See debug_enable.go.
+ f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es)
+
+ // The algorithm below is a greedy, meet-in-the-middle algorithm for
+ // computing sub-optimal edit-scripts between two lists.
+ //
+ // The algorithm is approximately as follows:
+ // - Searching for differences switches back-and-forth between
+ // a search that starts at the beginning (the top-left corner), and
+ // a search that starts at the end (the bottom-right corner).
+	//     The goal of the search is to connect with the search
+ // from the opposite corner.
+ // - As we search, we build a path in a greedy manner,
+ // where the first match seen is added to the path (this is sub-optimal,
+ // but provides a decent result in practice). When matches are found,
+ // we try the next pair of symbols in the lists and follow all matches
+ // as far as possible.
+	//   - When searching for matches, we search along a diagonal going
+	//     through the "frontier" point. If no matches are found,
+ // we advance the frontier towards the opposite corner.
+ // - This algorithm terminates when either the X coordinates or the
+ // Y coordinates of the forward and reverse frontier points ever intersect.
+
+ // This algorithm is correct even if searching only in the forward direction
+ // or in the reverse direction. We do both because it is commonly observed
+	// that two lists differ because elements were added to the front
+ // or end of the other list.
+ //
+ // Non-deterministically start with either the forward or reverse direction
+ // to introduce some deliberate instability so that we have the flexibility
+ // to change this algorithm in the future.
+ if flags.Deterministic || randBool {
+ goto forwardSearch
+ } else {
+ goto reverseSearch
+ }
+
+forwardSearch:
+ {
+ // Forward search from the beginning.
+ if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
+ goto finishSearch
+ }
+ for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
+ // Search in a diagonal pattern for a match.
+ z := zigzag(i)
+ p := point{fwdFrontier.X + z, fwdFrontier.Y - z}
+ switch {
+ case p.X >= revPath.X || p.Y < fwdPath.Y:
+ stop1 = true // Hit top-right corner
+ case p.Y >= revPath.Y || p.X < fwdPath.X:
+ stop2 = true // Hit bottom-left corner
+ case f(p.X, p.Y).Equal():
+ // Match found, so connect the path to this point.
+ fwdPath.connect(p, f)
+ fwdPath.append(Identity)
+ // Follow sequence of matches as far as possible.
+ for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
+ if !f(fwdPath.X, fwdPath.Y).Equal() {
+ break
+ }
+ fwdPath.append(Identity)
+ }
+ fwdFrontier = fwdPath.point
+ stop1, stop2 = true, true
+ default:
+ searchBudget-- // Match not found
+ }
+ debug.Update()
+ }
+ // Advance the frontier towards reverse point.
+ if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y {
+ fwdFrontier.X++
+ } else {
+ fwdFrontier.Y++
+ }
+ goto reverseSearch
+ }
+
+reverseSearch:
+ {
+ // Reverse search from the end.
+ if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
+ goto finishSearch
+ }
+ for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
+ // Search in a diagonal pattern for a match.
+ z := zigzag(i)
+ p := point{revFrontier.X - z, revFrontier.Y + z}
+ switch {
+ case fwdPath.X >= p.X || revPath.Y < p.Y:
+ stop1 = true // Hit bottom-left corner
+ case fwdPath.Y >= p.Y || revPath.X < p.X:
+ stop2 = true // Hit top-right corner
+ case f(p.X-1, p.Y-1).Equal():
+ // Match found, so connect the path to this point.
+ revPath.connect(p, f)
+ revPath.append(Identity)
+ // Follow sequence of matches as far as possible.
+ for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
+ if !f(revPath.X-1, revPath.Y-1).Equal() {
+ break
+ }
+ revPath.append(Identity)
+ }
+ revFrontier = revPath.point
+ stop1, stop2 = true, true
+ default:
+ searchBudget-- // Match not found
+ }
+ debug.Update()
+ }
+ // Advance the frontier towards forward point.
+ if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y {
+ revFrontier.X--
+ } else {
+ revFrontier.Y--
+ }
+ goto forwardSearch
+ }
+
+finishSearch:
+ // Join the forward and reverse paths and then append the reverse path.
+ fwdPath.connect(revPath.point, f)
+ for i := len(revPath.es) - 1; i >= 0; i-- {
+ t := revPath.es[i]
+ revPath.es = revPath.es[:i]
+ fwdPath.append(t)
+ }
+ debug.Finish()
+ return fwdPath.es
+}
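+
+// For example (an illustrative sketch, not required by this package), a caller
+// comparing two hypothetical string slices x and y could adapt element-wise
+// equality into an EqualFunc as follows:
+//
+//	es := Difference(len(x), len(y), func(ix, iy int) Result {
+//		return BoolResult(x[ix] == y[iy])
+//	})
+//	// es.Dist() == 0 reports whether x and y are element-wise equal.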
+
+type path struct {
+ dir int // +1 if forward, -1 if reverse
+ point // Leading point of the EditScript path
+ es EditScript
+}
+
+// connect appends any necessary Identity, Modified, UniqueX, or UniqueY types
+// to the edit-script to connect p.point to dst.
+func (p *path) connect(dst point, f EqualFunc) {
+ if p.dir > 0 {
+ // Connect in forward direction.
+ for dst.X > p.X && dst.Y > p.Y {
+ switch r := f(p.X, p.Y); {
+ case r.Equal():
+ p.append(Identity)
+ case r.Similar():
+ p.append(Modified)
+ case dst.X-p.X >= dst.Y-p.Y:
+ p.append(UniqueX)
+ default:
+ p.append(UniqueY)
+ }
+ }
+ for dst.X > p.X {
+ p.append(UniqueX)
+ }
+ for dst.Y > p.Y {
+ p.append(UniqueY)
+ }
+ } else {
+ // Connect in reverse direction.
+ for p.X > dst.X && p.Y > dst.Y {
+ switch r := f(p.X-1, p.Y-1); {
+ case r.Equal():
+ p.append(Identity)
+ case r.Similar():
+ p.append(Modified)
+ case p.Y-dst.Y >= p.X-dst.X:
+ p.append(UniqueY)
+ default:
+ p.append(UniqueX)
+ }
+ }
+ for p.X > dst.X {
+ p.append(UniqueX)
+ }
+ for p.Y > dst.Y {
+ p.append(UniqueY)
+ }
+ }
+}
+
+func (p *path) append(t EditType) {
+ p.es = append(p.es, t)
+ switch t {
+ case Identity, Modified:
+ p.add(p.dir, p.dir)
+ case UniqueX:
+ p.add(p.dir, 0)
+ case UniqueY:
+ p.add(0, p.dir)
+ }
+ debug.Update()
+}
+
+type point struct{ X, Y int }
+
+func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy }
+
+// zigzag maps a consecutive sequence of integers to a zig-zag sequence.
+//
+// [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...]
+func zigzag(x int) int {
+ if x&1 != 0 {
+ x = ^x
+ }
+ return x >> 1
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go
new file mode 100644
index 0000000..d8e459c
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go
@@ -0,0 +1,9 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flags
+
+// Deterministic controls whether the output of Diff should be deterministic.
+// This is only used for testing.
+var Deterministic bool
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go
new file mode 100644
index 0000000..def01a6
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go
@@ -0,0 +1,106 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package function provides functionality for identifying function types.
+package function
+
+import (
+ "reflect"
+ "regexp"
+ "runtime"
+ "strings"
+)
+
+type funcType int
+
+const (
+ _ funcType = iota
+
+ tbFunc // func(T) bool
+ ttbFunc // func(T, T) bool
+ ttiFunc // func(T, T) int
+ trbFunc // func(T, R) bool
+ tibFunc // func(T, I) bool
+ trFunc // func(T) R
+
+ Equal = ttbFunc // func(T, T) bool
+ EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool
+ Transformer = trFunc // func(T) R
+ ValueFilter = ttbFunc // func(T, T) bool
+ Less = ttbFunc // func(T, T) bool
+ Compare = ttiFunc // func(T, T) int
+ ValuePredicate = tbFunc // func(T) bool
+ KeyValuePredicate = trbFunc // func(T, R) bool
+)
+
+var boolType = reflect.TypeOf(true)
+var intType = reflect.TypeOf(0)
+
+// IsType reports whether the reflect.Type is of the specified function type.
+func IsType(t reflect.Type, ft funcType) bool {
+ if t == nil || t.Kind() != reflect.Func || t.IsVariadic() {
+ return false
+ }
+ ni, no := t.NumIn(), t.NumOut()
+ switch ft {
+ case tbFunc: // func(T) bool
+ if ni == 1 && no == 1 && t.Out(0) == boolType {
+ return true
+ }
+ case ttbFunc: // func(T, T) bool
+ if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType {
+ return true
+ }
+ case ttiFunc: // func(T, T) int
+ if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == intType {
+ return true
+ }
+ case trbFunc: // func(T, R) bool
+ if ni == 2 && no == 1 && t.Out(0) == boolType {
+ return true
+ }
+ case tibFunc: // func(T, I) bool
+ if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType {
+ return true
+ }
+ case trFunc: // func(T) R
+ if ni == 1 && no == 1 {
+ return true
+ }
+ }
+ return false
+}
+
+var lastIdentRx = regexp.MustCompile(`[_\p{L}][_\p{L}\p{N}]*$`)
+
+// NameOf returns the name of the function value.
+func NameOf(v reflect.Value) string {
+ fnc := runtime.FuncForPC(v.Pointer())
+ if fnc == nil {
+ return "<unknown>"
+ }
+ fullName := fnc.Name() // e.g., "long/path/name/mypkg.(*MyType).(long/path/name/mypkg.myMethod)-fm"
+
+ // Method closures have a "-fm" suffix.
+ fullName = strings.TrimSuffix(fullName, "-fm")
+
+ var name string
+ for len(fullName) > 0 {
+ inParen := strings.HasSuffix(fullName, ")")
+ fullName = strings.TrimSuffix(fullName, ")")
+
+ s := lastIdentRx.FindString(fullName)
+ if s == "" {
+ break
+ }
+ name = s + "." + name
+ fullName = strings.TrimSuffix(fullName, s)
+
+ if i := strings.LastIndexByte(fullName, '('); inParen && i >= 0 {
+ fullName = fullName[:i]
+ }
+ fullName = strings.TrimSuffix(fullName, ".")
+ }
+ return strings.TrimSuffix(name, ".")
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/name.go b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go
new file mode 100644
index 0000000..7b498bb
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go
@@ -0,0 +1,164 @@
+// Copyright 2020, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package value
+
+import (
+ "reflect"
+ "strconv"
+)
+
+var anyType = reflect.TypeOf((*interface{})(nil)).Elem()
+
+// TypeString is nearly identical to reflect.Type.String,
+// but has an additional option to specify that full type names be used.
+func TypeString(t reflect.Type, qualified bool) string {
+ return string(appendTypeName(nil, t, qualified, false))
+}
+
+func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte {
+ // BUG: Go reflection provides no way to disambiguate two named types
+ // of the same name and within the same package,
+ // but declared within the namespace of different functions.
+
+ // Use the "any" alias instead of "interface{}" for better readability.
+ if t == anyType {
+ return append(b, "any"...)
+ }
+
+ // Named type.
+ if t.Name() != "" {
+ if qualified && t.PkgPath() != "" {
+ b = append(b, '"')
+ b = append(b, t.PkgPath()...)
+ b = append(b, '"')
+ b = append(b, '.')
+ b = append(b, t.Name()...)
+ } else {
+ b = append(b, t.String()...)
+ }
+ return b
+ }
+
+ // Unnamed type.
+ switch k := t.Kind(); k {
+ case reflect.Bool, reflect.String, reflect.UnsafePointer,
+ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
+ reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
+ b = append(b, k.String()...)
+ case reflect.Chan:
+ if t.ChanDir() == reflect.RecvDir {
+ b = append(b, "<-"...)
+ }
+ b = append(b, "chan"...)
+ if t.ChanDir() == reflect.SendDir {
+ b = append(b, "<-"...)
+ }
+ b = append(b, ' ')
+ b = appendTypeName(b, t.Elem(), qualified, false)
+ case reflect.Func:
+ if !elideFunc {
+ b = append(b, "func"...)
+ }
+ b = append(b, '(')
+ for i := 0; i < t.NumIn(); i++ {
+ if i > 0 {
+ b = append(b, ", "...)
+ }
+ if i == t.NumIn()-1 && t.IsVariadic() {
+ b = append(b, "..."...)
+ b = appendTypeName(b, t.In(i).Elem(), qualified, false)
+ } else {
+ b = appendTypeName(b, t.In(i), qualified, false)
+ }
+ }
+ b = append(b, ')')
+ switch t.NumOut() {
+ case 0:
+ // Do nothing
+ case 1:
+ b = append(b, ' ')
+ b = appendTypeName(b, t.Out(0), qualified, false)
+ default:
+ b = append(b, " ("...)
+ for i := 0; i < t.NumOut(); i++ {
+ if i > 0 {
+ b = append(b, ", "...)
+ }
+ b = appendTypeName(b, t.Out(i), qualified, false)
+ }
+ b = append(b, ')')
+ }
+ case reflect.Struct:
+ b = append(b, "struct{ "...)
+ for i := 0; i < t.NumField(); i++ {
+ if i > 0 {
+ b = append(b, "; "...)
+ }
+ sf := t.Field(i)
+ if !sf.Anonymous {
+ if qualified && sf.PkgPath != "" {
+ b = append(b, '"')
+ b = append(b, sf.PkgPath...)
+ b = append(b, '"')
+ b = append(b, '.')
+ }
+ b = append(b, sf.Name...)
+ b = append(b, ' ')
+ }
+ b = appendTypeName(b, sf.Type, qualified, false)
+ if sf.Tag != "" {
+ b = append(b, ' ')
+ b = strconv.AppendQuote(b, string(sf.Tag))
+ }
+ }
+ if b[len(b)-1] == ' ' {
+ b = b[:len(b)-1]
+ } else {
+ b = append(b, ' ')
+ }
+ b = append(b, '}')
+ case reflect.Slice, reflect.Array:
+ b = append(b, '[')
+ if k == reflect.Array {
+ b = strconv.AppendUint(b, uint64(t.Len()), 10)
+ }
+ b = append(b, ']')
+ b = appendTypeName(b, t.Elem(), qualified, false)
+ case reflect.Map:
+ b = append(b, "map["...)
+ b = appendTypeName(b, t.Key(), qualified, false)
+ b = append(b, ']')
+ b = appendTypeName(b, t.Elem(), qualified, false)
+ case reflect.Ptr:
+ b = append(b, '*')
+ b = appendTypeName(b, t.Elem(), qualified, false)
+ case reflect.Interface:
+ b = append(b, "interface{ "...)
+ for i := 0; i < t.NumMethod(); i++ {
+ if i > 0 {
+ b = append(b, "; "...)
+ }
+ m := t.Method(i)
+ if qualified && m.PkgPath != "" {
+ b = append(b, '"')
+ b = append(b, m.PkgPath...)
+ b = append(b, '"')
+ b = append(b, '.')
+ }
+ b = append(b, m.Name...)
+ b = appendTypeName(b, m.Type, qualified, true)
+ }
+ if b[len(b)-1] == ' ' {
+ b = b[:len(b)-1]
+ } else {
+ b = append(b, ' ')
+ }
+ b = append(b, '}')
+ default:
+ panic("invalid kind: " + k.String())
+ }
+ return b
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go
new file mode 100644
index 0000000..e5dfff6
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go
@@ -0,0 +1,34 @@
+// Copyright 2018, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package value
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// Pointer is an opaque typed pointer and is guaranteed to be comparable.
+type Pointer struct {
+ p unsafe.Pointer
+ t reflect.Type
+}
+
+// PointerOf returns a Pointer from v, which must be a
+// reflect.Ptr, reflect.Slice, or reflect.Map.
+func PointerOf(v reflect.Value) Pointer {
+ // The proper representation of a pointer is unsafe.Pointer,
+ // which is necessary if the GC ever uses a moving collector.
+ return Pointer{unsafe.Pointer(v.Pointer()), v.Type()}
+}
+
+// IsNil reports whether the pointer is nil.
+func (p Pointer) IsNil() bool {
+ return p.p == nil
+}
+
+// Uintptr returns the pointer as a uintptr.
+func (p Pointer) Uintptr() uintptr {
+ return uintptr(p.p)
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go
new file mode 100644
index 0000000..98533b0
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go
@@ -0,0 +1,106 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package value
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+)
+
+// SortKeys sorts a list of map keys, deduplicating keys if necessary.
+// The type of each value must be comparable.
+func SortKeys(vs []reflect.Value) []reflect.Value {
+ if len(vs) == 0 {
+ return vs
+ }
+
+ // Sort the map keys.
+ sort.SliceStable(vs, func(i, j int) bool { return isLess(vs[i], vs[j]) })
+
+ // Deduplicate keys (fails for NaNs).
+ vs2 := vs[:1]
+ for _, v := range vs[1:] {
+ if isLess(vs2[len(vs2)-1], v) {
+ vs2 = append(vs2, v)
+ }
+ }
+ return vs2
+}
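+
+// For example (an illustrative sketch), the keys of an arbitrary map value m
+// (hypothetical here) can be gathered and sorted deterministically as:
+//
+//	keys := SortKeys(reflect.ValueOf(m).MapKeys())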
+
+// isLess is a generic function for sorting arbitrary map keys.
+// The inputs must be of the same type and must be comparable.
+func isLess(x, y reflect.Value) bool {
+ switch x.Type().Kind() {
+ case reflect.Bool:
+ return !x.Bool() && y.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return x.Int() < y.Int()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return x.Uint() < y.Uint()
+ case reflect.Float32, reflect.Float64:
+ // NOTE: This does not sort -0 as less than +0
+ // since Go maps treat -0 and +0 as equal keys.
+ fx, fy := x.Float(), y.Float()
+ return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy)
+ case reflect.Complex64, reflect.Complex128:
+ cx, cy := x.Complex(), y.Complex()
+ rx, ix, ry, iy := real(cx), imag(cx), real(cy), imag(cy)
+ if rx == ry || (math.IsNaN(rx) && math.IsNaN(ry)) {
+ return ix < iy || math.IsNaN(ix) && !math.IsNaN(iy)
+ }
+ return rx < ry || math.IsNaN(rx) && !math.IsNaN(ry)
+ case reflect.Ptr, reflect.UnsafePointer, reflect.Chan:
+ return x.Pointer() < y.Pointer()
+ case reflect.String:
+ return x.String() < y.String()
+ case reflect.Array:
+ for i := 0; i < x.Len(); i++ {
+ if isLess(x.Index(i), y.Index(i)) {
+ return true
+ }
+ if isLess(y.Index(i), x.Index(i)) {
+ return false
+ }
+ }
+ return false
+ case reflect.Struct:
+ for i := 0; i < x.NumField(); i++ {
+ if isLess(x.Field(i), y.Field(i)) {
+ return true
+ }
+ if isLess(y.Field(i), x.Field(i)) {
+ return false
+ }
+ }
+ return false
+ case reflect.Interface:
+ vx, vy := x.Elem(), y.Elem()
+ if !vx.IsValid() || !vy.IsValid() {
+ return !vx.IsValid() && vy.IsValid()
+ }
+ tx, ty := vx.Type(), vy.Type()
+ if tx == ty {
+ return isLess(x.Elem(), y.Elem())
+ }
+ if tx.Kind() != ty.Kind() {
+ return vx.Kind() < vy.Kind()
+ }
+ if tx.String() != ty.String() {
+ return tx.String() < ty.String()
+ }
+ if tx.PkgPath() != ty.PkgPath() {
+ return tx.PkgPath() < ty.PkgPath()
+ }
+		// This can happen in rare situations, so we fall back to just comparing
+ // the unique pointer for a reflect.Type. This guarantees deterministic
+ // ordering within a program, but it is obviously not stable.
+ return reflect.ValueOf(vx.Type()).Pointer() < reflect.ValueOf(vy.Type()).Pointer()
+ default:
+ // Must be Func, Map, or Slice; which are not comparable.
+ panic(fmt.Sprintf("%T is not comparable", x.Type()))
+ }
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go
new file mode 100644
index 0000000..ba3fce8
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/options.go
@@ -0,0 +1,562 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "fmt"
+ "reflect"
+ "regexp"
+ "strings"
+
+ "github.com/google/go-cmp/cmp/internal/function"
+)
+
+// Option configures for specific behavior of [Equal] and [Diff]. In particular,
+// the fundamental Option functions ([Ignore], [Transformer], and [Comparer]),
+// configure how equality is determined.
+//
+// The fundamental options may be composed with filters ([FilterPath] and
+// [FilterValues]) to control the scope over which they are applied.
+//
+// The [github.com/google/go-cmp/cmp/cmpopts] package provides helper functions
+// for creating options that may be used with [Equal] and [Diff].
+type Option interface {
+ // filter applies all filters and returns the option that remains.
+ // Each option may only read s.curPath and call s.callTTBFunc.
+ //
+ // An Options is returned only if multiple comparers or transformers
+ // can apply simultaneously and will only contain values of those types
+ // or sub-Options containing values of those types.
+ filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption
+}
+
+// applicableOption represents the following types:
+//
+// Fundamental: ignore | validator | *comparer | *transformer
+// Grouping: Options
+type applicableOption interface {
+ Option
+
+ // apply executes the option, which may mutate s or panic.
+ apply(s *state, vx, vy reflect.Value)
+}
+
+// coreOption represents the following types:
+//
+// Fundamental: ignore | validator | *comparer | *transformer
+// Filters: *pathFilter | *valuesFilter
+type coreOption interface {
+ Option
+ isCore()
+}
+
+type core struct{}
+
+func (core) isCore() {}
+
+// Options is a list of [Option] values that also satisfies the [Option] interface.
+// Helper comparison packages may return an Options value when packing multiple
+// [Option] values into a single [Option]. When this package processes an Options,
+// it will be implicitly expanded into a flat list.
+//
+// Applying a filter on an Options is equivalent to applying that same filter
+// on all individual options held within.
+type Options []Option
+
+func (opts Options) filter(s *state, t reflect.Type, vx, vy reflect.Value) (out applicableOption) {
+ for _, opt := range opts {
+ switch opt := opt.filter(s, t, vx, vy); opt.(type) {
+ case ignore:
+ return ignore{} // Only ignore can short-circuit evaluation
+ case validator:
+ out = validator{} // Takes precedence over comparer or transformer
+ case *comparer, *transformer, Options:
+ switch out.(type) {
+ case nil:
+ out = opt
+ case validator:
+ // Keep validator
+ case *comparer, *transformer, Options:
+ out = Options{out, opt} // Conflicting comparers or transformers
+ }
+ }
+ }
+ return out
+}
+
+func (opts Options) apply(s *state, _, _ reflect.Value) {
+ const warning = "ambiguous set of applicable options"
+ const help = "consider using filters to ensure at most one Comparer or Transformer may apply"
+ var ss []string
+ for _, opt := range flattenOptions(nil, opts) {
+ ss = append(ss, fmt.Sprint(opt))
+ }
+ set := strings.Join(ss, "\n\t")
+ panic(fmt.Sprintf("%s at %#v:\n\t%s\n%s", warning, s.curPath, set, help))
+}
+
+func (opts Options) String() string {
+ var ss []string
+ for _, opt := range opts {
+ ss = append(ss, fmt.Sprint(opt))
+ }
+ return fmt.Sprintf("Options{%s}", strings.Join(ss, ", "))
+}
+
+// FilterPath returns a new [Option] where opt is only evaluated if filter f
+// returns true for the current [Path] in the value tree.
+//
+// This filter is called even if a slice element or map entry is missing and
+// provides an opportunity to ignore such cases. The filter function must be
+// symmetric such that the filter result is identical regardless of whether the
+// missing value is from x or y.
+//
+// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or
+// a previously filtered [Option].
+func FilterPath(f func(Path) bool, opt Option) Option {
+ if f == nil {
+ panic("invalid path filter function")
+ }
+ if opt := normalizeOption(opt); opt != nil {
+ return &pathFilter{fnc: f, opt: opt}
+ }
+ return nil
+}
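+
+// For example, a caller might ignore a single struct field by name
+// (an illustrative sketch; the field name ".Secret" is hypothetical):
+//
+//	FilterPath(func(p Path) bool {
+//		return p.Last().String() == ".Secret"
+//	}, Ignore())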
+
+type pathFilter struct {
+ core
+ fnc func(Path) bool
+ opt Option
+}
+
+func (f pathFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption {
+ if f.fnc(s.curPath) {
+ return f.opt.filter(s, t, vx, vy)
+ }
+ return nil
+}
+
+func (f pathFilter) String() string {
+ return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt)
+}
+
+// FilterValues returns a new [Option] where opt is only evaluated if filter f,
+// which is a function of the form "func(T, T) bool", returns true for the
+// current pair of values being compared. If either value is invalid or
+// the type of the values is not assignable to T, then this filter implicitly
+// returns false.
+//
+// The filter function must be
+// symmetric (i.e., agnostic to the order of the inputs) and
+// deterministic (i.e., produces the same result when given the same inputs).
+// If T is an interface, it is possible that f is called with two values with
+// different concrete types that both implement T.
+//
+// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or
+// a previously filtered [Option].
+func FilterValues(f interface{}, opt Option) Option {
+ v := reflect.ValueOf(f)
+ if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() {
+ panic(fmt.Sprintf("invalid values filter function: %T", f))
+ }
+ if opt := normalizeOption(opt); opt != nil {
+ vf := &valuesFilter{fnc: v, opt: opt}
+ if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
+ vf.typ = ti
+ }
+ return vf
+ }
+ return nil
+}
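+
+// For example, an approximate float Comparer might be restricted to strictly
+// positive pairs (an illustrative sketch; the bound 0.1 is arbitrary):
+//
+//	FilterValues(func(x, y float64) bool {
+//		return x > 0 && y > 0
+//	}, Comparer(func(x, y float64) bool {
+//		return math.Abs(x-y) < 0.1
+//	}))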
+
+type valuesFilter struct {
+ core
+ typ reflect.Type // T
+ fnc reflect.Value // func(T, T) bool
+ opt Option
+}
+
+func (f valuesFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption {
+ if !vx.IsValid() || !vx.CanInterface() || !vy.IsValid() || !vy.CanInterface() {
+ return nil
+ }
+ if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) {
+ return f.opt.filter(s, t, vx, vy)
+ }
+ return nil
+}
+
+func (f valuesFilter) String() string {
+ return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt)
+}
+
+// Ignore is an [Option] that causes all comparisons to be ignored.
+// This value is intended to be combined with [FilterPath] or [FilterValues].
+// It is an error to pass an unfiltered Ignore option to [Equal].
+func Ignore() Option { return ignore{} }
+
+type ignore struct{ core }
+
+func (ignore) isFiltered() bool { return false }
+func (ignore) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { return ignore{} }
+func (ignore) apply(s *state, _, _ reflect.Value) { s.report(true, reportByIgnore) }
+func (ignore) String() string { return "Ignore()" }
+
+// validator is a sentinel Option type to indicate that some options could not
+// be evaluated due to unexported fields, missing slice elements, or
+// missing map entries. Both values are validator only for unexported fields.
+type validator struct{ core }
+
+func (validator) filter(_ *state, _ reflect.Type, vx, vy reflect.Value) applicableOption {
+ if !vx.IsValid() || !vy.IsValid() {
+ return validator{}
+ }
+ if !vx.CanInterface() || !vy.CanInterface() {
+ return validator{}
+ }
+ return nil
+}
+func (validator) apply(s *state, vx, vy reflect.Value) {
+ // Implies missing slice element or map entry.
+ if !vx.IsValid() || !vy.IsValid() {
+ s.report(vx.IsValid() == vy.IsValid(), 0)
+ return
+ }
+
+ // Unable to Interface implies unexported field without visibility access.
+ if !vx.CanInterface() || !vy.CanInterface() {
+ help := "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported"
+ var name string
+ if t := s.curPath.Index(-2).Type(); t.Name() != "" {
+ // Named type with unexported fields.
+ name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType
+ isProtoMessage := func(t reflect.Type) bool {
+ m, ok := reflect.PointerTo(t).MethodByName("ProtoReflect")
+ return ok && m.Type.NumIn() == 1 && m.Type.NumOut() == 1 &&
+ m.Type.Out(0).PkgPath() == "google.golang.org/protobuf/reflect/protoreflect" &&
+ m.Type.Out(0).Name() == "Message"
+ }
+ if isProtoMessage(t) {
+ help = `consider using "google.golang.org/protobuf/testing/protocmp".Transform to compare proto.Message types`
+ } else if _, ok := reflect.New(t).Interface().(error); ok {
+ help = "consider using cmpopts.EquateErrors to compare error values"
+ } else if t.Comparable() {
+ help = "consider using cmpopts.EquateComparable to compare comparable Go types"
+ }
+ } else {
+ // Unnamed type with unexported fields. Derive PkgPath from field.
+ var pkgPath string
+ for i := 0; i < t.NumField() && pkgPath == ""; i++ {
+ pkgPath = t.Field(i).PkgPath
+ }
+ name = fmt.Sprintf("%q.(%v)", pkgPath, t.String()) // e.g., "path/to/package".(struct { a int })
+ }
+ panic(fmt.Sprintf("cannot handle unexported field at %#v:\n\t%v\n%s", s.curPath, name, help))
+ }
+
+ panic("not reachable")
+}
+
+// identRx represents a valid identifier according to the Go specification.
+const identRx = `[_\p{L}][_\p{L}\p{N}]*`
+
+var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`)
+
+// Transformer returns an [Option] that applies a transformation function that
+// converts values of a certain type into that of another.
+//
+// The transformer f must be a function "func(T) R" that converts values of
+// type T to those of type R and is implicitly filtered to input values
+// assignable to T. The transformer must not mutate T in any way.
+//
+// To help prevent some cases of infinite recursive cycles applying the
+// same transform to the output of itself (e.g., in the case where the
+// input and output types are the same), an implicit filter is added such that
+// a transformer is applicable only if that exact transformer is not already
+// in the tail of the [Path] since the last non-[Transform] step.
+// For situations where the implicit filter is still insufficient,
+// consider using [github.com/google/go-cmp/cmp/cmpopts.AcyclicTransformer],
+// which adds a filter to prevent the transformer from
+// being recursively applied upon itself.
+//
+// The name is a user provided label that is used as the [Transform.Name] in the
+// transformation [PathStep] (and eventually shown in the [Diff] output).
+// The name must be a valid identifier or qualified identifier in Go syntax.
+// If empty, an arbitrary name is used.
+func Transformer(name string, f interface{}) Option {
+ v := reflect.ValueOf(f)
+ if !function.IsType(v.Type(), function.Transformer) || v.IsNil() {
+ panic(fmt.Sprintf("invalid transformer function: %T", f))
+ }
+ if name == "" {
+ name = function.NameOf(v)
+ if !identsRx.MatchString(name) {
+ name = "λ" // Lambda-symbol as placeholder name
+ }
+ } else if !identsRx.MatchString(name) {
+ panic(fmt.Sprintf("invalid name: %q", name))
+ }
+ tr := &transformer{name: name, fnc: reflect.ValueOf(f)}
+ if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
+ tr.typ = ti
+ }
+ return tr
+}
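+
+// For example, a transformer might compare slices irrespective of order by
+// canonicalizing them first (an illustrative sketch; the name "Sort" is arbitrary):
+//
+//	Transformer("Sort", func(in []int) []int {
+//		out := append([]int(nil), in...) // copy to avoid mutating the input
+//		sort.Ints(out)
+//		return out
+//	})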
+
+type transformer struct {
+ core
+ name string
+ typ reflect.Type // T
+ fnc reflect.Value // func(T) R
+}
+
+func (tr *transformer) isFiltered() bool { return tr.typ != nil }
+
+func (tr *transformer) filter(s *state, t reflect.Type, _, _ reflect.Value) applicableOption {
+ for i := len(s.curPath) - 1; i >= 0; i-- {
+ if t, ok := s.curPath[i].(Transform); !ok {
+ break // Hit most recent non-Transform step
+ } else if tr == t.trans {
+ return nil // Cannot directly use same Transform
+ }
+ }
+ if tr.typ == nil || t.AssignableTo(tr.typ) {
+ return tr
+ }
+ return nil
+}
+
+func (tr *transformer) apply(s *state, vx, vy reflect.Value) {
+ step := Transform{&transform{pathStep{typ: tr.fnc.Type().Out(0)}, tr}}
+ vvx := s.callTRFunc(tr.fnc, vx, step)
+ vvy := s.callTRFunc(tr.fnc, vy, step)
+ step.vx, step.vy = vvx, vvy
+ s.compareAny(step)
+}
+
+func (tr transformer) String() string {
+ return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc))
+}
+
+// Comparer returns an [Option] that determines whether two values are equal
+// to each other.
+//
+// The comparer f must be a function "func(T, T) bool" and is implicitly
+// filtered to input values assignable to T. If T is an interface, it is
+// possible that f is called with two values of different concrete types that
+// both implement T.
+//
+// The equality function must be:
+// - Symmetric: equal(x, y) == equal(y, x)
+// - Deterministic: equal(x, y) == equal(x, y)
+// - Pure: equal(x, y) does not modify x or y
+func Comparer(f interface{}) Option {
+ v := reflect.ValueOf(f)
+ if !function.IsType(v.Type(), function.Equal) || v.IsNil() {
+ panic(fmt.Sprintf("invalid comparer function: %T", f))
+ }
+ cm := &comparer{fnc: v}
+ if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
+ cm.typ = ti
+ }
+ return cm
+}
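+
+// For example, a comparer might treat strings as equal irrespective of case
+// (an illustrative sketch):
+//
+//	Comparer(func(x, y string) bool { return strings.EqualFold(x, y) })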
+
+type comparer struct {
+ core
+ typ reflect.Type // T
+ fnc reflect.Value // func(T, T) bool
+}
+
+func (cm *comparer) isFiltered() bool { return cm.typ != nil }
+
+func (cm *comparer) filter(_ *state, t reflect.Type, _, _ reflect.Value) applicableOption {
+ if cm.typ == nil || t.AssignableTo(cm.typ) {
+ return cm
+ }
+ return nil
+}
+
+func (cm *comparer) apply(s *state, vx, vy reflect.Value) {
+ eq := s.callTTBFunc(cm.fnc, vx, vy)
+ s.report(eq, reportByFunc)
+}
+
+func (cm comparer) String() string {
+ return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc))
+}
+
+// Exporter returns an [Option] that specifies whether [Equal] is allowed to
+// introspect into the unexported fields of certain struct types.
+//
+// Users of this option must understand that comparing on unexported fields
+// from external packages is not safe since changes in the internal
+// implementation of some external package may cause the result of [Equal]
+// to unexpectedly change. However, it may be valid to use this option on types
+// defined in an internal package where the semantic meaning of an unexported
+// field is in the control of the user.
+//
+// In many cases, a custom [Comparer] should be used instead that defines
+// equality as a function of the public API of a type rather than the underlying
+// unexported implementation.
+//
+// For example, the [reflect.Type] documentation defines equality to be determined
+// by the == operator on the interface (essentially performing a shallow pointer
+// comparison) and most attempts to compare *[regexp.Regexp] types are interested
+// in only checking that the regular expression strings are equal.
+// Both of these are accomplished using [Comparer] options:
+//
+// Comparer(func(x, y reflect.Type) bool { return x == y })
+// Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() })
+//
+// In other cases, the [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]
+// option can be used to ignore all unexported fields on specified struct types.
+func Exporter(f func(reflect.Type) bool) Option {
+ return exporter(f)
+}
+
+type exporter func(reflect.Type) bool
+
+func (exporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption {
+ panic("not implemented")
+}
+
+// AllowUnexported returns an [Option] that allows [Equal] to forcibly introspect
+// unexported fields of the specified struct types.
+//
+// See [Exporter] for the proper use of this option.
+func AllowUnexported(types ...interface{}) Option {
+ m := make(map[reflect.Type]bool)
+ for _, typ := range types {
+ t := reflect.TypeOf(typ)
+ if t.Kind() != reflect.Struct {
+ panic(fmt.Sprintf("invalid struct type: %T", typ))
+ }
+ m[t] = true
+ }
+ return exporter(func(t reflect.Type) bool { return m[t] })
+}
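+
+// For example, assuming a locally defined struct type myType with unexported
+// fields (the type name is hypothetical):
+//
+//	Equal(x, y, AllowUnexported(myType{}))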
+
+// Result represents the comparison result for a single node and
+// is provided by cmp when calling Report (see [Reporter]).
+type Result struct {
+ _ [0]func() // Make Result incomparable
+ flags resultFlags
+}
+
+// Equal reports whether the node was determined to be equal or not.
+// As a special case, ignored nodes are considered equal.
+func (r Result) Equal() bool {
+ return r.flags&(reportEqual|reportByIgnore) != 0
+}
+
+// ByIgnore reports whether the node is equal because it was ignored.
+// This never reports true if [Result.Equal] reports false.
+func (r Result) ByIgnore() bool {
+ return r.flags&reportByIgnore != 0
+}
+
+// ByMethod reports whether the Equal method determined equality.
+func (r Result) ByMethod() bool {
+ return r.flags&reportByMethod != 0
+}
+
+// ByFunc reports whether a [Comparer] function determined equality.
+func (r Result) ByFunc() bool {
+ return r.flags&reportByFunc != 0
+}
+
+// ByCycle reports whether a reference cycle was detected.
+func (r Result) ByCycle() bool {
+ return r.flags&reportByCycle != 0
+}
+
+type resultFlags uint
+
+const (
+ _ resultFlags = (1 << iota) / 2
+
+ reportEqual
+ reportUnequal
+ reportByIgnore
+ reportByMethod
+ reportByFunc
+ reportByCycle
+)
+
+// Reporter is an [Option] that can be passed to [Equal]. When [Equal] traverses
+// the value trees, it calls PushStep as it descends into each node in the
+// tree and PopStep as it ascends out of the node. The leaves of the tree are
+// either compared (determined to be equal or not equal) or ignored and reported
+// as such by calling the Report method.
+func Reporter(r interface {
+ // PushStep is called when a tree-traversal operation is performed.
+ // The PathStep itself is only valid until the step is popped.
+ // The PathStep.Values are valid for the duration of the entire traversal
+ // and must not be mutated.
+ //
+ // Equal always calls PushStep at the start to provide an operation-less
+ // PathStep used to report the root values.
+ //
+ // Within a slice, the exact set of inserted, removed, or modified elements
+ // is unspecified and may change in future implementations.
+ // The entries of a map are iterated through in an unspecified order.
+ PushStep(PathStep)
+
+ // Report is called exactly once on leaf nodes to report whether the
+ // comparison identified the node as equal, unequal, or ignored.
+ // A leaf node is one that is immediately preceded by and followed by
+ // a pair of PushStep and PopStep calls.
+ Report(Result)
+
+ // PopStep ascends back up the value tree.
+ // There is always a matching pop call for every push call.
+ PopStep()
+}) Option {
+ return reporter{r}
+}
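+
+// For example, a minimal reporter might only count leaf results
+// (an illustrative sketch; the type name is hypothetical):
+//
+//	type countReporter struct{ same, diff int }
+//
+//	func (r *countReporter) PushStep(PathStep) {}
+//	func (r *countReporter) PopStep()          {}
+//	func (r *countReporter) Report(res Result) {
+//		if res.Equal() {
+//			r.same++
+//		} else {
+//			r.diff++
+//		}
+//	}
+//
+//	Equal(x, y, Reporter(&countReporter{}))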
+
+type reporter struct{ reporterIface }
+type reporterIface interface {
+ PushStep(PathStep)
+ Report(Result)
+ PopStep()
+}
+
+func (reporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption {
+ panic("not implemented")
+}
+
+// normalizeOption normalizes the input options such that all Options groups
+// are flattened and groups with a single element are reduced to that element.
+// Only coreOptions and Options containing coreOptions are allowed.
+func normalizeOption(src Option) Option {
+ switch opts := flattenOptions(nil, Options{src}); len(opts) {
+ case 0:
+ return nil
+ case 1:
+ return opts[0]
+ default:
+ return opts
+ }
+}
+
+// flattenOptions copies all options in src to dst as a flat list.
+// Only coreOptions and Options containing coreOptions are allowed.
+func flattenOptions(dst, src Options) Options {
+ for _, opt := range src {
+ switch opt := opt.(type) {
+ case nil:
+ continue
+ case Options:
+ dst = flattenOptions(dst, opt)
+ case coreOption:
+ dst = append(dst, opt)
+ default:
+ panic(fmt.Sprintf("invalid option type: %T", opt))
+ }
+ }
+ return dst
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go
new file mode 100644
index 0000000..c3c1456
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/path.go
@@ -0,0 +1,390 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/google/go-cmp/cmp/internal/value"
+)
+
+// Path is a list of [PathStep] describing the sequence of operations to get
+// from some root type to the current position in the value tree.
+// The first Path element is always an operation-less [PathStep] that exists
+// simply to identify the initial type.
+//
+// When traversing structs with embedded structs, the embedded struct will
+// always be accessed as a field before traversing the fields of the
+// embedded struct themselves. That is, an exported field from the
+// embedded struct will never be accessed directly from the parent struct.
+type Path []PathStep
+
+// PathStep is a union-type for specific operations to traverse
+// a value's tree structure. Users of this package never need to implement
+// these types as values of this type will be returned by this package.
+//
+// Implementations of this interface:
+// - [StructField]
+// - [SliceIndex]
+// - [MapIndex]
+// - [Indirect]
+// - [TypeAssertion]
+// - [Transform]
+type PathStep interface {
+ String() string
+
+ // Type is the resulting type after performing the path step.
+ Type() reflect.Type
+
+ // Values is the resulting values after performing the path step.
+ // The type of each valid value is guaranteed to be identical to Type.
+ //
+ // In some cases, one or both may be invalid or have restrictions:
+ // - For StructField, both are not interface-able if the current field
+ // is unexported and the struct type is not explicitly permitted by
+ // an Exporter to traverse unexported fields.
+ // - For SliceIndex, one may be invalid if an element is missing from
+ // either the x or y slice.
+ // - For MapIndex, one may be invalid if an entry is missing from
+ // either the x or y map.
+ //
+ // The provided values must not be mutated.
+ Values() (vx, vy reflect.Value)
+}
+
+var (
+ _ PathStep = StructField{}
+ _ PathStep = SliceIndex{}
+ _ PathStep = MapIndex{}
+ _ PathStep = Indirect{}
+ _ PathStep = TypeAssertion{}
+ _ PathStep = Transform{}
+)
+
+func (pa *Path) push(s PathStep) {
+ *pa = append(*pa, s)
+}
+
+func (pa *Path) pop() {
+ *pa = (*pa)[:len(*pa)-1]
+}
+
+// Last returns the last [PathStep] in the Path.
+// If the path is empty, this returns a non-nil [PathStep]
+// that reports a nil [PathStep.Type].
+func (pa Path) Last() PathStep {
+ return pa.Index(-1)
+}
+
+// Index returns the ith step in the Path and supports negative indexing.
+// A negative index starts counting from the tail of the Path such that -1
+// refers to the last step, -2 refers to the second-to-last step, and so on.
+// If index is invalid, this returns a non-nil [PathStep]
+// that reports a nil [PathStep.Type].
+func (pa Path) Index(i int) PathStep {
+ if i < 0 {
+ i = len(pa) + i
+ }
+ if i < 0 || i >= len(pa) {
+ return pathStep{}
+ }
+ return pa[i]
+}
+
+// String returns the simplified path to a node.
+// The simplified path only contains struct field accesses.
+//
+// For example:
+//
+// MyMap.MySlices.MyField
+func (pa Path) String() string {
+ var ss []string
+ for _, s := range pa {
+ if _, ok := s.(StructField); ok {
+ ss = append(ss, s.String())
+ }
+ }
+ return strings.TrimPrefix(strings.Join(ss, ""), ".")
+}
+
+// GoString returns the path to a specific node using Go syntax.
+//
+// For example:
+//
+// (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField
+func (pa Path) GoString() string {
+ var ssPre, ssPost []string
+ var numIndirect int
+ for i, s := range pa {
+ var nextStep PathStep
+ if i+1 < len(pa) {
+ nextStep = pa[i+1]
+ }
+ switch s := s.(type) {
+ case Indirect:
+ numIndirect++
+ pPre, pPost := "(", ")"
+ switch nextStep.(type) {
+ case Indirect:
+ continue // Next step is indirection, so let them batch up
+ case StructField:
+ numIndirect-- // Automatic indirection on struct fields
+ case nil:
+				pPre, pPost = "", "" // Last step; no need for parentheses
+ }
+ if numIndirect > 0 {
+ ssPre = append(ssPre, pPre+strings.Repeat("*", numIndirect))
+ ssPost = append(ssPost, pPost)
+ }
+ numIndirect = 0
+ continue
+ case Transform:
+ ssPre = append(ssPre, s.trans.name+"(")
+ ssPost = append(ssPost, ")")
+ continue
+ }
+ ssPost = append(ssPost, s.String())
+ }
+ for i, j := 0, len(ssPre)-1; i < j; i, j = i+1, j-1 {
+ ssPre[i], ssPre[j] = ssPre[j], ssPre[i]
+ }
+ return strings.Join(ssPre, "") + strings.Join(ssPost, "")
+}
+
+type pathStep struct {
+ typ reflect.Type
+ vx, vy reflect.Value
+}
+
+func (ps pathStep) Type() reflect.Type { return ps.typ }
+func (ps pathStep) Values() (vx, vy reflect.Value) { return ps.vx, ps.vy }
+func (ps pathStep) String() string {
+ if ps.typ == nil {
+ return "<nil>"
+ }
+ s := value.TypeString(ps.typ, false)
+ if s == "" || strings.ContainsAny(s, "{}\n") {
+ return "root" // Type too simple or complex to print
+ }
+ return fmt.Sprintf("{%s}", s)
+}
+
+// StructField is a [PathStep] that represents a struct field access
+// on a field called [StructField.Name].
+type StructField struct{ *structField }
+type structField struct {
+ pathStep
+ name string
+ idx int
+
+ // These fields are used for forcibly accessing an unexported field.
+ // pvx, pvy, and field are only valid if unexported is true.
+ unexported bool
+ mayForce bool // Forcibly allow visibility
+ paddr bool // Was parent addressable?
+ pvx, pvy reflect.Value // Parent values (always addressable)
+ field reflect.StructField // Field information
+}
+
+func (sf StructField) Type() reflect.Type { return sf.typ }
+func (sf StructField) Values() (vx, vy reflect.Value) {
+ if !sf.unexported {
+ return sf.vx, sf.vy // CanInterface reports true
+ }
+
+ // Forcibly obtain read-write access to an unexported struct field.
+ if sf.mayForce {
+ vx = retrieveUnexportedField(sf.pvx, sf.field, sf.paddr)
+ vy = retrieveUnexportedField(sf.pvy, sf.field, sf.paddr)
+ return vx, vy // CanInterface reports true
+ }
+ return sf.vx, sf.vy // CanInterface reports false
+}
+func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) }
+
+// Name is the field name.
+func (sf StructField) Name() string { return sf.name }
+
+// Index is the index of the field in the parent struct type.
+// See [reflect.Type.Field].
+func (sf StructField) Index() int { return sf.idx }
+
+// SliceIndex is a [PathStep] that represents an index operation on
+// a slice or array at some index [SliceIndex.Key].
+type SliceIndex struct{ *sliceIndex }
+type sliceIndex struct {
+ pathStep
+ xkey, ykey int
+ isSlice bool // False for reflect.Array
+}
+
+func (si SliceIndex) Type() reflect.Type { return si.typ }
+func (si SliceIndex) Values() (vx, vy reflect.Value) { return si.vx, si.vy }
+func (si SliceIndex) String() string {
+ switch {
+ case si.xkey == si.ykey:
+ return fmt.Sprintf("[%d]", si.xkey)
+ case si.ykey == -1:
+ // [5->?] means "I don't know where X[5] went"
+ return fmt.Sprintf("[%d->?]", si.xkey)
+ case si.xkey == -1:
+ // [?->3] means "I don't know where Y[3] came from"
+ return fmt.Sprintf("[?->%d]", si.ykey)
+ default:
+ // [5->3] means "X[5] moved to Y[3]"
+ return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey)
+ }
+}
+
+// Key is the index key; it may return -1 if in a split state.
+func (si SliceIndex) Key() int {
+ if si.xkey != si.ykey {
+ return -1
+ }
+ return si.xkey
+}
+
+// SplitKeys are the indexes for indexing into slices in the
+// x and y values, respectively. These indexes may differ due to the
+// insertion or removal of an element in one of the slices, causing
+// all of the indexes to be shifted. If an index is -1, then that
+// indicates that the element does not exist in the associated slice.
+//
+// [SliceIndex.Key] is guaranteed to return -1 if and only if the indexes
+// returned by SplitKeys are not the same. SplitKeys will never return -1 for
+// both indexes.
+func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey }
+
+// MapIndex is a [PathStep] that represents an index operation on a map at some index Key.
+type MapIndex struct{ *mapIndex }
+type mapIndex struct {
+ pathStep
+ key reflect.Value
+}
+
+func (mi MapIndex) Type() reflect.Type { return mi.typ }
+func (mi MapIndex) Values() (vx, vy reflect.Value) { return mi.vx, mi.vy }
+func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) }
+
+// Key is the value of the map key.
+func (mi MapIndex) Key() reflect.Value { return mi.key }
+
+// Indirect is a [PathStep] that represents pointer indirection on the parent type.
+type Indirect struct{ *indirect }
+type indirect struct {
+ pathStep
+}
+
+func (in Indirect) Type() reflect.Type { return in.typ }
+func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy }
+func (in Indirect) String() string { return "*" }
+
+// TypeAssertion is a [PathStep] that represents a type assertion on an interface.
+type TypeAssertion struct{ *typeAssertion }
+type typeAssertion struct {
+ pathStep
+}
+
+func (ta TypeAssertion) Type() reflect.Type { return ta.typ }
+func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy }
+func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", value.TypeString(ta.typ, false)) }
+
+// Transform is a [PathStep] that represents a transformation
+// from the parent type to the current type.
+type Transform struct{ *transform }
+type transform struct {
+ pathStep
+ trans *transformer
+}
+
+func (tf Transform) Type() reflect.Type { return tf.typ }
+func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy }
+func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) }
+
+// Name is the name of the [Transformer].
+func (tf Transform) Name() string { return tf.trans.name }
+
+// Func is the function pointer to the transformer function.
+func (tf Transform) Func() reflect.Value { return tf.trans.fnc }
+
+// Option returns the originally constructed [Transformer] option.
+// The == operator can be used to detect the exact option used.
+func (tf Transform) Option() Option { return tf.trans }
+
+// pointerPath represents a dual-stack of pointers encountered when
+// recursively traversing the x and y values. This data structure supports
+// detection of cycles and determining whether the cycles are equal.
+// In Go, cycles can occur via pointers, slices, and maps.
+//
+// The pointerPath uses a map to represent a stack: descending into a
+// pointer pushes the address onto the stack, and ascending from a pointer
+// pops the address from the stack. Thus, when traversing into a pointer from
+// reflect.Ptr, reflect.Slice element, or reflect.Map, we can detect cycles
+// by checking whether the pointer has already been visited. The cycle detection
+// uses a separate stack for the x and y values.
+//
+// If a cycle is detected we need to determine whether the two pointers
+// should be considered equal. The definition of equality chosen by Equal
+// requires two graphs to have the same structure. To determine this, both the
+// x and y values must have a cycle where the previous pointers were also
+// encountered together as a pair.
+//
+// Semantically, this is equivalent to augmenting Indirect, SliceIndex, and
+// MapIndex with pointer information for the x and y values.
+// Suppose px and py are two pointers to compare, we then search the
+// Path for whether px was ever encountered in the Path history of x, and
+// similarly so with py. If either side has a cycle, the comparison is only
+// equal if both px and py have a cycle resulting from the same PathStep.
+//
+// Using a map as a stack is more performant as we can perform cycle detection
+// in O(1) instead of O(N) where N is len(Path).
+type pointerPath struct {
+ // mx is keyed by x pointers, where the value is the associated y pointer.
+ mx map[value.Pointer]value.Pointer
+ // my is keyed by y pointers, where the value is the associated x pointer.
+ my map[value.Pointer]value.Pointer
+}
+
+func (p *pointerPath) Init() {
+ p.mx = make(map[value.Pointer]value.Pointer)
+ p.my = make(map[value.Pointer]value.Pointer)
+}
+
+// Push indicates intent to descend into pointers vx and vy where
+// visited reports whether either has been seen before. If visited before,
+// equal reports whether both pointers were encountered together.
+// Pop must be called if and only if the pointers were never visited.
+//
+// The pointers vx and vy must be a reflect.Ptr, reflect.Slice, or reflect.Map
+// and be non-nil.
+func (p pointerPath) Push(vx, vy reflect.Value) (equal, visited bool) {
+ px := value.PointerOf(vx)
+ py := value.PointerOf(vy)
+ _, ok1 := p.mx[px]
+ _, ok2 := p.my[py]
+ if ok1 || ok2 {
+ equal = p.mx[px] == py && p.my[py] == px // Pointers paired together
+ return equal, true
+ }
+ p.mx[px] = py
+ p.my[py] = px
+ return false, false
+}
+
+// Pop ascends from pointers vx and vy.
+func (p pointerPath) Pop(vx, vy reflect.Value) {
+ delete(p.mx, value.PointerOf(vx))
+ delete(p.my, value.PointerOf(vy))
+}
+
+// isExported reports whether the identifier is exported.
+func isExported(id string) bool {
+ r, _ := utf8.DecodeRuneInString(id)
+ return unicode.IsUpper(r)
+}
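
To illustrate how these path steps are typically inspected from a FilterPath option, here is a minimal sketch (the user type is illustrative, not part of this package):

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type user struct {
	Name     string
	Password string
}

func main() {
	// Ignore any field named "Password" by inspecting the last PathStep.
	ignorePassword := cmp.FilterPath(func(p cmp.Path) bool {
		sf, ok := p.Last().(cmp.StructField)
		return ok && sf.Name() == "Password"
	}, cmp.Ignore())

	x := user{Name: "mo", Password: "old"}
	y := user{Name: "mo", Password: "new"}
	fmt.Println(cmp.Equal(x, y, ignorePassword)) // true: only Password differs
}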
diff --git a/vendor/github.com/google/go-cmp/cmp/report.go b/vendor/github.com/google/go-cmp/cmp/report.go
new file mode 100644
index 0000000..f43cd12
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report.go
@@ -0,0 +1,54 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+// defaultReporter implements the reporter interface.
+//
+// As Equal serially calls the PushStep, Report, and PopStep methods, the
+// defaultReporter constructs a tree-based representation of the compared value
+// and the result of each comparison (see valueNode).
+//
+// When the String method is called, the FormatDiff method transforms the
+// valueNode tree into a textNode tree, which is a tree-based representation
+// of the textual output (see textNode).
+//
+// Lastly, the textNode.String method produces the final report as a string.
+type defaultReporter struct {
+ root *valueNode
+ curr *valueNode
+}
+
+func (r *defaultReporter) PushStep(ps PathStep) {
+ r.curr = r.curr.PushStep(ps)
+ if r.root == nil {
+ r.root = r.curr
+ }
+}
+func (r *defaultReporter) Report(rs Result) {
+ r.curr.Report(rs)
+}
+func (r *defaultReporter) PopStep() {
+ r.curr = r.curr.PopStep()
+}
+
+// String provides a full report of the differences detected as a structured
+// literal in pseudo-Go syntax. String may only be called after the entire tree
+// has been traversed.
+func (r *defaultReporter) String() string {
+ assert(r.root != nil && r.curr == nil)
+ if r.root.NumDiff == 0 {
+ return ""
+ }
+ ptrs := new(pointerReferences)
+ text := formatOptions{}.FormatDiff(r.root, ptrs)
+ resolveReferences(text)
+ return text.String()
+}
+
+func assert(ok bool) {
+ if !ok {
+ panic("assertion failure")
+ }
+}
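
A minimal sketch of a user-supplied reporter built on the same PushStep/Report/PopStep contract that defaultReporter implements (the diffReporter and user types are illustrative):

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// diffReporter records the path and values of every unequal leaf comparison.
type diffReporter struct {
	path  cmp.Path
	diffs []string
}

func (r *diffReporter) PushStep(ps cmp.PathStep) { r.path = append(r.path, ps) }

func (r *diffReporter) Report(rs cmp.Result) {
	if !rs.Equal() {
		vx, vy := r.path.Last().Values()
		r.diffs = append(r.diffs, fmt.Sprintf("%v: %v != %v", r.path, vx, vy))
	}
}

func (r *diffReporter) PopStep() { r.path = r.path[:len(r.path)-1] }

type user struct {
	Name string
	Age  int
}

func main() {
	r := &diffReporter{}
	cmp.Equal(user{"mo", 30}, user{"mo", 31}, cmp.Reporter(r))
	fmt.Println(r.diffs) // e.g. [Age: 30 != 31]
}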
diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go
new file mode 100644
index 0000000..2050bf6
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go
@@ -0,0 +1,433 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// numContextRecords is the number of surrounding equal records to print.
+const numContextRecords = 2
+
+type diffMode byte
+
+const (
+ diffUnknown diffMode = 0
+ diffIdentical diffMode = ' '
+ diffRemoved diffMode = '-'
+ diffInserted diffMode = '+'
+)
+
+type typeMode int
+
+const (
+ // emitType always prints the type.
+ emitType typeMode = iota
+ // elideType never prints the type.
+ elideType
+ // autoType prints the type only for composite kinds
+ // (i.e., structs, slices, arrays, and maps).
+ autoType
+)
+
+type formatOptions struct {
+ // DiffMode controls the output mode of FormatDiff.
+ //
+ // If diffUnknown, then produce a diff of the x and y values.
+ // If diffIdentical, then emit values as if they were equal.
+ // If diffRemoved, then only emit x values (ignoring y values).
+ // If diffInserted, then only emit y values (ignoring x values).
+ DiffMode diffMode
+
+ // TypeMode controls whether to print the type for the current node.
+ //
+ // As a general rule of thumb, we always print the type of the next node
+ // after an interface, and always elide the type of the next node after
+ // a slice or map node.
+ TypeMode typeMode
+
+ // formatValueOptions are options specific to printing reflect.Values.
+ formatValueOptions
+}
+
+func (opts formatOptions) WithDiffMode(d diffMode) formatOptions {
+ opts.DiffMode = d
+ return opts
+}
+func (opts formatOptions) WithTypeMode(t typeMode) formatOptions {
+ opts.TypeMode = t
+ return opts
+}
+func (opts formatOptions) WithVerbosity(level int) formatOptions {
+ opts.VerbosityLevel = level
+ opts.LimitVerbosity = true
+ return opts
+}
+func (opts formatOptions) verbosity() uint {
+ switch {
+ case opts.VerbosityLevel < 0:
+ return 0
+ case opts.VerbosityLevel > 16:
+ return 16 // some reasonable maximum to avoid shift overflow
+ default:
+ return uint(opts.VerbosityLevel)
+ }
+}
+
+const maxVerbosityPreset = 6
+
+// verbosityPreset modifies the verbosity settings given an index
+// between 0 and maxVerbosityPreset, inclusive.
+func verbosityPreset(opts formatOptions, i int) formatOptions {
+ opts.VerbosityLevel = int(opts.verbosity()) + 2*i
+ if i > 0 {
+ opts.AvoidStringer = true
+ }
+ if i >= maxVerbosityPreset {
+ opts.PrintAddresses = true
+ opts.QualifiedNames = true
+ }
+ return opts
+}
+
+// FormatDiff converts a valueNode tree into a textNode tree, where the latter
+// is a textual representation of the differences detected in the former.
+func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out textNode) {
+ if opts.DiffMode == diffIdentical {
+ opts = opts.WithVerbosity(1)
+ } else if opts.verbosity() < 3 {
+ opts = opts.WithVerbosity(3)
+ }
+
+ // Check whether we have specialized formatting for this node.
+ // This is not necessary, but helpful for producing more readable outputs.
+ if opts.CanFormatDiffSlice(v) {
+ return opts.FormatDiffSlice(v)
+ }
+
+ var parentKind reflect.Kind
+ if v.parent != nil && v.parent.TransformerName == "" {
+ parentKind = v.parent.Type.Kind()
+ }
+
+ // For leaf nodes, format the value based on the reflect.Values alone.
+	// As a special case, treat equal []byte as leaf nodes.
+ isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == byteType
+ isEqualBytes := isBytes && v.NumDiff+v.NumIgnored+v.NumTransformed == 0
+ if v.MaxDepth == 0 || isEqualBytes {
+ switch opts.DiffMode {
+ case diffUnknown, diffIdentical:
+ // Format Equal.
+ if v.NumDiff == 0 {
+ outx := opts.FormatValue(v.ValueX, parentKind, ptrs)
+ outy := opts.FormatValue(v.ValueY, parentKind, ptrs)
+ if v.NumIgnored > 0 && v.NumSame == 0 {
+ return textEllipsis
+ } else if outx.Len() < outy.Len() {
+ return outx
+ } else {
+ return outy
+ }
+ }
+
+ // Format unequal.
+ assert(opts.DiffMode == diffUnknown)
+ var list textList
+ outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, parentKind, ptrs)
+ outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, parentKind, ptrs)
+ for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ {
+ opts2 := verbosityPreset(opts, i).WithTypeMode(elideType)
+ outx = opts2.FormatValue(v.ValueX, parentKind, ptrs)
+ outy = opts2.FormatValue(v.ValueY, parentKind, ptrs)
+ }
+ if outx != nil {
+ list = append(list, textRecord{Diff: '-', Value: outx})
+ }
+ if outy != nil {
+ list = append(list, textRecord{Diff: '+', Value: outy})
+ }
+ return opts.WithTypeMode(emitType).FormatType(v.Type, list)
+ case diffRemoved:
+ return opts.FormatValue(v.ValueX, parentKind, ptrs)
+ case diffInserted:
+ return opts.FormatValue(v.ValueY, parentKind, ptrs)
+ default:
+ panic("invalid diff mode")
+ }
+ }
+
+ // Register slice element to support cycle detection.
+ if parentKind == reflect.Slice {
+ ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, true)
+ defer ptrs.Pop()
+ defer func() { out = wrapTrunkReferences(ptrRefs, out) }()
+ }
+
+ // Descend into the child value node.
+ if v.TransformerName != "" {
+ out := opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs)
+ out = &textWrap{Prefix: "Inverse(" + v.TransformerName + ", ", Value: out, Suffix: ")"}
+ return opts.FormatType(v.Type, out)
+ } else {
+ switch k := v.Type.Kind(); k {
+ case reflect.Struct, reflect.Array, reflect.Slice:
+ out = opts.formatDiffList(v.Records, k, ptrs)
+ out = opts.FormatType(v.Type, out)
+ case reflect.Map:
+ // Register map to support cycle detection.
+ ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false)
+ defer ptrs.Pop()
+
+ out = opts.formatDiffList(v.Records, k, ptrs)
+ out = wrapTrunkReferences(ptrRefs, out)
+ out = opts.FormatType(v.Type, out)
+ case reflect.Ptr:
+ // Register pointer to support cycle detection.
+ ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false)
+ defer ptrs.Pop()
+
+ out = opts.FormatDiff(v.Value, ptrs)
+ out = wrapTrunkReferences(ptrRefs, out)
+ out = &textWrap{Prefix: "&", Value: out}
+ case reflect.Interface:
+ out = opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs)
+ default:
+ panic(fmt.Sprintf("%v cannot have children", k))
+ }
+ return out
+ }
+}
+
+func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, ptrs *pointerReferences) textNode {
+ // Derive record name based on the data structure kind.
+ var name string
+ var formatKey func(reflect.Value) string
+ switch k {
+ case reflect.Struct:
+ name = "field"
+ opts = opts.WithTypeMode(autoType)
+ formatKey = func(v reflect.Value) string { return v.String() }
+ case reflect.Slice, reflect.Array:
+ name = "element"
+ opts = opts.WithTypeMode(elideType)
+ formatKey = func(reflect.Value) string { return "" }
+ case reflect.Map:
+ name = "entry"
+ opts = opts.WithTypeMode(elideType)
+ formatKey = func(v reflect.Value) string { return formatMapKey(v, false, ptrs) }
+ }
+
+ maxLen := -1
+ if opts.LimitVerbosity {
+ if opts.DiffMode == diffIdentical {
+ maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
+ } else {
+ maxLen = (1 << opts.verbosity()) << 1 // 2, 4, 8, 16, 32, 64, etc...
+ }
+ opts.VerbosityLevel--
+ }
+
+ // Handle unification.
+ switch opts.DiffMode {
+ case diffIdentical, diffRemoved, diffInserted:
+ var list textList
+ var deferredEllipsis bool // Add final "..." to indicate records were dropped
+ for _, r := range recs {
+ if len(list) == maxLen {
+ deferredEllipsis = true
+ break
+ }
+
+ // Elide struct fields that are zero value.
+ if k == reflect.Struct {
+ var isZero bool
+ switch opts.DiffMode {
+ case diffIdentical:
+ isZero = r.Value.ValueX.IsZero() || r.Value.ValueY.IsZero()
+ case diffRemoved:
+ isZero = r.Value.ValueX.IsZero()
+ case diffInserted:
+ isZero = r.Value.ValueY.IsZero()
+ }
+ if isZero {
+ continue
+ }
+ }
+ // Elide ignored nodes.
+ if r.Value.NumIgnored > 0 && r.Value.NumSame+r.Value.NumDiff == 0 {
+ deferredEllipsis = !(k == reflect.Slice || k == reflect.Array)
+ if !deferredEllipsis {
+ list.AppendEllipsis(diffStats{})
+ }
+ continue
+ }
+ if out := opts.FormatDiff(r.Value, ptrs); out != nil {
+ list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+ }
+ }
+ if deferredEllipsis {
+ list.AppendEllipsis(diffStats{})
+ }
+ return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
+ case diffUnknown:
+ default:
+ panic("invalid diff mode")
+ }
+
+ // Handle differencing.
+ var numDiffs int
+ var list textList
+ var keys []reflect.Value // invariant: len(list) == len(keys)
+ groups := coalesceAdjacentRecords(name, recs)
+ maxGroup := diffStats{Name: name}
+ for i, ds := range groups {
+ if maxLen >= 0 && numDiffs >= maxLen {
+ maxGroup = maxGroup.Append(ds)
+ continue
+ }
+
+ // Handle equal records.
+ if ds.NumDiff() == 0 {
+ // Compute the number of leading and trailing records to print.
+ var numLo, numHi int
+ numEqual := ds.NumIgnored + ds.NumIdentical
+ for numLo < numContextRecords && numLo+numHi < numEqual && i != 0 {
+ if r := recs[numLo].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 {
+ break
+ }
+ numLo++
+ }
+ for numHi < numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 {
+ if r := recs[numEqual-numHi-1].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 {
+ break
+ }
+ numHi++
+ }
+ if numEqual-(numLo+numHi) == 1 && ds.NumIgnored == 0 {
+ numHi++ // Avoid pointless coalescing of a single equal record
+ }
+
+ // Format the equal values.
+ for _, r := range recs[:numLo] {
+ out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs)
+ list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+ keys = append(keys, r.Key)
+ }
+ if numEqual > numLo+numHi {
+ ds.NumIdentical -= numLo + numHi
+ list.AppendEllipsis(ds)
+ for len(keys) < len(list) {
+ keys = append(keys, reflect.Value{})
+ }
+ }
+ for _, r := range recs[numEqual-numHi : numEqual] {
+ out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs)
+ list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+ keys = append(keys, r.Key)
+ }
+ recs = recs[numEqual:]
+ continue
+ }
+
+ // Handle unequal records.
+ for _, r := range recs[:ds.NumDiff()] {
+ switch {
+ case opts.CanFormatDiffSlice(r.Value):
+ out := opts.FormatDiffSlice(r.Value)
+ list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+ keys = append(keys, r.Key)
+ case r.Value.NumChildren == r.Value.MaxDepth:
+ outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs)
+ outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs)
+ for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ {
+ opts2 := verbosityPreset(opts, i)
+ outx = opts2.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs)
+ outy = opts2.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs)
+ }
+ if outx != nil {
+ list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx})
+ keys = append(keys, r.Key)
+ }
+ if outy != nil {
+ list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy})
+ keys = append(keys, r.Key)
+ }
+ default:
+ out := opts.FormatDiff(r.Value, ptrs)
+ list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+ keys = append(keys, r.Key)
+ }
+ }
+ recs = recs[ds.NumDiff():]
+ numDiffs += ds.NumDiff()
+ }
+ if maxGroup.IsZero() {
+ assert(len(recs) == 0)
+ } else {
+ list.AppendEllipsis(maxGroup)
+ for len(keys) < len(list) {
+ keys = append(keys, reflect.Value{})
+ }
+ }
+ assert(len(list) == len(keys))
+
+ // For maps, the default formatting logic uses fmt.Stringer which may
+ // produce ambiguous output. Avoid calling String to disambiguate.
+ if k == reflect.Map {
+ var ambiguous bool
+ seenKeys := map[string]reflect.Value{}
+ for i, currKey := range keys {
+ if currKey.IsValid() {
+ strKey := list[i].Key
+ prevKey, seen := seenKeys[strKey]
+ if seen && prevKey.CanInterface() && currKey.CanInterface() {
+ ambiguous = prevKey.Interface() != currKey.Interface()
+ if ambiguous {
+ break
+ }
+ }
+ seenKeys[strKey] = currKey
+ }
+ }
+ if ambiguous {
+ for i, k := range keys {
+ if k.IsValid() {
+ list[i].Key = formatMapKey(k, true, ptrs)
+ }
+ }
+ }
+ }
+
+ return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
+}
+
+// coalesceAdjacentRecords coalesces the list of records into groups of
+// adjacent equal, or unequal counts.
+func coalesceAdjacentRecords(name string, recs []reportRecord) (groups []diffStats) {
+ var prevCase int // Arbitrary index into which case last occurred
+ lastStats := func(i int) *diffStats {
+ if prevCase != i {
+ groups = append(groups, diffStats{Name: name})
+ prevCase = i
+ }
+ return &groups[len(groups)-1]
+ }
+ for _, r := range recs {
+ switch rv := r.Value; {
+ case rv.NumIgnored > 0 && rv.NumSame+rv.NumDiff == 0:
+ lastStats(1).NumIgnored++
+ case rv.NumDiff == 0:
+ lastStats(1).NumIdentical++
+ case rv.NumDiff > 0 && !rv.ValueY.IsValid():
+ lastStats(2).NumRemoved++
+ case rv.NumDiff > 0 && !rv.ValueX.IsValid():
+ lastStats(2).NumInserted++
+ default:
+ lastStats(2).NumModified++
+ }
+ }
+ return groups
+}
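
For a sense of what exercises this formatting, a minimal sketch follows; the exact output is version-dependent, but removed values are prefixed with '-', inserted values with '+', and long runs of equal records are elided (the config type is illustrative):

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type config struct {
	Host string
	Port int
	Tags []string
}

func main() {
	x := config{Host: "db", Port: 5432, Tags: []string{"a", "b", "c", "d", "e", "f", "g"}}
	y := config{Host: "db", Port: 5432, Tags: []string{"a", "b", "c", "d", "E", "f", "g"}}
	// The report marks the removed "e" with '-' and the inserted "E" with '+',
	// prints up to two equal neighbours as context, and elides the rest.
	fmt.Println(cmp.Diff(x, y))
}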
diff --git a/vendor/github.com/google/go-cmp/cmp/report_references.go b/vendor/github.com/google/go-cmp/cmp/report_references.go
new file mode 100644
index 0000000..be31b33
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_references.go
@@ -0,0 +1,264 @@
+// Copyright 2020, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/google/go-cmp/cmp/internal/flags"
+ "github.com/google/go-cmp/cmp/internal/value"
+)
+
+const (
+ pointerDelimPrefix = "⟪"
+ pointerDelimSuffix = "⟫"
+)
+
+// formatPointer prints the address of the pointer.
+func formatPointer(p value.Pointer, withDelims bool) string {
+ v := p.Uintptr()
+ if flags.Deterministic {
+ v = 0xdeadf00f // Only used for stable testing purposes
+ }
+ if withDelims {
+ return pointerDelimPrefix + formatHex(uint64(v)) + pointerDelimSuffix
+ }
+ return formatHex(uint64(v))
+}
+
+// pointerReferences is a stack of pointers visited so far.
+type pointerReferences [][2]value.Pointer
+
+func (ps *pointerReferences) PushPair(vx, vy reflect.Value, d diffMode, deref bool) (pp [2]value.Pointer) {
+ if deref && vx.IsValid() {
+ vx = vx.Addr()
+ }
+ if deref && vy.IsValid() {
+ vy = vy.Addr()
+ }
+ switch d {
+ case diffUnknown, diffIdentical:
+ pp = [2]value.Pointer{value.PointerOf(vx), value.PointerOf(vy)}
+ case diffRemoved:
+ pp = [2]value.Pointer{value.PointerOf(vx), value.Pointer{}}
+ case diffInserted:
+ pp = [2]value.Pointer{value.Pointer{}, value.PointerOf(vy)}
+ }
+ *ps = append(*ps, pp)
+ return pp
+}
+
+func (ps *pointerReferences) Push(v reflect.Value) (p value.Pointer, seen bool) {
+ p = value.PointerOf(v)
+ for _, pp := range *ps {
+ if p == pp[0] || p == pp[1] {
+ return p, true
+ }
+ }
+ *ps = append(*ps, [2]value.Pointer{p, p})
+ return p, false
+}
+
+func (ps *pointerReferences) Pop() {
+ *ps = (*ps)[:len(*ps)-1]
+}
+
+// trunkReferences is metadata for a textNode indicating that the sub-tree
+// represents the value for either pointer in a pair of references.
+type trunkReferences struct{ pp [2]value.Pointer }
+
+// trunkReference is metadata for a textNode indicating that the sub-tree
+// represents the value for the given pointer reference.
+type trunkReference struct{ p value.Pointer }
+
+// leafReference is metadata for a textNode indicating that the value is
+// truncated as it refers to another part of the tree (i.e., a trunk).
+type leafReference struct{ p value.Pointer }
+
+func wrapTrunkReferences(pp [2]value.Pointer, s textNode) textNode {
+ switch {
+ case pp[0].IsNil():
+ return &textWrap{Value: s, Metadata: trunkReference{pp[1]}}
+ case pp[1].IsNil():
+ return &textWrap{Value: s, Metadata: trunkReference{pp[0]}}
+ case pp[0] == pp[1]:
+ return &textWrap{Value: s, Metadata: trunkReference{pp[0]}}
+ default:
+ return &textWrap{Value: s, Metadata: trunkReferences{pp}}
+ }
+}
+func wrapTrunkReference(p value.Pointer, printAddress bool, s textNode) textNode {
+ var prefix string
+ if printAddress {
+ prefix = formatPointer(p, true)
+ }
+ return &textWrap{Prefix: prefix, Value: s, Metadata: trunkReference{p}}
+}
+func makeLeafReference(p value.Pointer, printAddress bool) textNode {
+ out := &textWrap{Prefix: "(", Value: textEllipsis, Suffix: ")"}
+ var prefix string
+ if printAddress {
+ prefix = formatPointer(p, true)
+ }
+ return &textWrap{Prefix: prefix, Value: out, Metadata: leafReference{p}}
+}
+
+// resolveReferences walks the textNode tree searching for any leaf reference
+// metadata and resolves each against the corresponding trunk references.
+// Since pointer addresses in memory are not particularly readable to the user,
+// it replaces each pointer value with an arbitrary and unique reference ID.
+func resolveReferences(s textNode) {
+ var walkNodes func(textNode, func(textNode))
+ walkNodes = func(s textNode, f func(textNode)) {
+ f(s)
+ switch s := s.(type) {
+ case *textWrap:
+ walkNodes(s.Value, f)
+ case textList:
+ for _, r := range s {
+ walkNodes(r.Value, f)
+ }
+ }
+ }
+
+ // Collect all trunks and leaves with reference metadata.
+ var trunks, leaves []*textWrap
+ walkNodes(s, func(s textNode) {
+ if s, ok := s.(*textWrap); ok {
+ switch s.Metadata.(type) {
+ case leafReference:
+ leaves = append(leaves, s)
+ case trunkReference, trunkReferences:
+ trunks = append(trunks, s)
+ }
+ }
+ })
+
+ // No leaf references to resolve.
+ if len(leaves) == 0 {
+ return
+ }
+
+ // Collect the set of all leaf references to resolve.
+ leafPtrs := make(map[value.Pointer]bool)
+ for _, leaf := range leaves {
+ leafPtrs[leaf.Metadata.(leafReference).p] = true
+ }
+
+ // Collect the set of trunk pointers that are always paired together.
+ // This allows us to assign a single ID to both pointers for brevity.
+ // If a pointer in a pair ever occurs by itself or as a different pair,
+ // then the pair is broken.
+ pairedTrunkPtrs := make(map[value.Pointer]value.Pointer)
+ unpair := func(p value.Pointer) {
+ if !pairedTrunkPtrs[p].IsNil() {
+ pairedTrunkPtrs[pairedTrunkPtrs[p]] = value.Pointer{} // invalidate other half
+ }
+ pairedTrunkPtrs[p] = value.Pointer{} // invalidate this half
+ }
+ for _, trunk := range trunks {
+ switch p := trunk.Metadata.(type) {
+ case trunkReference:
+ unpair(p.p) // standalone pointer cannot be part of a pair
+ case trunkReferences:
+ p0, ok0 := pairedTrunkPtrs[p.pp[0]]
+ p1, ok1 := pairedTrunkPtrs[p.pp[1]]
+ switch {
+ case !ok0 && !ok1:
+ // Register the newly seen pair.
+ pairedTrunkPtrs[p.pp[0]] = p.pp[1]
+ pairedTrunkPtrs[p.pp[1]] = p.pp[0]
+ case ok0 && ok1 && p0 == p.pp[1] && p1 == p.pp[0]:
+ // Exact pair already seen; do nothing.
+ default:
+ // Pair conflicts with some other pair; break all pairs.
+ unpair(p.pp[0])
+ unpair(p.pp[1])
+ }
+ }
+ }
+
+ // Correlate each pointer referenced by leaves to a unique identifier,
+ // and print the IDs for each trunk that matches those pointers.
+ var nextID uint
+ ptrIDs := make(map[value.Pointer]uint)
+ newID := func() uint {
+ id := nextID
+ nextID++
+ return id
+ }
+ for _, trunk := range trunks {
+ switch p := trunk.Metadata.(type) {
+ case trunkReference:
+ if print := leafPtrs[p.p]; print {
+ id, ok := ptrIDs[p.p]
+ if !ok {
+ id = newID()
+ ptrIDs[p.p] = id
+ }
+ trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id))
+ }
+ case trunkReferences:
+ print0 := leafPtrs[p.pp[0]]
+ print1 := leafPtrs[p.pp[1]]
+ if print0 || print1 {
+ id0, ok0 := ptrIDs[p.pp[0]]
+ id1, ok1 := ptrIDs[p.pp[1]]
+ isPair := pairedTrunkPtrs[p.pp[0]] == p.pp[1] && pairedTrunkPtrs[p.pp[1]] == p.pp[0]
+ if isPair {
+ var id uint
+ assert(ok0 == ok1) // must be seen together or not at all
+ if ok0 {
+ assert(id0 == id1) // must have the same ID
+ id = id0
+ } else {
+ id = newID()
+ ptrIDs[p.pp[0]] = id
+ ptrIDs[p.pp[1]] = id
+ }
+ trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id))
+ } else {
+ if print0 && !ok0 {
+ id0 = newID()
+ ptrIDs[p.pp[0]] = id0
+ }
+ if print1 && !ok1 {
+ id1 = newID()
+ ptrIDs[p.pp[1]] = id1
+ }
+ switch {
+ case print0 && print1:
+ trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)+","+formatReference(id1))
+ case print0:
+ trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0))
+ case print1:
+ trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id1))
+ }
+ }
+ }
+ }
+ }
+
+ // Update all leaf references with the unique identifier.
+ for _, leaf := range leaves {
+ if id, ok := ptrIDs[leaf.Metadata.(leafReference).p]; ok {
+ leaf.Prefix = updateReferencePrefix(leaf.Prefix, formatReference(id))
+ }
+ }
+}
+
+func formatReference(id uint) string {
+ return fmt.Sprintf("ref#%d", id)
+}
+
+func updateReferencePrefix(prefix, ref string) string {
+ if prefix == "" {
+ return pointerDelimPrefix + ref + pointerDelimSuffix
+ }
+ suffix := strings.TrimPrefix(prefix, pointerDelimPrefix)
+ return pointerDelimPrefix + ref + ": " + suffix
+}
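
A minimal sketch of the cycle handling these references support; the node type and ring helper are illustrative:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type node struct {
	Value int
	Next  *node
}

// ring builds a circular singly linked list out of vals.
func ring(vals ...int) *node {
	head := &node{Value: vals[0]}
	curr := head
	for _, v := range vals[1:] {
		curr.Next = &node{Value: v}
		curr = curr.Next
	}
	curr.Next = head // close the cycle
	return head
}

func main() {
	x := ring(1, 2, 3)
	y := ring(1, 2, 9)
	// Equal and Diff terminate despite the cycles; where a value refers back
	// to an earlier pointer, the report uses a ⟪ref#N⟫ marker rather than
	// repeating the structure or printing a raw address.
	fmt.Println(cmp.Diff(x, y))
}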
diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go
new file mode 100644
index 0000000..e39f422
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go
@@ -0,0 +1,414 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/google/go-cmp/cmp/internal/value"
+)
+
+var (
+ anyType = reflect.TypeOf((*interface{})(nil)).Elem()
+ stringType = reflect.TypeOf((*string)(nil)).Elem()
+ bytesType = reflect.TypeOf((*[]byte)(nil)).Elem()
+ byteType = reflect.TypeOf((*byte)(nil)).Elem()
+)
+
+type formatValueOptions struct {
+ // AvoidStringer controls whether to avoid calling custom stringer
+ // methods like error.Error or fmt.Stringer.String.
+ AvoidStringer bool
+
+ // PrintAddresses controls whether to print the address of all pointers,
+ // slice elements, and maps.
+ PrintAddresses bool
+
+ // QualifiedNames controls whether FormatType uses the fully qualified name
+ // (including the full package path as opposed to just the package name).
+ QualifiedNames bool
+
+ // VerbosityLevel controls the amount of output to produce.
+ // A higher value produces more output. A value of zero or lower produces
+ // no output (represented using an ellipsis).
+ // If LimitVerbosity is false, then the level is treated as infinite.
+ VerbosityLevel int
+
+ // LimitVerbosity specifies that formatting should respect VerbosityLevel.
+ LimitVerbosity bool
+}
+
+// FormatType prints the type as if it were wrapping s.
+// This may return s as-is depending on the current type and TypeMode mode.
+func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode {
+ // Check whether to emit the type or not.
+ switch opts.TypeMode {
+ case autoType:
+ switch t.Kind() {
+ case reflect.Struct, reflect.Slice, reflect.Array, reflect.Map:
+ if s.Equal(textNil) {
+ return s
+ }
+ default:
+ return s
+ }
+ if opts.DiffMode == diffIdentical {
+ return s // elide type for identical nodes
+ }
+ case elideType:
+ return s
+ }
+
+ // Determine the type label, applying special handling for unnamed types.
+ typeName := value.TypeString(t, opts.QualifiedNames)
+ if t.Name() == "" {
+ // According to Go grammar, certain type literals contain symbols that
+ // do not strongly bind to the next lexicographical token (e.g., *T).
+ switch t.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Ptr:
+ typeName = "(" + typeName + ")"
+ }
+ }
+ return &textWrap{Prefix: typeName, Value: wrapParens(s)}
+}
+
+// wrapParens wraps s with a set of parentheses, but avoids it if the
+// wrapped node itself is already surrounded by a pair of parentheses or braces.
+// It handles unwrapping one level of pointer-reference nodes.
+func wrapParens(s textNode) textNode {
+ var refNode *textWrap
+ if s2, ok := s.(*textWrap); ok {
+ // Unwrap a single pointer reference node.
+ switch s2.Metadata.(type) {
+ case leafReference, trunkReference, trunkReferences:
+ refNode = s2
+ if s3, ok := refNode.Value.(*textWrap); ok {
+ s2 = s3
+ }
+ }
+
+		// Already has delimiters that make parentheses unnecessary.
+ hasParens := strings.HasPrefix(s2.Prefix, "(") && strings.HasSuffix(s2.Suffix, ")")
+ hasBraces := strings.HasPrefix(s2.Prefix, "{") && strings.HasSuffix(s2.Suffix, "}")
+ if hasParens || hasBraces {
+ return s
+ }
+ }
+ if refNode != nil {
+ refNode.Value = &textWrap{Prefix: "(", Value: refNode.Value, Suffix: ")"}
+ return s
+ }
+ return &textWrap{Prefix: "(", Value: s, Suffix: ")"}
+}
+
+// FormatValue prints the reflect.Value, taking extra care to avoid descending
+// into pointers already in ptrs. As pointers are visited, ptrs is also updated.
+func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, ptrs *pointerReferences) (out textNode) {
+ if !v.IsValid() {
+ return nil
+ }
+ t := v.Type()
+
+ // Check slice element for cycles.
+ if parentKind == reflect.Slice {
+ ptrRef, visited := ptrs.Push(v.Addr())
+ if visited {
+ return makeLeafReference(ptrRef, false)
+ }
+ defer ptrs.Pop()
+ defer func() { out = wrapTrunkReference(ptrRef, false, out) }()
+ }
+
+ // Check whether there is an Error or String method to call.
+ if !opts.AvoidStringer && v.CanInterface() {
+ // Avoid calling Error or String methods on nil receivers since many
+ // implementations crash when doing so.
+ if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() {
+ var prefix, strVal string
+ func() {
+ // Swallow and ignore any panics from String or Error.
+ defer func() { recover() }()
+ switch v := v.Interface().(type) {
+ case error:
+ strVal = v.Error()
+ prefix = "e"
+ case fmt.Stringer:
+ strVal = v.String()
+ prefix = "s"
+ }
+ }()
+ if prefix != "" {
+ return opts.formatString(prefix, strVal)
+ }
+ }
+ }
+
+ // Check whether to explicitly wrap the result with the type.
+ var skipType bool
+ defer func() {
+ if !skipType {
+ out = opts.FormatType(t, out)
+ }
+ }()
+
+ switch t.Kind() {
+ case reflect.Bool:
+ return textLine(fmt.Sprint(v.Bool()))
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return textLine(fmt.Sprint(v.Int()))
+ case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return textLine(fmt.Sprint(v.Uint()))
+ case reflect.Uint8:
+ if parentKind == reflect.Slice || parentKind == reflect.Array {
+ return textLine(formatHex(v.Uint()))
+ }
+ return textLine(fmt.Sprint(v.Uint()))
+ case reflect.Uintptr:
+ return textLine(formatHex(v.Uint()))
+ case reflect.Float32, reflect.Float64:
+ return textLine(fmt.Sprint(v.Float()))
+ case reflect.Complex64, reflect.Complex128:
+ return textLine(fmt.Sprint(v.Complex()))
+ case reflect.String:
+ return opts.formatString("", v.String())
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ return textLine(formatPointer(value.PointerOf(v), true))
+ case reflect.Struct:
+ var list textList
+ v := makeAddressable(v) // needed for retrieveUnexportedField
+ maxLen := v.NumField()
+ if opts.LimitVerbosity {
+ maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
+ opts.VerbosityLevel--
+ }
+ for i := 0; i < v.NumField(); i++ {
+ vv := v.Field(i)
+ if vv.IsZero() {
+ continue // Elide fields with zero values
+ }
+ if len(list) == maxLen {
+ list.AppendEllipsis(diffStats{})
+ break
+ }
+ sf := t.Field(i)
+ if !isExported(sf.Name) {
+ vv = retrieveUnexportedField(v, sf, true)
+ }
+ s := opts.WithTypeMode(autoType).FormatValue(vv, t.Kind(), ptrs)
+ list = append(list, textRecord{Key: sf.Name, Value: s})
+ }
+ return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
+ case reflect.Slice:
+ if v.IsNil() {
+ return textNil
+ }
+
+ // Check whether this is a []byte of text data.
+ if t.Elem() == byteType {
+ b := v.Bytes()
+ isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) }
+ if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 {
+ out = opts.formatString("", string(b))
+ skipType = true
+ return opts.FormatType(t, out)
+ }
+ }
+
+ fallthrough
+ case reflect.Array:
+ maxLen := v.Len()
+ if opts.LimitVerbosity {
+ maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
+ opts.VerbosityLevel--
+ }
+ var list textList
+ for i := 0; i < v.Len(); i++ {
+ if len(list) == maxLen {
+ list.AppendEllipsis(diffStats{})
+ break
+ }
+ s := opts.WithTypeMode(elideType).FormatValue(v.Index(i), t.Kind(), ptrs)
+ list = append(list, textRecord{Value: s})
+ }
+
+ out = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
+ if t.Kind() == reflect.Slice && opts.PrintAddresses {
+ header := fmt.Sprintf("ptr:%v, len:%d, cap:%d", formatPointer(value.PointerOf(v), false), v.Len(), v.Cap())
+ out = &textWrap{Prefix: pointerDelimPrefix + header + pointerDelimSuffix, Value: out}
+ }
+ return out
+ case reflect.Map:
+ if v.IsNil() {
+ return textNil
+ }
+
+ // Check pointer for cycles.
+ ptrRef, visited := ptrs.Push(v)
+ if visited {
+ return makeLeafReference(ptrRef, opts.PrintAddresses)
+ }
+ defer ptrs.Pop()
+
+ maxLen := v.Len()
+ if opts.LimitVerbosity {
+ maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
+ opts.VerbosityLevel--
+ }
+ var list textList
+ for _, k := range value.SortKeys(v.MapKeys()) {
+ if len(list) == maxLen {
+ list.AppendEllipsis(diffStats{})
+ break
+ }
+ sk := formatMapKey(k, false, ptrs)
+ sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), t.Kind(), ptrs)
+ list = append(list, textRecord{Key: sk, Value: sv})
+ }
+
+ out = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
+ out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out)
+ return out
+ case reflect.Ptr:
+ if v.IsNil() {
+ return textNil
+ }
+
+ // Check pointer for cycles.
+ ptrRef, visited := ptrs.Push(v)
+ if visited {
+ out = makeLeafReference(ptrRef, opts.PrintAddresses)
+ return &textWrap{Prefix: "&", Value: out}
+ }
+ defer ptrs.Pop()
+
+ // Skip the name only if this is an unnamed pointer type.
+ // Otherwise taking the address of a value does not reproduce
+ // the named pointer type.
+ if v.Type().Name() == "" {
+ skipType = true // Let the underlying value print the type instead
+ }
+ out = opts.FormatValue(v.Elem(), t.Kind(), ptrs)
+ out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out)
+ out = &textWrap{Prefix: "&", Value: out}
+ return out
+ case reflect.Interface:
+ if v.IsNil() {
+ return textNil
+ }
+ // Interfaces accept different concrete types,
+ // so configure the underlying value to explicitly print the type.
+ return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs)
+ default:
+ panic(fmt.Sprintf("%v kind not handled", v.Kind()))
+ }
+}
+
+func (opts formatOptions) formatString(prefix, s string) textNode {
+ maxLen := len(s)
+ maxLines := strings.Count(s, "\n") + 1
+ if opts.LimitVerbosity {
+ maxLen = (1 << opts.verbosity()) << 5 // 32, 64, 128, 256, etc...
+ maxLines = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc...
+ }
+
+ // For multiline strings, use the triple-quote syntax,
+ // but only use it when printing removed or inserted nodes since
+ // we only want the extra verbosity for those cases.
+ lines := strings.Split(strings.TrimSuffix(s, "\n"), "\n")
+ isTripleQuoted := len(lines) >= 4 && (opts.DiffMode == '-' || opts.DiffMode == '+')
+ for i := 0; i < len(lines) && isTripleQuoted; i++ {
+ lines[i] = strings.TrimPrefix(strings.TrimSuffix(lines[i], "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support
+ isPrintable := func(r rune) bool {
+ return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable
+ }
+ line := lines[i]
+ isTripleQuoted = !strings.HasPrefix(strings.TrimPrefix(line, prefix), `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" && len(line) <= maxLen
+ }
+ if isTripleQuoted {
+ var list textList
+ list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true})
+ for i, line := range lines {
+ if numElided := len(lines) - i; i == maxLines-1 && numElided > 1 {
+ comment := commentString(fmt.Sprintf("%d elided lines", numElided))
+ list = append(list, textRecord{Diff: opts.DiffMode, Value: textEllipsis, ElideComma: true, Comment: comment})
+ break
+ }
+ list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(line), ElideComma: true})
+ }
+ list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true})
+ return &textWrap{Prefix: "(", Value: list, Suffix: ")"}
+ }
+
+ // Format the string as a single-line quoted string.
+ if len(s) > maxLen+len(textEllipsis) {
+ return textLine(prefix + formatString(s[:maxLen]) + string(textEllipsis))
+ }
+ return textLine(prefix + formatString(s))
+}
+
+// formatMapKey formats v as if it were a map key.
+// The result is guaranteed to be a single line.
+func formatMapKey(v reflect.Value, disambiguate bool, ptrs *pointerReferences) string {
+ var opts formatOptions
+ opts.DiffMode = diffIdentical
+ opts.TypeMode = elideType
+ opts.PrintAddresses = disambiguate
+ opts.AvoidStringer = disambiguate
+ opts.QualifiedNames = disambiguate
+ opts.VerbosityLevel = maxVerbosityPreset
+ opts.LimitVerbosity = true
+ s := opts.FormatValue(v, reflect.Map, ptrs).String()
+ return strings.TrimSpace(s)
+}
+
+// formatString prints s as a double-quoted or backtick-quoted string.
+func formatString(s string) string {
+	// Use a quoted string if it is the same length as a raw string literal.
+ // Otherwise, attempt to use the raw string form.
+ qs := strconv.Quote(s)
+ if len(qs) == 1+len(s)+1 {
+ return qs
+ }
+
+ // Disallow newlines to ensure output is a single line.
+ // Only allow printable runes for readability purposes.
+ rawInvalid := func(r rune) bool {
+ return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t')
+ }
+ if utf8.ValidString(s) && strings.IndexFunc(s, rawInvalid) < 0 {
+ return "`" + s + "`"
+ }
+ return qs
+}
+
+// formatHex prints u as a hexadecimal integer in Go notation.
+func formatHex(u uint64) string {
+ var f string
+ switch {
+ case u <= 0xff:
+ f = "0x%02x"
+ case u <= 0xffff:
+ f = "0x%04x"
+ case u <= 0xffffff:
+ f = "0x%06x"
+ case u <= 0xffffffff:
+ f = "0x%08x"
+ case u <= 0xffffffffff:
+ f = "0x%010x"
+ case u <= 0xffffffffffff:
+ f = "0x%012x"
+ case u <= 0xffffffffffffff:
+ f = "0x%014x"
+ case u <= 0xffffffffffffffff:
+ f = "0x%016x"
+ }
+ return fmt.Sprintf(f, u)
+}
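
A short sketch of the stringer handling above; the celsius type is illustrative:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// celsius implements fmt.Stringer, so reports render it via String.
type celsius float64

func (c celsius) String() string { return fmt.Sprintf("%.1f°C", float64(c)) }

func main() {
	x := map[string]celsius{"cpu": 71.5}
	y := map[string]celsius{"cpu": 72.0}
	// The report renders the differing values through String,
	// shown with an s prefix (e.g. s"71.5°C") rather than as raw floats.
	fmt.Println(cmp.Diff(x, y))
}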
diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go
new file mode 100644
index 0000000..23e444f
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go
@@ -0,0 +1,614 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/google/go-cmp/cmp/internal/diff"
+)
+
+// CanFormatDiffSlice reports whether we support custom formatting for nodes
+// that are slices of primitive kinds or strings.
+func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool {
+ switch {
+ case opts.DiffMode != diffUnknown:
+ return false // Must be formatting in diff mode
+ case v.NumDiff == 0:
+ return false // No differences detected
+ case !v.ValueX.IsValid() || !v.ValueY.IsValid():
+ return false // Both values must be valid
+ case v.NumIgnored > 0:
+ return false // Some ignore option was used
+ case v.NumTransformed > 0:
+ return false // Some transform option was used
+ case v.NumCompared > 1:
+ return false // More than one comparison was used
+ case v.NumCompared == 1 && v.Type.Name() != "":
+ // The need for cmp to check applicability of options on every element
+ // in a slice is a significant performance detriment for large []byte.
+ // The workaround is to specify Comparer(bytes.Equal),
+ // which enables cmp to compare []byte more efficiently.
+ // If they differ, we still want to provide batched diffing.
+ // The logic disallows named types since they tend to have their own
+ // String method, with nicer formatting than what this provides.
+ return false
+ }
+
+ // Check whether this is an interface with the same concrete types.
+ t := v.Type
+ vx, vy := v.ValueX, v.ValueY
+ if t.Kind() == reflect.Interface && !vx.IsNil() && !vy.IsNil() && vx.Elem().Type() == vy.Elem().Type() {
+ vx, vy = vx.Elem(), vy.Elem()
+ t = vx.Type()
+ }
+
+ // Check whether we provide specialized diffing for this type.
+ switch t.Kind() {
+ case reflect.String:
+ case reflect.Array, reflect.Slice:
+ // Only slices of primitive types have specialized handling.
+ switch t.Elem().Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
+ reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
+ default:
+ return false
+ }
+
+ // Both slice values have to be non-empty.
+ if t.Kind() == reflect.Slice && (vx.Len() == 0 || vy.Len() == 0) {
+ return false
+ }
+
+ // If a sufficient number of elements already differ,
+		// use specialized formatting even if the length requirement is not met.
+ if v.NumDiff > v.NumSame {
+ return true
+ }
+ default:
+ return false
+ }
+
+ // Use specialized string diffing for longer slices or strings.
+ const minLength = 32
+ return vx.Len() >= minLength && vy.Len() >= minLength
+}
+
+// FormatDiffSlice prints a diff for the slices (or strings) represented by v.
+// This provides custom-tailored logic to make printing of differences in
+// textual strings and slices of primitive kinds more readable.
+func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
+ assert(opts.DiffMode == diffUnknown)
+ t, vx, vy := v.Type, v.ValueX, v.ValueY
+ if t.Kind() == reflect.Interface {
+ vx, vy = vx.Elem(), vy.Elem()
+ t = vx.Type()
+ opts = opts.WithTypeMode(emitType)
+ }
+
+ // Auto-detect the type of the data.
+ var sx, sy string
+ var ssx, ssy []string
+ var isString, isMostlyText, isPureLinedText, isBinary bool
+ switch {
+ case t.Kind() == reflect.String:
+ sx, sy = vx.String(), vy.String()
+ isString = true
+ case t.Kind() == reflect.Slice && t.Elem() == byteType:
+ sx, sy = string(vx.Bytes()), string(vy.Bytes())
+ isString = true
+ case t.Kind() == reflect.Array:
+ // Arrays need to be addressable for slice operations to work.
+ vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem()
+ vx2.Set(vx)
+ vy2.Set(vy)
+ vx, vy = vx2, vy2
+ }
+ if isString {
+ var numTotalRunes, numValidRunes, numLines, lastLineIdx, maxLineLen int
+ for i, r := range sx + sy {
+ numTotalRunes++
+ if (unicode.IsPrint(r) || unicode.IsSpace(r)) && r != utf8.RuneError {
+ numValidRunes++
+ }
+ if r == '\n' {
+ if maxLineLen < i-lastLineIdx {
+ maxLineLen = i - lastLineIdx
+ }
+ lastLineIdx = i + 1
+ numLines++
+ }
+ }
+ isPureText := numValidRunes == numTotalRunes
+ isMostlyText = float64(numValidRunes) > math.Floor(0.90*float64(numTotalRunes))
+ isPureLinedText = isPureText && numLines >= 4 && maxLineLen <= 1024
+ isBinary = !isMostlyText
+
+ // Avoid diffing by lines if it produces a significantly more complex
+ // edit script than diffing by bytes.
+ if isPureLinedText {
+ ssx = strings.Split(sx, "\n")
+ ssy = strings.Split(sy, "\n")
+ esLines := diff.Difference(len(ssx), len(ssy), func(ix, iy int) diff.Result {
+ return diff.BoolResult(ssx[ix] == ssy[iy])
+ })
+ esBytes := diff.Difference(len(sx), len(sy), func(ix, iy int) diff.Result {
+ return diff.BoolResult(sx[ix] == sy[iy])
+ })
+ efficiencyLines := float64(esLines.Dist()) / float64(len(esLines))
+ efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes))
+ quotedLength := len(strconv.Quote(sx + sy))
+ unquotedLength := len(sx) + len(sy)
+ escapeExpansionRatio := float64(quotedLength) / float64(unquotedLength)
+ isPureLinedText = efficiencyLines < 4*efficiencyBytes || escapeExpansionRatio > 1.1
+ }
+ }
+
+ // Format the string into printable records.
+ var list textList
+ var delim string
+ switch {
+ // If the text appears to be multi-lined text,
+ // then perform differencing across individual lines.
+ case isPureLinedText:
+ list = opts.formatDiffSlice(
+ reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line",
+ func(v reflect.Value, d diffMode) textRecord {
+ s := formatString(v.Index(0).String())
+ return textRecord{Diff: d, Value: textLine(s)}
+ },
+ )
+ delim = "\n"
+
+ // If possible, use a custom triple-quote (""") syntax for printing
+ // differences in a string literal. This format is more readable,
+ // but has edge-cases where differences are visually indistinguishable.
+ // This format is avoided under the following conditions:
+ // - A line starts with `"""`
+ // - A line starts with "..."
+ // - A line contains non-printable characters
+ // - Adjacent different lines differ only by whitespace
+ //
+ // For example:
+ //
+ // """
+ // ... // 3 identical lines
+ // foo
+ // bar
+ // - baz
+ // + BAZ
+ // """
+ isTripleQuoted := true
+ prevRemoveLines := map[string]bool{}
+ prevInsertLines := map[string]bool{}
+ var list2 textList
+ list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true})
+ for _, r := range list {
+ if !r.Value.Equal(textEllipsis) {
+ line, _ := strconv.Unquote(string(r.Value.(textLine)))
+ line = strings.TrimPrefix(strings.TrimSuffix(line, "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support
+ normLine := strings.Map(func(r rune) rune {
+ if unicode.IsSpace(r) {
+ return -1 // drop whitespace to avoid visually indistinguishable output
+ }
+ return r
+ }, line)
+ isPrintable := func(r rune) bool {
+ return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable
+ }
+ isTripleQuoted = !strings.HasPrefix(line, `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == ""
+ switch r.Diff {
+ case diffRemoved:
+ isTripleQuoted = isTripleQuoted && !prevInsertLines[normLine]
+ prevRemoveLines[normLine] = true
+ case diffInserted:
+ isTripleQuoted = isTripleQuoted && !prevRemoveLines[normLine]
+ prevInsertLines[normLine] = true
+ }
+ if !isTripleQuoted {
+ break
+ }
+ r.Value = textLine(line)
+ r.ElideComma = true
+ }
+ if !(r.Diff == diffRemoved || r.Diff == diffInserted) { // start a new non-adjacent difference group
+ prevRemoveLines = map[string]bool{}
+ prevInsertLines = map[string]bool{}
+ }
+ list2 = append(list2, r)
+ }
+ if r := list2[len(list2)-1]; r.Diff == diffIdentical && len(r.Value.(textLine)) == 0 {
+ list2 = list2[:len(list2)-1] // elide single empty line at the end
+ }
+ list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true})
+ if isTripleQuoted {
+ var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"}
+ switch t.Kind() {
+ case reflect.String:
+ if t != stringType {
+ out = opts.FormatType(t, out)
+ }
+ case reflect.Slice:
+ // Always emit type for slices since the triple-quote syntax
+ // looks like a string (not a slice).
+ opts = opts.WithTypeMode(emitType)
+ out = opts.FormatType(t, out)
+ }
+ return out
+ }
+
+ // If the text appears to be single-lined text,
+ // then perform differencing in approximately fixed-sized chunks.
+ // The output is printed as quoted strings.
+ case isMostlyText:
+ list = opts.formatDiffSlice(
+ reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte",
+ func(v reflect.Value, d diffMode) textRecord {
+ s := formatString(v.String())
+ return textRecord{Diff: d, Value: textLine(s)}
+ },
+ )
+
+ // If the text appears to be binary data,
+ // then perform differencing in approximately fixed-sized chunks.
+ // The output is inspired by hexdump.
+ case isBinary:
+ list = opts.formatDiffSlice(
+ reflect.ValueOf(sx), reflect.ValueOf(sy), 16, "byte",
+ func(v reflect.Value, d diffMode) textRecord {
+ var ss []string
+ for i := 0; i < v.Len(); i++ {
+ ss = append(ss, formatHex(v.Index(i).Uint()))
+ }
+ s := strings.Join(ss, ", ")
+ comment := commentString(fmt.Sprintf("%c|%v|", d, formatASCII(v.String())))
+ return textRecord{Diff: d, Value: textLine(s), Comment: comment}
+ },
+ )
+
+ // For all other slices of primitive types,
+ // then perform differencing in approximately fixed-sized chunks.
+ // The size of each chunk depends on the width of the element kind.
+ default:
+ var chunkSize int
+ if t.Elem().Kind() == reflect.Bool {
+ chunkSize = 16
+ } else {
+ switch t.Elem().Bits() {
+ case 8:
+ chunkSize = 16
+ case 16:
+ chunkSize = 12
+ case 32:
+ chunkSize = 8
+ default:
+ chunkSize = 8
+ }
+ }
+ list = opts.formatDiffSlice(
+ vx, vy, chunkSize, t.Elem().Kind().String(),
+ func(v reflect.Value, d diffMode) textRecord {
+ var ss []string
+ for i := 0; i < v.Len(); i++ {
+ switch t.Elem().Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ ss = append(ss, fmt.Sprint(v.Index(i).Int()))
+ case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ ss = append(ss, fmt.Sprint(v.Index(i).Uint()))
+ case reflect.Uint8, reflect.Uintptr:
+ ss = append(ss, formatHex(v.Index(i).Uint()))
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
+ ss = append(ss, fmt.Sprint(v.Index(i).Interface()))
+ }
+ }
+ s := strings.Join(ss, ", ")
+ return textRecord{Diff: d, Value: textLine(s)}
+ },
+ )
+ }
+
+ // Wrap the output with appropriate type information.
+ var out textNode = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
+ if !isMostlyText {
+ // The "{...}" byte-sequence literal is not valid Go syntax for strings.
+ // Emit the type for extra clarity (e.g. "string{...}").
+ if t.Kind() == reflect.String {
+ opts = opts.WithTypeMode(emitType)
+ }
+ return opts.FormatType(t, out)
+ }
+ switch t.Kind() {
+ case reflect.String:
+ out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
+ if t != stringType {
+ out = opts.FormatType(t, out)
+ }
+ case reflect.Slice:
+ out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
+ if t != bytesType {
+ out = opts.FormatType(t, out)
+ }
+ }
+ return out
+}
+
+// formatASCII formats s as an ASCII string.
+// This is useful for printing binary strings in a semi-legible way.
+func formatASCII(s string) string {
+ b := bytes.Repeat([]byte{'.'}, len(s))
+ for i := 0; i < len(s); i++ {
+ if ' ' <= s[i] && s[i] <= '~' {
+ b[i] = s[i]
+ }
+ }
+ return string(b)
+}
+
+func (opts formatOptions) formatDiffSlice(
+ vx, vy reflect.Value, chunkSize int, name string,
+ makeRec func(reflect.Value, diffMode) textRecord,
+) (list textList) {
+ eq := func(ix, iy int) bool {
+ return vx.Index(ix).Interface() == vy.Index(iy).Interface()
+ }
+ es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result {
+ return diff.BoolResult(eq(ix, iy))
+ })
+
+ appendChunks := func(v reflect.Value, d diffMode) int {
+ n0 := v.Len()
+ for v.Len() > 0 {
+ n := chunkSize
+ if n > v.Len() {
+ n = v.Len()
+ }
+ list = append(list, makeRec(v.Slice(0, n), d))
+ v = v.Slice(n, v.Len())
+ }
+ return n0 - v.Len()
+ }
+
+ var numDiffs int
+ maxLen := -1
+ if opts.LimitVerbosity {
+ maxLen = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc...
+ opts.VerbosityLevel--
+ }
+
+ groups := coalesceAdjacentEdits(name, es)
+ groups = coalesceInterveningIdentical(groups, chunkSize/4)
+ groups = cleanupSurroundingIdentical(groups, eq)
+ maxGroup := diffStats{Name: name}
+ for i, ds := range groups {
+ if maxLen >= 0 && numDiffs >= maxLen {
+ maxGroup = maxGroup.Append(ds)
+ continue
+ }
+
+ // Print equal.
+ if ds.NumDiff() == 0 {
+ // Compute the number of leading and trailing equal bytes to print.
+ var numLo, numHi int
+ numEqual := ds.NumIgnored + ds.NumIdentical
+ for numLo < chunkSize*numContextRecords && numLo+numHi < numEqual && i != 0 {
+ numLo++
+ }
+ for numHi < chunkSize*numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 {
+ numHi++
+ }
+ if numEqual-(numLo+numHi) <= chunkSize && ds.NumIgnored == 0 {
+ numHi = numEqual - numLo // Avoid pointless coalescing of single equal row
+ }
+
+ // Print the equal bytes.
+ appendChunks(vx.Slice(0, numLo), diffIdentical)
+ if numEqual > numLo+numHi {
+ ds.NumIdentical -= numLo + numHi
+ list.AppendEllipsis(ds)
+ }
+ appendChunks(vx.Slice(numEqual-numHi, numEqual), diffIdentical)
+ vx = vx.Slice(numEqual, vx.Len())
+ vy = vy.Slice(numEqual, vy.Len())
+ continue
+ }
+
+ // Print unequal.
+ len0 := len(list)
+ nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved)
+ vx = vx.Slice(nx, vx.Len())
+ ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted)
+ vy = vy.Slice(ny, vy.Len())
+ numDiffs += len(list) - len0
+ }
+ if maxGroup.IsZero() {
+ assert(vx.Len() == 0 && vy.Len() == 0)
+ } else {
+ list.AppendEllipsis(maxGroup)
+ }
+ return list
+}
+
+// coalesceAdjacentEdits coalesces the list of edits into groups of adjacent
+// equal or unequal counts.
+//
+// Example:
+//
+// Input: "..XXY...Y"
+// Output: [
+// {NumIdentical: 2},
+//		{NumRemoved: 2, NumInserted: 1},
+// {NumIdentical: 3},
+// {NumInserted: 1},
+// ]
+func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) {
+ var prevMode byte
+ lastStats := func(mode byte) *diffStats {
+ if prevMode != mode {
+ groups = append(groups, diffStats{Name: name})
+ prevMode = mode
+ }
+ return &groups[len(groups)-1]
+ }
+ for _, e := range es {
+ switch e {
+ case diff.Identity:
+ lastStats('=').NumIdentical++
+ case diff.UniqueX:
+ lastStats('!').NumRemoved++
+ case diff.UniqueY:
+ lastStats('!').NumInserted++
+ case diff.Modified:
+ lastStats('!').NumModified++
+ }
+ }
+ return groups
+}
+
+// coalesceInterveningIdentical coalesces sufficiently short (<= windowSize)
+// equal groups into adjacent unequal groups that currently result in a
+// dual inserted/removed printout. This acts as a high-pass filter to smooth
+// out high-frequency changes within the windowSize.
+//
+// Example:
+//
+// WindowSize: 16,
+// Input: [
+// {NumIdentical: 61}, // group 0
+// {NumRemoved: 3, NumInserted: 1}, // group 1
+// {NumIdentical: 6}, // ├── coalesce
+// {NumInserted: 2}, // ├── coalesce
+// {NumIdentical: 1}, // ├── coalesce
+// {NumRemoved: 9}, // └── coalesce
+// {NumIdentical: 64}, // group 2
+// {NumRemoved: 3, NumInserted: 1}, // group 3
+// {NumIdentical: 6}, // ├── coalesce
+// {NumInserted: 2}, // ├── coalesce
+// {NumIdentical: 1}, // ├── coalesce
+// {NumRemoved: 7}, // ├── coalesce
+// {NumIdentical: 1}, // ├── coalesce
+// {NumRemoved: 2}, // └── coalesce
+// {NumIdentical: 63}, // group 4
+// ]
+// Output: [
+// {NumIdentical: 61},
+// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3},
+// {NumIdentical: 64},
+// {NumIdentical: 8, NumRemoved: 12, NumInserted: 3},
+// {NumIdentical: 63},
+// ]
+func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats {
+ groups, groupsOrig := groups[:0], groups
+ for i, ds := range groupsOrig {
+ if len(groups) >= 2 && ds.NumDiff() > 0 {
+ prev := &groups[len(groups)-2] // Unequal group
+ curr := &groups[len(groups)-1] // Equal group
+ next := &groupsOrig[i] // Unequal group
+ hadX, hadY := prev.NumRemoved > 0, prev.NumInserted > 0
+ hasX, hasY := next.NumRemoved > 0, next.NumInserted > 0
+ if ((hadX || hasX) && (hadY || hasY)) && curr.NumIdentical <= windowSize {
+ *prev = prev.Append(*curr).Append(*next)
+ groups = groups[:len(groups)-1] // Truncate off equal group
+ continue
+ }
+ }
+ groups = append(groups, ds)
+ }
+ return groups
+}
+
+// cleanupSurroundingIdentical scans through all unequal groups, and
+// moves any leading sequence of equal elements to the preceding equal group and
+// moves any trailing sequence of equal elements to the succeeding equal group.
+//
+// This is necessary since coalesceInterveningIdentical may coalesce edit groups
+// together such that leading/trailing spans of equal elements become possible.
+// Note that this can occur even with an optimal diffing algorithm.
+//
+// Example:
+//
+// Input: [
+// {NumIdentical: 61},
+// {NumIdentical: 1 , NumRemoved: 11, NumInserted: 2}, // assume 3 leading identical elements
+// {NumIdentical: 67},
+// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3}, // assume 10 trailing identical elements
+// {NumIdentical: 54},
+// ]
+// Output: [
+// {NumIdentical: 64}, // incremented by 3
+// {NumRemoved: 9},
+// {NumIdentical: 67},
+// {NumRemoved: 9},
+// {NumIdentical: 64}, // incremented by 10
+// ]
+func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats {
+ var ix, iy int // indexes into sequence x and y
+ for i, ds := range groups {
+ // Handle equal group.
+ if ds.NumDiff() == 0 {
+ ix += ds.NumIdentical
+ iy += ds.NumIdentical
+ continue
+ }
+
+ // Handle unequal group.
+ nx := ds.NumIdentical + ds.NumRemoved + ds.NumModified
+ ny := ds.NumIdentical + ds.NumInserted + ds.NumModified
+ var numLeadingIdentical, numTrailingIdentical int
+ for j := 0; j < nx && j < ny && eq(ix+j, iy+j); j++ {
+ numLeadingIdentical++
+ }
+ for j := 0; j < nx && j < ny && eq(ix+nx-1-j, iy+ny-1-j); j++ {
+ numTrailingIdentical++
+ }
+ if numIdentical := numLeadingIdentical + numTrailingIdentical; numIdentical > 0 {
+ if numLeadingIdentical > 0 {
+ // Remove leading identical span from this group and
+ // insert it into the preceding group.
+ if i-1 >= 0 {
+ groups[i-1].NumIdentical += numLeadingIdentical
+ } else {
+ // No preceding group exists, so prepend a new group,
+ // but do so after we finish iterating over all groups.
+ defer func() {
+ groups = append([]diffStats{{Name: groups[0].Name, NumIdentical: numLeadingIdentical}}, groups...)
+ }()
+ }
+ // Increment indexes since the preceding group would have handled this.
+ ix += numLeadingIdentical
+ iy += numLeadingIdentical
+ }
+ if numTrailingIdentical > 0 {
+ // Remove trailing identical span from this group and
+ // insert it into the succeeding group.
+ if i+1 < len(groups) {
+ groups[i+1].NumIdentical += numTrailingIdentical
+ } else {
+ // No succeeding group exists, so append a new group,
+ // but do so after we finish iterating over all groups.
+ defer func() {
+ groups = append(groups, diffStats{Name: groups[len(groups)-1].Name, NumIdentical: numTrailingIdentical})
+ }()
+ }
+ // Do not increment indexes since the succeeding group will handle this.
+ }
+
+ // Update this group since some identical elements were removed.
+ nx -= numIdentical
+ ny -= numIdentical
+ groups[i] = diffStats{Name: ds.Name, NumRemoved: nx, NumInserted: ny}
+ }
+ ix += nx
+ iy += ny
+ }
+ return groups
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/report_text.go b/vendor/github.com/google/go-cmp/cmp/report_text.go
new file mode 100644
index 0000000..388fcf5
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_text.go
@@ -0,0 +1,432 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "bytes"
+ "fmt"
+ "math/rand"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/google/go-cmp/cmp/internal/flags"
+)
+
+var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
+
+const maxColumnLength = 80
+
+type indentMode int
+
+func (n indentMode) appendIndent(b []byte, d diffMode) []byte {
+ // The output of Diff is documented as being unstable to provide future
+ // flexibility in changing the output for more humanly readable reports.
+ // This logic intentionally introduces instability to the exact output
+ // so that users can detect accidental reliance on stability early on,
+ // rather than much later when an actual change to the format occurs.
+ if flags.Deterministic || randBool {
+ // Use regular spaces (U+0020).
+ switch d {
+ case diffUnknown, diffIdentical:
+ b = append(b, " "...)
+ case diffRemoved:
+ b = append(b, "- "...)
+ case diffInserted:
+ b = append(b, "+ "...)
+ }
+ } else {
+ // Use non-breaking spaces (U+00a0).
+ switch d {
+ case diffUnknown, diffIdentical:
+ b = append(b, "  "...)
+ case diffRemoved:
+ b = append(b, "- "...)
+ case diffInserted:
+ b = append(b, "+ "...)
+ }
+ }
+ return repeatCount(n).appendChar(b, '\t')
+}
+
+type repeatCount int
+
+func (n repeatCount) appendChar(b []byte, c byte) []byte {
+ for ; n > 0; n-- {
+ b = append(b, c)
+ }
+ return b
+}
+
+// textNode is a simplified tree-based representation of structured text.
+// Possible node types are textWrap, textList, or textLine.
+type textNode interface {
+ // Len reports the length in bytes of a single-line version of the tree.
+ // Nested textRecord.Diff and textRecord.Comment fields are ignored.
+ Len() int
+ // Equal reports whether the two trees are structurally identical.
+ // Nested textRecord.Diff and textRecord.Comment fields are compared.
+ Equal(textNode) bool
+ // String returns the string representation of the text tree.
+ // It is not guaranteed that len(x.String()) == x.Len(),
+ // nor that x.String() == y.String() implies that x.Equal(y).
+ String() string
+
+ // formatCompactTo formats the contents of the tree as a single-line string
+ // to the provided buffer. Any nested textRecord.Diff and textRecord.Comment
+ // fields are ignored.
+ //
+ // However, not all nodes in the tree should be collapsed as a single-line.
+ // If a node can be collapsed as a single-line, it is replaced by a textLine
+ // node. Since the top-level node cannot replace itself, this also returns
+ // the current node itself.
+ //
+ // This does not mutate the receiver.
+ formatCompactTo([]byte, diffMode) ([]byte, textNode)
+ // formatExpandedTo formats the contents of the tree as a multi-line string
+ // to the provided buffer. In order for column alignment to operate well,
+ // formatCompactTo must be called before calling formatExpandedTo.
+ formatExpandedTo([]byte, diffMode, indentMode) []byte
+}
+
+// textWrap is a wrapper that concatenates a prefix and/or a suffix
+// to the underlying node.
+type textWrap struct {
+ Prefix string // e.g., "bytes.Buffer{"
+ Value textNode // textWrap | textList | textLine
+ Suffix string // e.g., "}"
+ Metadata interface{} // arbitrary metadata; has no effect on formatting
+}
+
+func (s *textWrap) Len() int {
+ return len(s.Prefix) + s.Value.Len() + len(s.Suffix)
+}
+func (s1 *textWrap) Equal(s2 textNode) bool {
+ if s2, ok := s2.(*textWrap); ok {
+ return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix
+ }
+ return false
+}
+func (s *textWrap) String() string {
+ var d diffMode
+ var n indentMode
+ _, s2 := s.formatCompactTo(nil, d)
+ b := n.appendIndent(nil, d) // Leading indent
+ b = s2.formatExpandedTo(b, d, n) // Main body
+ b = append(b, '\n') // Trailing newline
+ return string(b)
+}
+func (s *textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
+ n0 := len(b) // Original buffer length
+ b = append(b, s.Prefix...)
+ b, s.Value = s.Value.formatCompactTo(b, d)
+ b = append(b, s.Suffix...)
+ if _, ok := s.Value.(textLine); ok {
+ return b, textLine(b[n0:])
+ }
+ return b, s
+}
+func (s *textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
+ b = append(b, s.Prefix...)
+ b = s.Value.formatExpandedTo(b, d, n)
+ b = append(b, s.Suffix...)
+ return b
+}
+
+// textList is a comma-separated list of textWrap or textLine nodes.
+// The list may be formatted as multi-lines or single-line at the discretion
+// of the textList.formatCompactTo method.
+type textList []textRecord
+type textRecord struct {
+ Diff diffMode // e.g., 0 or '-' or '+'
+ Key string // e.g., "MyField"
+ Value textNode // textWrap | textLine
+ ElideComma bool // avoid trailing comma
+ Comment fmt.Stringer // e.g., "6 identical fields"
+}
+
+// AppendEllipsis appends a new ellipsis node to the list if none already
+// exists at the end. If ds is non-zero, it coalesces the statistics with the
+// previous diffStats.
+func (s *textList) AppendEllipsis(ds diffStats) {
+ hasStats := !ds.IsZero()
+ if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) {
+ if hasStats {
+ *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true, Comment: ds})
+ } else {
+ *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true})
+ }
+ return
+ }
+ if hasStats {
+ (*s)[len(*s)-1].Comment = (*s)[len(*s)-1].Comment.(diffStats).Append(ds)
+ }
+}
+
+func (s textList) Len() (n int) {
+ for i, r := range s {
+ n += len(r.Key)
+ if r.Key != "" {
+ n += len(": ")
+ }
+ n += r.Value.Len()
+ if i < len(s)-1 {
+ n += len(", ")
+ }
+ }
+ return n
+}
+
+func (s1 textList) Equal(s2 textNode) bool {
+ if s2, ok := s2.(textList); ok {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i := range s1 {
+ r1, r2 := s1[i], s2[i]
+ if !(r1.Diff == r2.Diff && r1.Key == r2.Key && r1.Value.Equal(r2.Value) && r1.Comment == r2.Comment) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
+
+func (s textList) String() string {
+ return (&textWrap{Prefix: "{", Value: s, Suffix: "}"}).String()
+}
+
+func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
+ s = append(textList(nil), s...) // Avoid mutating original
+
+ // Determine whether we can collapse this list as a single line.
+ n0 := len(b) // Original buffer length
+ var multiLine bool
+ for i, r := range s {
+ if r.Diff == diffInserted || r.Diff == diffRemoved {
+ multiLine = true
+ }
+ b = append(b, r.Key...)
+ if r.Key != "" {
+ b = append(b, ": "...)
+ }
+ b, s[i].Value = r.Value.formatCompactTo(b, d|r.Diff)
+ if _, ok := s[i].Value.(textLine); !ok {
+ multiLine = true
+ }
+ if r.Comment != nil {
+ multiLine = true
+ }
+ if i < len(s)-1 {
+ b = append(b, ", "...)
+ }
+ }
+ // Force multi-lined output when printing a removed/inserted node that
+ // is sufficiently long.
+ if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > maxColumnLength {
+ multiLine = true
+ }
+ if !multiLine {
+ return b, textLine(b[n0:])
+ }
+ return b, s
+}
+
+func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
+ alignKeyLens := s.alignLens(
+ func(r textRecord) bool {
+ _, isLine := r.Value.(textLine)
+ return r.Key == "" || !isLine
+ },
+ func(r textRecord) int { return utf8.RuneCountInString(r.Key) },
+ )
+ alignValueLens := s.alignLens(
+ func(r textRecord) bool {
+ _, isLine := r.Value.(textLine)
+ return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil
+ },
+ func(r textRecord) int { return utf8.RuneCount(r.Value.(textLine)) },
+ )
+
+ // Format lists of simple lists in a batched form.
+	// If the list is a sequence of only textLine values,
+ // then batch multiple values on a single line.
+ var isSimple bool
+ for _, r := range s {
+ _, isLine := r.Value.(textLine)
+ isSimple = r.Diff == 0 && r.Key == "" && isLine && r.Comment == nil
+ if !isSimple {
+ break
+ }
+ }
+ if isSimple {
+ n++
+ var batch []byte
+ emitBatch := func() {
+ if len(batch) > 0 {
+ b = n.appendIndent(append(b, '\n'), d)
+ b = append(b, bytes.TrimRight(batch, " ")...)
+ batch = batch[:0]
+ }
+ }
+ for _, r := range s {
+ line := r.Value.(textLine)
+ if len(batch)+len(line)+len(", ") > maxColumnLength {
+ emitBatch()
+ }
+ batch = append(batch, line...)
+ batch = append(batch, ", "...)
+ }
+ emitBatch()
+ n--
+ return n.appendIndent(append(b, '\n'), d)
+ }
+
+ // Format the list as a multi-lined output.
+ n++
+ for i, r := range s {
+ b = n.appendIndent(append(b, '\n'), d|r.Diff)
+ if r.Key != "" {
+ b = append(b, r.Key+": "...)
+ }
+ b = alignKeyLens[i].appendChar(b, ' ')
+
+ b = r.Value.formatExpandedTo(b, d|r.Diff, n)
+ if !r.ElideComma {
+ b = append(b, ',')
+ }
+ b = alignValueLens[i].appendChar(b, ' ')
+
+ if r.Comment != nil {
+ b = append(b, " // "+r.Comment.String()...)
+ }
+ }
+ n--
+
+ return n.appendIndent(append(b, '\n'), d)
+}
+
+func (s textList) alignLens(
+ skipFunc func(textRecord) bool,
+ lenFunc func(textRecord) int,
+) []repeatCount {
+ var startIdx, endIdx, maxLen int
+ lens := make([]repeatCount, len(s))
+ for i, r := range s {
+ if skipFunc(r) {
+ for j := startIdx; j < endIdx && j < len(s); j++ {
+ lens[j] = repeatCount(maxLen - lenFunc(s[j]))
+ }
+ startIdx, endIdx, maxLen = i+1, i+1, 0
+ } else {
+ if maxLen < lenFunc(r) {
+ maxLen = lenFunc(r)
+ }
+ endIdx = i + 1
+ }
+ }
+ for j := startIdx; j < endIdx && j < len(s); j++ {
+ lens[j] = repeatCount(maxLen - lenFunc(s[j]))
+ }
+ return lens
+}
+
+// textLine is a single-line segment of text and is always a leaf node
+// in the textNode tree.
+type textLine []byte
+
+var (
+ textNil = textLine("nil")
+ textEllipsis = textLine("...")
+)
+
+func (s textLine) Len() int {
+ return len(s)
+}
+func (s1 textLine) Equal(s2 textNode) bool {
+ if s2, ok := s2.(textLine); ok {
+ return bytes.Equal([]byte(s1), []byte(s2))
+ }
+ return false
+}
+func (s textLine) String() string {
+ return string(s)
+}
+func (s textLine) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
+ return append(b, s...), s
+}
+func (s textLine) formatExpandedTo(b []byte, _ diffMode, _ indentMode) []byte {
+ return append(b, s...)
+}
+
+type diffStats struct {
+ Name string
+ NumIgnored int
+ NumIdentical int
+ NumRemoved int
+ NumInserted int
+ NumModified int
+}
+
+func (s diffStats) IsZero() bool {
+ s.Name = ""
+ return s == diffStats{}
+}
+
+func (s diffStats) NumDiff() int {
+ return s.NumRemoved + s.NumInserted + s.NumModified
+}
+
+func (s diffStats) Append(ds diffStats) diffStats {
+ assert(s.Name == ds.Name)
+ s.NumIgnored += ds.NumIgnored
+ s.NumIdentical += ds.NumIdentical
+ s.NumRemoved += ds.NumRemoved
+ s.NumInserted += ds.NumInserted
+ s.NumModified += ds.NumModified
+ return s
+}
+
+// String prints a humanly-readable summary of coalesced records.
+//
+// Example:
+//
+// diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields"
+func (s diffStats) String() string {
+ var ss []string
+ var sum int
+ labels := [...]string{"ignored", "identical", "removed", "inserted", "modified"}
+ counts := [...]int{s.NumIgnored, s.NumIdentical, s.NumRemoved, s.NumInserted, s.NumModified}
+ for i, n := range counts {
+ if n > 0 {
+ ss = append(ss, fmt.Sprintf("%d %v", n, labels[i]))
+ }
+ sum += n
+ }
+
+ // Pluralize the name (adjusting for some obscure English grammar rules).
+ name := s.Name
+ if sum > 1 {
+ name += "s"
+ if strings.HasSuffix(name, "ys") {
+ name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries"
+ }
+ }
+
+ // Format the list according to English grammar (with Oxford comma).
+ switch n := len(ss); n {
+ case 0:
+ return ""
+ case 1, 2:
+ return strings.Join(ss, " and ") + " " + name
+ default:
+ return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1] + " " + name
+ }
+}
+
+type commentString string
+
+func (s commentString) String() string { return string(s) }
diff --git a/vendor/github.com/google/go-cmp/cmp/report_value.go b/vendor/github.com/google/go-cmp/cmp/report_value.go
new file mode 100644
index 0000000..668d470
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_value.go
@@ -0,0 +1,121 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import "reflect"
+
+// valueNode represents a single node within a report, which is a
+// structured representation of the value tree, containing information
+// regarding which nodes are equal or not.
+type valueNode struct {
+ parent *valueNode
+
+ Type reflect.Type
+ ValueX reflect.Value
+ ValueY reflect.Value
+
+ // NumSame is the number of leaf nodes that are equal.
+ // All descendants are equal only if NumDiff is 0.
+ NumSame int
+ // NumDiff is the number of leaf nodes that are not equal.
+ NumDiff int
+ // NumIgnored is the number of leaf nodes that are ignored.
+ NumIgnored int
+ // NumCompared is the number of leaf nodes that were compared
+ // using an Equal method or Comparer function.
+ NumCompared int
+ // NumTransformed is the number of non-leaf nodes that were transformed.
+ NumTransformed int
+ // NumChildren is the number of transitive descendants of this node.
+ // This counts from zero; thus, leaf nodes have no descendants.
+ NumChildren int
+ // MaxDepth is the maximum depth of the tree. This counts from zero;
+ // thus, leaf nodes have a depth of zero.
+ MaxDepth int
+
+ // Records is a list of struct fields, slice elements, or map entries.
+ Records []reportRecord // If populated, implies Value is not populated
+
+	// Value is the result of a transformation, pointer indirection, or
+	// type assertion.
+ Value *valueNode // If populated, implies Records is not populated
+
+ // TransformerName is the name of the transformer.
+ TransformerName string // If non-empty, implies Value is populated
+}
+type reportRecord struct {
+ Key reflect.Value // Invalid for slice element
+ Value *valueNode
+}
+
+func (parent *valueNode) PushStep(ps PathStep) (child *valueNode) {
+ vx, vy := ps.Values()
+ child = &valueNode{parent: parent, Type: ps.Type(), ValueX: vx, ValueY: vy}
+ switch s := ps.(type) {
+ case StructField:
+ assert(parent.Value == nil)
+ parent.Records = append(parent.Records, reportRecord{Key: reflect.ValueOf(s.Name()), Value: child})
+ case SliceIndex:
+ assert(parent.Value == nil)
+ parent.Records = append(parent.Records, reportRecord{Value: child})
+ case MapIndex:
+ assert(parent.Value == nil)
+ parent.Records = append(parent.Records, reportRecord{Key: s.Key(), Value: child})
+ case Indirect:
+ assert(parent.Value == nil && parent.Records == nil)
+ parent.Value = child
+ case TypeAssertion:
+ assert(parent.Value == nil && parent.Records == nil)
+ parent.Value = child
+ case Transform:
+ assert(parent.Value == nil && parent.Records == nil)
+ parent.Value = child
+ parent.TransformerName = s.Name()
+ parent.NumTransformed++
+ default:
+ assert(parent == nil) // Must be the root step
+ }
+ return child
+}
+
+func (r *valueNode) Report(rs Result) {
+ assert(r.MaxDepth == 0) // May only be called on leaf nodes
+
+ if rs.ByIgnore() {
+ r.NumIgnored++
+ } else {
+ if rs.Equal() {
+ r.NumSame++
+ } else {
+ r.NumDiff++
+ }
+ }
+ assert(r.NumSame+r.NumDiff+r.NumIgnored == 1)
+
+ if rs.ByMethod() {
+ r.NumCompared++
+ }
+ if rs.ByFunc() {
+ r.NumCompared++
+ }
+ assert(r.NumCompared <= 1)
+}
+
+func (child *valueNode) PopStep() (parent *valueNode) {
+ if child.parent == nil {
+ return nil
+ }
+ parent = child.parent
+ parent.NumSame += child.NumSame
+ parent.NumDiff += child.NumDiff
+ parent.NumIgnored += child.NumIgnored
+ parent.NumCompared += child.NumCompared
+ parent.NumTransformed += child.NumTransformed
+ parent.NumChildren += child.NumChildren + 1
+ if parent.MaxDepth < child.MaxDepth+1 {
+ parent.MaxDepth = child.MaxDepth + 1
+ }
+ return parent
+}
diff --git a/vendor/github.com/google/jsonapi/.gitignore b/vendor/github.com/google/jsonapi/.gitignore
new file mode 100644
index 0000000..19b1e1c
--- /dev/null
+++ b/vendor/github.com/google/jsonapi/.gitignore
@@ -0,0 +1 @@
+/examples/examples
diff --git a/vendor/github.com/google/jsonapi/.travis.yml b/vendor/github.com/google/jsonapi/.travis.yml
new file mode 100644
index 0000000..abc7d1b
--- /dev/null
+++ b/vendor/github.com/google/jsonapi/.travis.yml
@@ -0,0 +1,13 @@
+language: go
+arch:
+ - amd64
+ - ppc64le
+go:
+ - 1.11.x
+ - 1.12.x
+ - 1.13.x
+ - 1.14.x
+ - 1.15.x
+ - 1.16.x
+ - tip
+script: go test ./... -v
diff --git a/vendor/github.com/google/jsonapi/LICENSE b/vendor/github.com/google/jsonapi/LICENSE
new file mode 100644
index 0000000..c97912c
--- /dev/null
+++ b/vendor/github.com/google/jsonapi/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Google Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/google/jsonapi/README.md b/vendor/github.com/google/jsonapi/README.md
new file mode 100644
index 0000000..8dfb943
--- /dev/null
+++ b/vendor/github.com/google/jsonapi/README.md
@@ -0,0 +1,477 @@
+# jsonapi
+
+[![Build Status](https://travis-ci.org/google/jsonapi.svg?branch=master)](https://travis-ci.org/google/jsonapi)
+[![Go Report Card](https://goreportcard.com/badge/github.com/google/jsonapi)](https://goreportcard.com/report/github.com/google/jsonapi)
+[![GoDoc](https://godoc.org/github.com/google/jsonapi?status.svg)](http://godoc.org/github.com/google/jsonapi)
+[![No Maintenance Intended](http://unmaintained.tech/badge.svg)](http://unmaintained.tech/)
+
+A serializer/deserializer for JSON payloads that comply with the
+[JSON API - jsonapi.org](http://jsonapi.org) spec in Go.
+
+
+
+## Installation
+
+```
+go get -u github.com/google/jsonapi
+```
+
+Or, see [Alternative Installation](#alternative-installation).
+
+## Background
+
+You are working in your Go web application and you have a struct that is
+organized similarly to your database schema. You need to send and
+receive json payloads that adhere to the JSON API spec. Once you realize that
+your json needed to take on this special form, you go down the path of
+creating more structs to be able to serialize and deserialize JSON API
+payloads. Then there are more models required with this additional
+structure. Ugh! With JSON API, you can keep your model structs as is and
+use [StructTags](http://golang.org/pkg/reflect/#StructTag) to indicate
+to JSON API how you want your response built or your request
+deserialized. What about your relationships? JSON API supports
+relationships out of the box and will even put them in your response
+into an `included` side-loaded slice that contains associated records.
+
+## Introduction
+
+JSON API uses [StructField](http://golang.org/pkg/reflect/#StructField)
+tags to annotate the struct fields that you already have and use in
+your app and then reads and writes [JSON API](http://jsonapi.org)
+output based on the instructions you give the library in your JSON API
+tags. Let's take an example. In your app, you most likely have structs
+that look similar to these:
+
+
+```go
+type Blog struct {
+ ID int `json:"id"`
+ Title string `json:"title"`
+ Posts []*Post `json:"posts"`
+ CurrentPost *Post `json:"current_post"`
+ CurrentPostId int `json:"current_post_id"`
+ CreatedAt time.Time `json:"created_at"`
+ ViewCount int `json:"view_count"`
+}
+
+type Post struct {
+ ID int `json:"id"`
+ BlogID int `json:"blog_id"`
+ Title string `json:"title"`
+ Body string `json:"body"`
+ Comments []*Comment `json:"comments"`
+}
+
+type Comment struct {
+ Id int `json:"id"`
+ PostID int `json:"post_id"`
+ Body string `json:"body"`
+ Likes uint `json:"likes_count,omitempty"`
+}
+```
+
+These structs may or may not resemble the layout of your database. But
+these are the ones that you want to use, right? You wouldn't want to use
+structs like those that JSON API sends because it is difficult to get at
+all of your data easily.
+
+## Example App
+
+[examples/app.go](https://github.com/google/jsonapi/blob/master/examples/app.go)
+
+This program demonstrates the implementation of a create, a show,
+and a list [http.Handler](http://golang.org/pkg/net/http#Handler). It
+outputs some example requests and responses as well as serialized
+examples of the source/target structs to json. That is to say, I show
+you that the library has successfully taken your JSON API request and
+turned it into your struct types.
+
+To run,
+
+* Make sure you have [Go installed](https://golang.org/doc/install)
+* Create the following directories or similar: `~/go`
+* Set `GOPATH` to `PWD` in your shell session, `export GOPATH=$PWD`
+* `go get github.com/google/jsonapi`. (Append `-u` after `get` if you
+ are updating.)
+* `cd $GOPATH/src/github.com/google/jsonapi/examples`
+* `go build && ./examples`
+
+## `jsonapi` Tag Reference
+
+### Example
+
+The `jsonapi` [StructTags](http://golang.org/pkg/reflect/#StructTag)
+tell this library how to marshal and unmarshal your structs into
+JSON API payloads and your JSON API payloads to structs, respectively.
+Then use JSON API's Marshal and Unmarshal methods to construct and read
+your responses and replies. Here's an example of the structs above
+using JSON API tags:
+
+```go
+type Blog struct {
+ ID int `jsonapi:"primary,blogs"`
+ Title string `jsonapi:"attr,title"`
+ Posts []*Post `jsonapi:"relation,posts"`
+ CurrentPost *Post `jsonapi:"relation,current_post"`
+ CurrentPostID int `jsonapi:"attr,current_post_id"`
+ CreatedAt time.Time `jsonapi:"attr,created_at"`
+ ViewCount int `jsonapi:"attr,view_count"`
+}
+
+type Post struct {
+ ID int `jsonapi:"primary,posts"`
+ BlogID int `jsonapi:"attr,blog_id"`
+ Title string `jsonapi:"attr,title"`
+ Body string `jsonapi:"attr,body"`
+ Comments []*Comment `jsonapi:"relation,comments"`
+}
+
+type Comment struct {
+ ID int `jsonapi:"primary,comments"`
+ PostID int `jsonapi:"attr,post_id"`
+ Body string `jsonapi:"attr,body"`
+ Likes uint `jsonapi:"attr,likes-count,omitempty"`
+}
+```
+
+### Permitted Tag Values
+
+#### `primary`
+
+```
+`jsonapi:"primary,<type field output>"`
+```
+
+This indicates that this is the primary key field for this struct type.
+Tag value arguments are comma separated. The first argument must be
+`primary`, and the second must be the name that should appear in the
+`type`\* field for all data objects that represent this type of model.
+
+\* According to the [JSON API](http://jsonapi.org) spec, the plural record
+types are shown in the examples, but are not required.
+
+#### `attr`
+
+```
+`jsonapi:"attr,<key name in attributes hash>,<optional: omitempty>"`
+```
+
+These fields' values will end up in the `attributes` hash for a record.
+The first argument must be `attr`, and the second should be the name
+for the key to display in the `attributes` hash for that record. The optional
+third argument is `omitempty` - if it is present, the field will not appear
+in the `"attributes"` hash if the field's value is equivalent to the field
+type's empty value (i.e., if the `count` field is of type `int`, `omitempty`
+will omit the field when `count` has a value of `0`). Lastly, the spec
+indicates that `attributes` key names should be dasherized for
+multiple-word field names.
+
+#### `relation`
+
+```
+`jsonapi:"relation,<key name in relationships hash>,<optional: omitempty>"`
+```
+
+Relations are struct fields that represent a one-to-one or one-to-many
+relationship with other structs. JSON API will traverse the graph of
+relationships and marshal or unmarshal records. The first argument must
+be `relation`, and the second should be the name of the relationship,
+used as the key in the `relationships` hash for the record. The optional
+third argument is `omitempty` - if present, it will prevent non-existent
+to-one and to-many relationships from being serialized.
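+
+A minimal sketch (the types and field names are illustrative) of a to-one
+relation that is skipped when nil:
+
+```go
+type Post struct {
+	ID     int     `jsonapi:"primary,posts"`
+	Title  string  `jsonapi:"attr,title"`
+	Author *Author `jsonapi:"relation,author,omitempty"` // omitted from "relationships" when nil
+}
+```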
+
+## Methods Reference
+
+**All `Marshal` and `Unmarshal` methods expect pointers to struct
+instances or slices of the same contained within the `interface{}`s**
+
+Now that you have your structs prepared to be serialized or materialized,
+what about the rest?
+
+### Create Record Example
+
+You can Unmarshal a JSON API payload using
+[jsonapi.UnmarshalPayload](http://godoc.org/github.com/google/jsonapi#UnmarshalPayload).
+It reads from an [io.Reader](https://golang.org/pkg/io/#Reader)
+containing a JSON API payload for one record (but can have related
+records). Then, it materializes a struct that you created and passed in
+(using new or &). Again, the method supports single records only, at
+the top level, in request payloads at the moment. Bulk creates and
+updates are not supported yet.
+
+After saving your record, you can use
+[MarshalOnePayload](http://godoc.org/github.com/google/jsonapi#MarshalOnePayload)
+to write the JSON API response to an
+[io.Writer](https://golang.org/pkg/io/#Writer).
+
+#### `UnmarshalPayload`
+
+```go
+UnmarshalPayload(in io.Reader, model interface{})
+```
+
+Visit [godoc](http://godoc.org/github.com/google/jsonapi#UnmarshalPayload)
+
+#### `MarshalPayload`
+
+```go
+MarshalPayload(w io.Writer, models interface{}) error
+```
+
+Visit [godoc](http://godoc.org/github.com/google/jsonapi#MarshalPayload)
+
+Writes a JSON API response, with related records sideloaded, into an
+`included` array. This method encodes a response for either a single record or
+many records.
+
+##### Handler Example Code
+
+```go
+func CreateBlog(w http.ResponseWriter, r *http.Request) {
+ blog := new(Blog)
+
+ if err := jsonapi.UnmarshalPayload(r.Body, blog); err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ // ...save your blog...
+
+ w.Header().Set("Content-Type", jsonapi.MediaType)
+ w.WriteHeader(http.StatusCreated)
+
+ if err := jsonapi.MarshalPayload(w, blog); err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ }
+}
+```
+
+### Create Records Example
+
+#### `UnmarshalManyPayload`
+
+```go
+UnmarshalManyPayload(in io.Reader, t reflect.Type) ([]interface{}, error)
+```
+
+Visit [godoc](http://godoc.org/github.com/google/jsonapi#UnmarshalManyPayload)
+
+Takes an `io.Reader` and a `reflect.Type` representing the uniform type
+contained within the `"data"` JSON API member.
+
+##### Handler Example Code
+
+```go
+func CreateBlogs(w http.ResponseWriter, r *http.Request) {
+ // ...create many blogs at once
+
+	blogs, err := jsonapi.UnmarshalManyPayload(r.Body, reflect.TypeOf(new(Blog)))
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+ for _, blog := range blogs {
+ b, ok := blog.(*Blog)
+ // ...save each of your blogs
+ }
+
+ w.Header().Set("Content-Type", jsonapi.MediaType)
+ w.WriteHeader(http.StatusCreated)
+
+ if err := jsonapi.MarshalPayload(w, blogs); err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ }
+}
+```
+
+
+### Links
+
+If you need to include [link objects](http://jsonapi.org/format/#document-links) along with response data, implement the `Linkable` interface for document-links, and `RelationshipLinkable` for relationship links:
+
+```go
+func (post Post) JSONAPILinks() *Links {
+ return &Links{
+		"self": fmt.Sprintf("https://example.com/posts/%d", post.ID),
+ "comments": Link{
+ Href: fmt.Sprintf("https://example.com/api/blogs/%d/comments", post.ID),
+ Meta: map[string]interface{}{
+ "counts": map[string]uint{
+ "likes": 4,
+ },
+ },
+ },
+ }
+}
+
+// Invoked for each relationship defined on the Post struct when marshaled
+func (post Post) JSONAPIRelationshipLinks(relation string) *Links {
+ if relation == "comments" {
+ return &Links{
+ "related": fmt.Sprintf("https://example.com/posts/%d/comments", post.ID),
+ }
+ }
+ return nil
+}
+```
+
+### Meta
+
+If you need to include [meta objects](http://jsonapi.org/format/#document-meta) along with response data, implement the `Metable` interface for document-meta, and `RelationshipMetable` for relationship meta:
+
+```go
+func (post Post) JSONAPIMeta() *Meta {
+ return &Meta{
+ "details": "sample details here",
+ }
+}
+
+// Invoked for each relationship defined on the Post struct when marshaled
+func (post Post) JSONAPIRelationshipMeta(relation string) *Meta {
+ if relation == "comments" {
+ return &Meta{
+ "this": map[string]interface{}{
+ "can": map[string]interface{}{
+ "go": []interface{}{
+ "as",
+ "deep",
+ map[string]interface{}{
+ "as": "required",
+ },
+ },
+ },
+ },
+ }
+ }
+ return nil
+}
+```
+
+### Custom types
+
+Custom types are supported only for primitive types, as attributes. Examples:
+
+```go
+type CustomIntType int
+type CustomFloatType float64
+type CustomStringType string
+```
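+
+A minimal sketch of using such a type as an attribute (the struct is
+illustrative, not from this library's examples):
+
+```go
+type Widget struct {
+	ID    int              `jsonapi:"primary,widgets"`
+	Label CustomStringType `jsonapi:"attr,label"`
+}
+```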
+
+Types like the following are not supported, but may be in the future:
+
+```go
+type CustomMapType map[string]interface{}
+type CustomSliceMapType []map[string]interface{}
+```
+
+### Errors
+This package also implements support for JSON API compatible `errors` payloads using the following types.
+
+#### `MarshalErrors`
+```go
+MarshalErrors(w io.Writer, errs []*ErrorObject) error
+```
+
+Writes a JSON API response using the given `[]*ErrorObject`.
+
+#### `ErrorsPayload`
+```go
+type ErrorsPayload struct {
+ Errors []*ErrorObject `json:"errors"`
+}
+```
+
+ErrorsPayload is a serializer struct for representing a valid JSON API errors payload.
+
+#### `ErrorObject`
+```go
+type ErrorObject struct { ... }
+
+// Error implements the `Error` interface.
+func (e *ErrorObject) Error() string {
+ return fmt.Sprintf("Error: %s %s\n", e.Title, e.Detail)
+}
+```
+
+ErrorObject is an `Error` implementation as well as an implementation of the JSON API error object.
+
+The main idea behind this struct is that you can use it directly in your code as an error type and pass it directly to `MarshalErrors` to get a valid JSON API errors payload.
+
+##### Errors Example Code
+```go
+// An error has come up in your code, so set an appropriate status, and serialize the error.
+if err := validate(&myStructToValidate); err != nil {
+ context.SetStatusCode(http.StatusBadRequest) // Or however you need to set a status.
+ jsonapi.MarshalErrors(w, []*ErrorObject{{
+ Title: "Validation Error",
+ Detail: "Given request body was invalid.",
+ Status: "400",
+ Meta: map[string]interface{}{"field": "some_field", "error": "bad type", "expected": "string", "received": "float64"},
+ }})
+ return
+}
+```
+
+## Testing
+
+### `MarshalOnePayloadEmbedded`
+
+```go
+MarshalOnePayloadEmbedded(w io.Writer, model interface{}) error
+```
+
+Visit [godoc](http://godoc.org/github.com/google/jsonapi#MarshalOnePayloadEmbedded)
+
+This method is not strictly meant for use in implementation code,
+although feel free. It was mainly created for use in tests; in most cases,
+your request payloads for create will be embedded rather than sideloaded
+for related records. This method will serialize a single struct pointer
+into an embedded json response. In other words, there will be no
+`included` array in the json; all relationships will be serialized
+inline with the data.
+
+However, in tests, you may want to construct payloads to post to create
+methods that are embedded to most closely model the payloads that will
+be produced by the client. This method aims to enable that.
+
+### Example
+
+```go
+out := bytes.NewBuffer(nil)
+
+// testModel returns a pointer to a Blog
+jsonapi.MarshalOnePayloadEmbedded(out, testModel())
+
+h := new(BlogsHandler)
+
+w := httptest.NewRecorder()
+r, _ := http.NewRequest(http.MethodPost, "/blogs", out)
+
+h.CreateBlog(w, r)
+
+blog := new(Blog)
+jsonapi.UnmarshalPayload(w.Body, blog)
+
+// ... assert stuff about blog here ...
+```
+
+## Alternative Installation
+I use git subtrees to manage dependencies rather than `go get` so that
+the src is committed to my repo.
+
+```
+git subtree add --squash --prefix=src/github.com/google/jsonapi https://github.com/google/jsonapi.git master
+```
+
+To update,
+
+```
+git subtree pull --squash --prefix=src/github.com/google/jsonapi https://github.com/google/jsonapi.git master
+```
+
+This assumes that I have my repo structured with a `src` dir containing
+a collection of packages and that `GOPATH` is set to the root
+folder containing `src`.
+
+## Contributing
+
+Fork, Change, Pull Request *with tests*.
diff --git a/vendor/github.com/google/jsonapi/constants.go b/vendor/github.com/google/jsonapi/constants.go
new file mode 100644
index 0000000..35bbe05
--- /dev/null
+++ b/vendor/github.com/google/jsonapi/constants.go
@@ -0,0 +1,56 @@
+package jsonapi
+
+const (
+ // StructTag annotation strings
+ annotationJSONAPI = "jsonapi"
+ annotationPrimary = "primary"
+ annotationClientID = "client-id"
+ annotationAttribute = "attr"
+ annotationRelation = "relation"
+ annotationOmitEmpty = "omitempty"
+ annotationISO8601 = "iso8601"
+ annotationRFC3339 = "rfc3339"
+ annotationSeperator = ","
+
+ iso8601TimeFormat = "2006-01-02T15:04:05Z"
+
+ // MediaType is the identifier for the JSON API media type
+ //
+ // see http://jsonapi.org/format/#document-structure
+ MediaType = "application/vnd.api+json"
+
+ // Pagination Constants
+ //
+ // http://jsonapi.org/format/#fetching-pagination
+
+ // KeyFirstPage is the key to the links object whose value contains a link to
+ // the first page of data
+ KeyFirstPage = "first"
+ // KeyLastPage is the key to the links object whose value contains a link to
+ // the last page of data
+ KeyLastPage = "last"
+ // KeyPreviousPage is the key to the links object whose value contains a link
+ // to the previous page of data
+ KeyPreviousPage = "prev"
+ // KeyNextPage is the key to the links object whose value contains a link to
+ // the next page of data
+ KeyNextPage = "next"
+
+ // QueryParamPageNumber is a JSON API query parameter used in a page based
+ // pagination strategy in conjunction with QueryParamPageSize
+ QueryParamPageNumber = "page[number]"
+ // QueryParamPageSize is a JSON API query parameter used in a page based
+ // pagination strategy in conjunction with QueryParamPageNumber
+ QueryParamPageSize = "page[size]"
+
+ // QueryParamPageOffset is a JSON API query parameter used in an offset based
+ // pagination strategy in conjunction with QueryParamPageLimit
+ QueryParamPageOffset = "page[offset]"
+ // QueryParamPageLimit is a JSON API query parameter used in an offset based
+ // pagination strategy in conjunction with QueryParamPageOffset
+ QueryParamPageLimit = "page[limit]"
+
+ // QueryParamPageCursor is a JSON API query parameter used with a cursor-based
+ // strategy
+ QueryParamPageCursor = "page[cursor]"
+)
diff --git a/vendor/github.com/google/jsonapi/doc.go b/vendor/github.com/google/jsonapi/doc.go
new file mode 100644
index 0000000..ba4068a
--- /dev/null
+++ b/vendor/github.com/google/jsonapi/doc.go
@@ -0,0 +1,70 @@
+/*
+Package jsonapi provides a serializer and deserializer for jsonapi.org spec payloads.
+
+You can keep your model structs as is and use struct field tags to indicate to jsonapi
+how you want your response built or your request deserialized. What about my relationships?
+jsonapi supports relationships out of the box and will even side load them in your response
+into an "included" array--that contains associated objects.
+
+jsonapi uses StructField tags to annotate the struct fields that you already have and use
+in your app and then reads and writes jsonapi.org output based on the instructions you give
+the library in your jsonapi tags.
+
+Example structs using a Blog > Post > Comment structure,
+
+ type Blog struct {
+ ID int `jsonapi:"primary,blogs"`
+ Title string `jsonapi:"attr,title"`
+ Posts []*Post `jsonapi:"relation,posts"`
+ CurrentPost *Post `jsonapi:"relation,current_post"`
+ CurrentPostID int `jsonapi:"attr,current_post_id"`
+ CreatedAt time.Time `jsonapi:"attr,created_at"`
+ ViewCount int `jsonapi:"attr,view_count"`
+ }
+
+ type Post struct {
+ ID int `jsonapi:"primary,posts"`
+ BlogID int `jsonapi:"attr,blog_id"`
+ Title string `jsonapi:"attr,title"`
+ Body string `jsonapi:"attr,body"`
+ Comments []*Comment `jsonapi:"relation,comments"`
+ }
+
+ type Comment struct {
+ ID int `jsonapi:"primary,comments"`
+ PostID int `jsonapi:"attr,post_id"`
+ Body string `jsonapi:"attr,body"`
+ }
+
+jsonapi Tag Reference
+
+Value, primary: "primary,<type field output>"
+
+This indicates that this is the primary key field for this struct type. Tag
+value arguments are comma separated. The first argument must be, "primary", and
+the second must be the name that should appear in the "type" field for all data
+objects that represent this type of model.
+
+Value, attr: "attr,<key name in attributes hash>[,<extra arguments>]"
+
+These fields' values should end up in the "attributes" hash for a record. The first
+argument must be "attr", and the second should be the name for the key to display in
+the "attributes" hash for that record.
+
+The following extra arguments are also supported:
+
+"omitempty": excludes the fields value from the "attribute" hash.
+"iso8601": uses the ISO8601 timestamp format when serialising or deserialising the time.Time value.
+
+Value, relation: "relation,<key name in relationships hash>"
+
+Relations are struct fields that represent a one-to-one or one-to-many relationship with other structs.
+jsonapi will traverse the graph of relationships and marshal or unmarshal records. The first
+argument must be, "relation", and the second should be the name of the relationship, used as
+the key in the "relationships" hash for the record.
+
+Use the methods below to Marshal and Unmarshal jsonapi.org json payloads.
+
+Visit the readme at https://github.com/google/jsonapi
+*/
+package jsonapi
diff --git a/vendor/github.com/google/jsonapi/errors.go b/vendor/github.com/google/jsonapi/errors.go
new file mode 100644
index 0000000..798fed0
--- /dev/null
+++ b/vendor/github.com/google/jsonapi/errors.go
@@ -0,0 +1,52 @@
+package jsonapi
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+)
+
+// MarshalErrors writes a JSON API response using the given `[]*ErrorObject`.
+//
+// For more information on JSON API error payloads, see the spec here:
+// http://jsonapi.org/format/#document-top-level
+// and here: http://jsonapi.org/format/#error-objects.
+func MarshalErrors(w io.Writer, errorObjects []*ErrorObject) error {
+ return json.NewEncoder(w).Encode(&ErrorsPayload{Errors: errorObjects})
+}
+
+// ErrorsPayload is a serializer struct for representing a valid JSON API errors payload.
+type ErrorsPayload struct {
+ Errors []*ErrorObject `json:"errors"`
+}
+
+// ErrorObject is an `Error` implementation as well as an implementation of the JSON API error object.
+//
+// The main idea behind this struct is that you can use it directly in your code as an error type
+// and pass it directly to `MarshalErrors` to get a valid JSON API errors payload.
+// For more information on Golang errors, see: https://golang.org/pkg/errors/
+// For more information on the JSON API spec's error objects, see: http://jsonapi.org/format/#error-objects
+type ErrorObject struct {
+ // ID is a unique identifier for this particular occurrence of a problem.
+ ID string `json:"id,omitempty"`
+
+ // Title is a short, human-readable summary of the problem that SHOULD NOT change from occurrence to occurrence of the problem, except for purposes of localization.
+ Title string `json:"title,omitempty"`
+
+ // Detail is a human-readable explanation specific to this occurrence of the problem. Like title, this field’s value can be localized.
+ Detail string `json:"detail,omitempty"`
+
+ // Status is the HTTP status code applicable to this problem, expressed as a string value.
+ Status string `json:"status,omitempty"`
+
+ // Code is an application-specific error code, expressed as a string value.
+ Code string `json:"code,omitempty"`
+
+ // Meta is an object containing non-standard meta-information about the error.
+ Meta *map[string]interface{} `json:"meta,omitempty"`
+}
+
+// Error implements the `Error` interface.
+func (e *ErrorObject) Error() string {
+ return fmt.Sprintf("Error: %s %s\n", e.Title, e.Detail)
+}
diff --git a/vendor/github.com/google/jsonapi/node.go b/vendor/github.com/google/jsonapi/node.go
new file mode 100644
index 0000000..a58488c
--- /dev/null
+++ b/vendor/github.com/google/jsonapi/node.go
@@ -0,0 +1,121 @@
+package jsonapi
+
+import "fmt"
+
+// Payloader is used to encapsulate the One and Many payload types
+type Payloader interface {
+ clearIncluded()
+}
+
+// OnePayload is used to represent a generic JSON API payload where a single
+// resource (Node) was included as an {} in the "data" key
+type OnePayload struct {
+ Data *Node `json:"data"`
+ Included []*Node `json:"included,omitempty"`
+ Links *Links `json:"links,omitempty"`
+ Meta *Meta `json:"meta,omitempty"`
+}
+
+func (p *OnePayload) clearIncluded() {
+ p.Included = []*Node{}
+}
+
+// ManyPayload is used to represent a generic JSON API payload where many
+// resources (Nodes) were included in an [] in the "data" key
+type ManyPayload struct {
+ Data []*Node `json:"data"`
+ Included []*Node `json:"included,omitempty"`
+ Links *Links `json:"links,omitempty"`
+ Meta *Meta `json:"meta,omitempty"`
+}
+
+func (p *ManyPayload) clearIncluded() {
+ p.Included = []*Node{}
+}
+
+// Node is used to represent a generic JSON API Resource
+type Node struct {
+ Type string `json:"type"`
+ ID string `json:"id,omitempty"`
+ ClientID string `json:"client-id,omitempty"`
+ Attributes map[string]interface{} `json:"attributes,omitempty"`
+ Relationships map[string]interface{} `json:"relationships,omitempty"`
+ Links *Links `json:"links,omitempty"`
+ Meta *Meta `json:"meta,omitempty"`
+}
+
+// RelationshipOneNode is used to represent a generic has one JSON API relation
+type RelationshipOneNode struct {
+ Data *Node `json:"data"`
+ Links *Links `json:"links,omitempty"`
+ Meta *Meta `json:"meta,omitempty"`
+}
+
+// RelationshipManyNode is used to represent a generic has many JSON API
+// relation
+type RelationshipManyNode struct {
+ Data []*Node `json:"data"`
+ Links *Links `json:"links,omitempty"`
+ Meta *Meta `json:"meta,omitempty"`
+}
+
+// Links is used to represent a `links` object.
+// http://jsonapi.org/format/#document-links
+type Links map[string]interface{}
+
+func (l *Links) validate() (err error) {
+ // Each member of a links object is a “link”. A link MUST be represented as
+ // either:
+ // - a string containing the link’s URL.
+ // - an object (“link object”) which can contain the following members:
+ // - href: a string containing the link’s URL.
+ // - meta: a meta object containing non-standard meta-information about the
+ // link.
+ for k, v := range *l {
+ _, isString := v.(string)
+ _, isLink := v.(Link)
+
+ if !(isString || isLink) {
+ return fmt.Errorf(
+ "The %s member of the links object was not a string or link object",
+ k,
+ )
+ }
+ }
+ return
+}
+
+// Link is used to represent a member of the `links` object.
+type Link struct {
+ Href string `json:"href"`
+ Meta Meta `json:"meta,omitempty"`
+}
+
+// Linkable is used to include document links in response data
+// e.g. {"self": "http://example.com/posts/1"}
+type Linkable interface {
+ JSONAPILinks() *Links
+}
+
+// RelationshipLinkable is used to include relationship links in response data
+// e.g. {"related": "http://example.com/posts/1/comments"}
+type RelationshipLinkable interface {
+ // JSONAPIRelationshipLinks will be invoked for each relationship with the corresponding relation name (e.g. `comments`)
+ JSONAPIRelationshipLinks(relation string) *Links
+}
+
+// Meta is used to represent a `meta` object.
+// http://jsonapi.org/format/#document-meta
+type Meta map[string]interface{}
+
+// Metable is used to include document meta in response data
+// e.g. {"foo": "bar"}
+type Metable interface {
+ JSONAPIMeta() *Meta
+}
+
+// RelationshipMetable is used to include relationship meta in response data
+type RelationshipMetable interface {
+	// JSONAPIRelationshipMeta will be invoked for each relationship with the corresponding relation name (e.g. `comments`)
+ JSONAPIRelationshipMeta(relation string) *Meta
+}
diff --git a/vendor/github.com/google/jsonapi/request.go b/vendor/github.com/google/jsonapi/request.go
new file mode 100644
index 0000000..f665857
--- /dev/null
+++ b/vendor/github.com/google/jsonapi/request.go
@@ -0,0 +1,656 @@
+package jsonapi
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+)
+
+const (
+ unsupportedStructTagMsg = "Unsupported jsonapi tag annotation, %s"
+)
+
+var (
+ // ErrInvalidTime is returned when a struct has a time.Time type field, but
+ // the JSON value was not a unix timestamp integer.
+ ErrInvalidTime = errors.New("Only numbers can be parsed as dates, unix timestamps")
+ // ErrInvalidISO8601 is returned when a struct has a time.Time type field and includes
+ // "iso8601" in the tag spec, but the JSON value was not an ISO8601 timestamp string.
+ ErrInvalidISO8601 = errors.New("Only strings can be parsed as dates, ISO8601 timestamps")
+ // ErrInvalidRFC3339 is returned when a struct has a time.Time type field and includes
+ // "rfc3339" in the tag spec, but the JSON value was not an RFC3339 timestamp string.
+ ErrInvalidRFC3339 = errors.New("Only strings can be parsed as dates, RFC3339 timestamps")
+ // ErrUnknownFieldNumberType is returned when the JSON value was a float
+ // (numeric) but the Struct field was a non numeric type (i.e. not int, uint,
+ // float, etc)
+ ErrUnknownFieldNumberType = errors.New("The struct field was not of a known number type")
+ // ErrInvalidType is returned when the given type is incompatible with the expected type.
+ ErrInvalidType = errors.New("Invalid type provided") // I wish we used punctuation.
+
+)
+
+// ErrUnsupportedPtrType is returned when the Struct field was a pointer but
+// the JSON value was of a different type
+type ErrUnsupportedPtrType struct {
+ rf reflect.Value
+ t reflect.Type
+ structField reflect.StructField
+}
+
+func (eupt ErrUnsupportedPtrType) Error() string {
+ typeName := eupt.t.Elem().Name()
+ kind := eupt.t.Elem().Kind()
+ if kind.String() != "" && kind.String() != typeName {
+ typeName = fmt.Sprintf("%s (%s)", typeName, kind.String())
+ }
+ return fmt.Sprintf(
+ "jsonapi: Can't unmarshal %+v (%s) to struct field `%s`, which is a pointer to `%s`",
+ eupt.rf, eupt.rf.Type().Kind(), eupt.structField.Name, typeName,
+ )
+}
+
+func newErrUnsupportedPtrType(rf reflect.Value, t reflect.Type, structField reflect.StructField) error {
+ return ErrUnsupportedPtrType{rf, t, structField}
+}
+
+// UnmarshalPayload converts an io.Reader into a struct instance using jsonapi tags on
+// struct fields. This method supports single request payloads only, at the
+// moment. Bulk creates and updates are not supported yet.
+//
+// Will Unmarshal embedded and sideloaded payloads. The latter is only possible if the
+// object graph is complete. That is, in the "relationships" data there are type and id
+// keys that correspond to records in the "included" array.
+//
+// For example you could pass it, in, req.Body and, model, a BlogPost
+// struct instance to populate in an http handler,
+//
+// func CreateBlog(w http.ResponseWriter, r *http.Request) {
+// blog := new(Blog)
+//
+// if err := jsonapi.UnmarshalPayload(r.Body, blog); err != nil {
+// http.Error(w, err.Error(), 500)
+// return
+// }
+//
+// // ...do stuff with your blog...
+//
+// w.Header().Set("Content-Type", jsonapi.MediaType)
+// w.WriteHeader(201)
+//
+// if err := jsonapi.MarshalPayload(w, blog); err != nil {
+// http.Error(w, err.Error(), 500)
+// }
+// }
+//
+//
+// Visit https://github.com/google/jsonapi#create for more info.
+//
+// model interface{} should be a pointer to a struct.
+func UnmarshalPayload(in io.Reader, model interface{}) error {
+ payload := new(OnePayload)
+
+ if err := json.NewDecoder(in).Decode(payload); err != nil {
+ return err
+ }
+
+ if payload.Included != nil {
+ includedMap := make(map[string]*Node)
+ for _, included := range payload.Included {
+ key := fmt.Sprintf("%s,%s", included.Type, included.ID)
+ includedMap[key] = included
+ }
+
+ return unmarshalNode(payload.Data, reflect.ValueOf(model), &includedMap)
+ }
+ return unmarshalNode(payload.Data, reflect.ValueOf(model), nil)
+}
+
+// UnmarshalManyPayload converts an io.Reader into a set of struct instances using
+// jsonapi tags on the type's struct fields.
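+//
+// For example (an editorial sketch; Blog and the ListBlogs handler are
+// hypothetical), a list endpoint could decode a "data": [...] payload:
+//
+//    func ListBlogs(w http.ResponseWriter, r *http.Request) {
+//        objs, err := jsonapi.UnmarshalManyPayload(r.Body, reflect.TypeOf(new(Blog)))
+//        if err != nil {
+//            http.Error(w, err.Error(), http.StatusBadRequest)
+//            return
+//        }
+//
+//        for _, obj := range objs {
+//            blog, ok := obj.(*Blog)
+//            if !ok {
+//                http.Error(w, "unexpected type", http.StatusInternalServerError)
+//                return
+//            }
+//            _ = blog // ...do stuff with each blog...
+//        }
+//    }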
+func UnmarshalManyPayload(in io.Reader, t reflect.Type) ([]interface{}, error) {
+ payload := new(ManyPayload)
+
+ if err := json.NewDecoder(in).Decode(payload); err != nil {
+ return nil, err
+ }
+
+ models := []interface{}{} // will be populated from the "data"
+ includedMap := map[string]*Node{} // will be populate from the "included"
+
+ if payload.Included != nil {
+ for _, included := range payload.Included {
+ key := fmt.Sprintf("%s,%s", included.Type, included.ID)
+ includedMap[key] = included
+ }
+ }
+
+ for _, data := range payload.Data {
+ model := reflect.New(t.Elem())
+ err := unmarshalNode(data, model, &includedMap)
+ if err != nil {
+ return nil, err
+ }
+ models = append(models, model.Interface())
+ }
+
+ return models, nil
+}
+
+func unmarshalNode(data *Node, model reflect.Value, included *map[string]*Node) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ err = fmt.Errorf("data is not a jsonapi representation of '%v'", model.Type())
+ }
+ }()
+
+ modelValue := model.Elem()
+ modelType := modelValue.Type()
+
+ var er error
+
+ for i := 0; i < modelValue.NumField(); i++ {
+ fieldType := modelType.Field(i)
+ tag := fieldType.Tag.Get("jsonapi")
+ if tag == "" {
+ continue
+ }
+
+ fieldValue := modelValue.Field(i)
+
+ args := strings.Split(tag, ",")
+ if len(args) < 1 {
+ er = ErrBadJSONAPIStructTag
+ break
+ }
+
+ annotation := args[0]
+
+ if (annotation == annotationClientID && len(args) != 1) ||
+ (annotation != annotationClientID && len(args) < 2) {
+ er = ErrBadJSONAPIStructTag
+ break
+ }
+
+ if annotation == annotationPrimary {
+ // Check the JSON API Type
+ if data.Type != args[1] {
+ er = fmt.Errorf(
+ "Trying to Unmarshal an object of type %#v, but %#v does not match",
+ data.Type,
+ args[1],
+ )
+ break
+ }
+
+ if data.ID == "" {
+ continue
+ }
+
+ // ID will have to be transmitted as a string per the JSON API spec
+ v := reflect.ValueOf(data.ID)
+
+ // Deal with PTRS
+ var kind reflect.Kind
+ if fieldValue.Kind() == reflect.Ptr {
+ kind = fieldType.Type.Elem().Kind()
+ } else {
+ kind = fieldType.Type.Kind()
+ }
+
+ // Handle String case
+ if kind == reflect.String {
+ assign(fieldValue, v)
+ continue
+ }
+
+ // Value was not a string... only other supported type was a numeric,
+ // which would have been sent as a float value.
+ floatValue, err := strconv.ParseFloat(data.ID, 64)
+ if err != nil {
+ // Could not convert the value in the "id" attr to a float
+ er = ErrBadJSONAPIID
+ break
+ }
+
+ // Convert the numeric float to one of the supported ID numeric types
+ // (int[8,16,32,64] or uint[8,16,32,64])
+ idValue, err := handleNumeric(floatValue, fieldType.Type, fieldValue)
+ if err != nil {
+ // We had a JSON float (numeric), but our field was not one of the
+ // allowed numeric types
+ er = ErrBadJSONAPIID
+ break
+ }
+
+ assign(fieldValue, idValue)
+ } else if annotation == annotationClientID {
+ if data.ClientID == "" {
+ continue
+ }
+
+ fieldValue.Set(reflect.ValueOf(data.ClientID))
+ } else if annotation == annotationAttribute {
+ attributes := data.Attributes
+
+ if attributes == nil || len(data.Attributes) == 0 {
+ continue
+ }
+
+ attribute := attributes[args[1]]
+
+ // continue if the attribute was not included in the request
+ if attribute == nil {
+ continue
+ }
+
+ structField := fieldType
+ value, err := unmarshalAttribute(attribute, args, structField, fieldValue)
+ if err != nil {
+ er = err
+ break
+ }
+
+ assign(fieldValue, value)
+ } else if annotation == annotationRelation {
+ isSlice := fieldValue.Type().Kind() == reflect.Slice
+
+ if data.Relationships == nil || data.Relationships[args[1]] == nil {
+ continue
+ }
+
+ if isSlice {
+ // to-many relationship
+ relationship := new(RelationshipManyNode)
+
+ buf := bytes.NewBuffer(nil)
+
+ json.NewEncoder(buf).Encode(data.Relationships[args[1]])
+ json.NewDecoder(buf).Decode(relationship)
+
+ data := relationship.Data
+ models := reflect.New(fieldValue.Type()).Elem()
+
+ for _, n := range data {
+ m := reflect.New(fieldValue.Type().Elem().Elem())
+
+ if err := unmarshalNode(
+ fullNode(n, included),
+ m,
+ included,
+ ); err != nil {
+ er = err
+ break
+ }
+
+ models = reflect.Append(models, m)
+ }
+
+ fieldValue.Set(models)
+ } else {
+ // to-one relationships
+ relationship := new(RelationshipOneNode)
+
+ buf := bytes.NewBuffer(nil)
+
+ json.NewEncoder(buf).Encode(
+ data.Relationships[args[1]],
+ )
+ json.NewDecoder(buf).Decode(relationship)
+
+ /*
+ http://jsonapi.org/format/#document-resource-object-relationships
+ http://jsonapi.org/format/#document-resource-object-linkage
+ relationship can have a data node set to null (e.g. to disassociate the relationship)
+ so unmarshal and set fieldValue only if data obj is not null
+ */
+ if relationship.Data == nil {
+ continue
+ }
+
+ m := reflect.New(fieldValue.Type().Elem())
+ if err := unmarshalNode(
+ fullNode(relationship.Data, included),
+ m,
+ included,
+ ); err != nil {
+ er = err
+ break
+ }
+
+ fieldValue.Set(m)
+
+ }
+
+ } else {
+ er = fmt.Errorf(unsupportedStructTagMsg, annotation)
+ }
+ }
+
+ return er
+}
+
+func fullNode(n *Node, included *map[string]*Node) *Node {
+ includedKey := fmt.Sprintf("%s,%s", n.Type, n.ID)
+
+ if included != nil && (*included)[includedKey] != nil {
+ return (*included)[includedKey]
+ }
+
+ return n
+}
+
+// assign will take the value specified and assign it to the field; if
+// field is expecting a ptr assign will assign a ptr.
+func assign(field, value reflect.Value) {
+ value = reflect.Indirect(value)
+
+ if field.Kind() == reflect.Ptr {
+ // initialize pointer so its value
+ // can be set by assignValue
+ field.Set(reflect.New(field.Type().Elem()))
+ field = field.Elem()
+
+ }
+
+ assignValue(field, value)
+}
+
+// assignValue assigns the specified value to the field,
+// expecting both values not to be pointer types.
+func assignValue(field, value reflect.Value) {
+ switch field.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16,
+ reflect.Int32, reflect.Int64:
+ field.SetInt(value.Int())
+ case reflect.Uint, reflect.Uint8, reflect.Uint16,
+ reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ field.SetUint(value.Uint())
+ case reflect.Float32, reflect.Float64:
+ field.SetFloat(value.Float())
+ case reflect.String:
+ field.SetString(value.String())
+ case reflect.Bool:
+ field.SetBool(value.Bool())
+ default:
+ field.Set(value)
+ }
+}
+
+func unmarshalAttribute(
+ attribute interface{},
+ args []string,
+ structField reflect.StructField,
+ fieldValue reflect.Value) (value reflect.Value, err error) {
+ value = reflect.ValueOf(attribute)
+ fieldType := structField.Type
+
+ // Handle field of type []string
+ if fieldValue.Type() == reflect.TypeOf([]string{}) {
+ value, err = handleStringSlice(attribute)
+ return
+ }
+
+ // Handle field of type time.Time
+ if fieldValue.Type() == reflect.TypeOf(time.Time{}) ||
+ fieldValue.Type() == reflect.TypeOf(new(time.Time)) {
+ value, err = handleTime(attribute, args, fieldValue)
+ return
+ }
+
+ // Handle field of type struct
+ if fieldValue.Type().Kind() == reflect.Struct {
+ value, err = handleStruct(attribute, fieldValue)
+ return
+ }
+
+ // Handle field containing slice of structs
+ if fieldValue.Type().Kind() == reflect.Slice &&
+ reflect.TypeOf(fieldValue.Interface()).Elem().Kind() == reflect.Struct {
+ value, err = handleStructSlice(attribute, fieldValue)
+ return
+ }
+
+ // JSON value was a float (numeric)
+ if value.Kind() == reflect.Float64 {
+ value, err = handleNumeric(attribute, fieldType, fieldValue)
+ return
+ }
+
+ // Field was a Pointer type
+ if fieldValue.Kind() == reflect.Ptr {
+ value, err = handlePointer(attribute, args, fieldType, fieldValue, structField)
+ return
+ }
+
+ // As a final catch-all, ensure types line up to avoid a runtime panic.
+ if fieldValue.Kind() != value.Kind() {
+ err = ErrInvalidType
+ return
+ }
+
+ return
+}
+
+func handleStringSlice(attribute interface{}) (reflect.Value, error) {
+ v := reflect.ValueOf(attribute)
+ values := make([]string, v.Len())
+ for i := 0; i < v.Len(); i++ {
+ values[i] = v.Index(i).Interface().(string)
+ }
+
+ return reflect.ValueOf(values), nil
+}
+
+func handleTime(attribute interface{}, args []string, fieldValue reflect.Value) (reflect.Value, error) {
+ var isISO8601, isRFC3339 bool
+ v := reflect.ValueOf(attribute)
+
+ if len(args) > 2 {
+ for _, arg := range args[2:] {
+ if arg == annotationISO8601 {
+ isISO8601 = true
+ } else if arg == annotationRFC3339 {
+ isRFC3339 = true
+ }
+ }
+ }
+
+ if isISO8601 {
+ if v.Kind() != reflect.String {
+ return reflect.ValueOf(time.Now()), ErrInvalidISO8601
+ }
+
+ t, err := time.Parse(iso8601TimeFormat, v.Interface().(string))
+ if err != nil {
+ return reflect.ValueOf(time.Now()), ErrInvalidISO8601
+ }
+
+ if fieldValue.Kind() == reflect.Ptr {
+ return reflect.ValueOf(&t), nil
+ }
+
+ return reflect.ValueOf(t), nil
+ }
+
+ if isRFC3339 {
+ if v.Kind() != reflect.String {
+ return reflect.ValueOf(time.Now()), ErrInvalidRFC3339
+ }
+
+ t, err := time.Parse(time.RFC3339, v.Interface().(string))
+ if err != nil {
+ return reflect.ValueOf(time.Now()), ErrInvalidRFC3339
+ }
+
+ if fieldValue.Kind() == reflect.Ptr {
+ return reflect.ValueOf(&t), nil
+ }
+
+ return reflect.ValueOf(t), nil
+ }
+
+ var at int64
+
+ if v.Kind() == reflect.Float64 {
+ at = int64(v.Interface().(float64))
+ } else if v.Kind() == reflect.Int {
+ at = v.Int()
+ } else {
+ return reflect.ValueOf(time.Now()), ErrInvalidTime
+ }
+
+ t := time.Unix(at, 0)
+
+ return reflect.ValueOf(t), nil
+}
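+
+// Editor's note: a sketch of the struct tags the time handling above expects
+// (not part of the upstream file; the Comment type and field names are
+// illustrative assumptions):
+//
+//    type Comment struct {
+//        ID        int        `jsonapi:"primary,comments"`
+//        CreatedAt time.Time  `jsonapi:"attr,created_at,iso8601"`
+//        EditedAt  *time.Time `jsonapi:"attr,edited_at,rfc3339"`
+//        PostedAt  time.Time  `jsonapi:"attr,posted_at"` // unix timestamp (number)
+//    }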
+
+func handleNumeric(
+ attribute interface{},
+ fieldType reflect.Type,
+ fieldValue reflect.Value) (reflect.Value, error) {
+ v := reflect.ValueOf(attribute)
+ floatValue := v.Interface().(float64)
+
+ var kind reflect.Kind
+ if fieldValue.Kind() == reflect.Ptr {
+ kind = fieldType.Elem().Kind()
+ } else {
+ kind = fieldType.Kind()
+ }
+
+ var numericValue reflect.Value
+
+ switch kind {
+ case reflect.Int:
+ n := int(floatValue)
+ numericValue = reflect.ValueOf(&n)
+ case reflect.Int8:
+ n := int8(floatValue)
+ numericValue = reflect.ValueOf(&n)
+ case reflect.Int16:
+ n := int16(floatValue)
+ numericValue = reflect.ValueOf(&n)
+ case reflect.Int32:
+ n := int32(floatValue)
+ numericValue = reflect.ValueOf(&n)
+ case reflect.Int64:
+ n := int64(floatValue)
+ numericValue = reflect.ValueOf(&n)
+ case reflect.Uint:
+ n := uint(floatValue)
+ numericValue = reflect.ValueOf(&n)
+ case reflect.Uint8:
+ n := uint8(floatValue)
+ numericValue = reflect.ValueOf(&n)
+ case reflect.Uint16:
+ n := uint16(floatValue)
+ numericValue = reflect.ValueOf(&n)
+ case reflect.Uint32:
+ n := uint32(floatValue)
+ numericValue = reflect.ValueOf(&n)
+ case reflect.Uint64:
+ n := uint64(floatValue)
+ numericValue = reflect.ValueOf(&n)
+ case reflect.Float32:
+ n := float32(floatValue)
+ numericValue = reflect.ValueOf(&n)
+ case reflect.Float64:
+ n := floatValue
+ numericValue = reflect.ValueOf(&n)
+ default:
+ return reflect.Value{}, ErrUnknownFieldNumberType
+ }
+
+ return numericValue, nil
+}
+
+func handlePointer(
+ attribute interface{},
+ args []string,
+ fieldType reflect.Type,
+ fieldValue reflect.Value,
+ structField reflect.StructField) (reflect.Value, error) {
+ t := fieldValue.Type()
+ var concreteVal reflect.Value
+
+ switch cVal := attribute.(type) {
+ case string:
+ concreteVal = reflect.ValueOf(&cVal)
+ case bool:
+ concreteVal = reflect.ValueOf(&cVal)
+ case complex64, complex128, uintptr:
+ concreteVal = reflect.ValueOf(&cVal)
+ case map[string]interface{}:
+ var err error
+ concreteVal, err = handleStruct(attribute, fieldValue)
+ if err != nil {
+ return reflect.Value{}, newErrUnsupportedPtrType(
+ reflect.ValueOf(attribute), fieldType, structField)
+ }
+ return concreteVal, err
+ default:
+ return reflect.Value{}, newErrUnsupportedPtrType(
+ reflect.ValueOf(attribute), fieldType, structField)
+ }
+
+ if t != concreteVal.Type() {
+ return reflect.Value{}, newErrUnsupportedPtrType(
+ reflect.ValueOf(attribute), fieldType, structField)
+ }
+
+ return concreteVal, nil
+}
+
+func handleStruct(
+ attribute interface{},
+ fieldValue reflect.Value) (reflect.Value, error) {
+
+ data, err := json.Marshal(attribute)
+ if err != nil {
+ return reflect.Value{}, err
+ }
+
+ node := new(Node)
+ if err := json.Unmarshal(data, &node.Attributes); err != nil {
+ return reflect.Value{}, err
+ }
+
+ var model reflect.Value
+ if fieldValue.Kind() == reflect.Ptr {
+ model = reflect.New(fieldValue.Type().Elem())
+ } else {
+ model = reflect.New(fieldValue.Type())
+ }
+
+ if err := unmarshalNode(node, model, nil); err != nil {
+ return reflect.Value{}, err
+ }
+
+ return model, nil
+}
+
+func handleStructSlice(
+ attribute interface{},
+ fieldValue reflect.Value) (reflect.Value, error) {
+ models := reflect.New(fieldValue.Type()).Elem()
+ dataMap := reflect.ValueOf(attribute).Interface().([]interface{})
+ for _, data := range dataMap {
+ model := reflect.New(fieldValue.Type().Elem()).Elem()
+
+ value, err := handleStruct(data, model)
+
+ if err != nil {
+ continue
+ }
+
+ models = reflect.Append(models, reflect.Indirect(value))
+ }
+
+ return models, nil
+}
diff --git a/vendor/github.com/google/jsonapi/response.go b/vendor/github.com/google/jsonapi/response.go
new file mode 100644
index 0000000..b44e4e9
--- /dev/null
+++ b/vendor/github.com/google/jsonapi/response.go
@@ -0,0 +1,538 @@
+package jsonapi
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var (
+ // ErrBadJSONAPIStructTag is returned when the Struct field's JSON API
+ // annotation is invalid.
+ ErrBadJSONAPIStructTag = errors.New("Bad jsonapi struct tag format")
+ // ErrBadJSONAPIID is returned when the Struct JSON API annotated "id" field
+ // was not a valid numeric type.
+ ErrBadJSONAPIID = errors.New(
+ "id should be either string, int(8,16,32,64) or uint(8,16,32,64)")
+ // ErrExpectedSlice is returned when a variable or argument was expected to
+ // be a slice of struct pointers; marshalMany returns this error when its
+ // interface{} argument is invalid.
+ ErrExpectedSlice = errors.New("models should be a slice of struct pointers")
+ // ErrUnexpectedType is returned when marshaling an interface that is neither
+ // a struct pointer nor a slice of struct pointers.
+ ErrUnexpectedType = errors.New("models should be a struct pointer or slice of struct pointers")
+)
+
+// MarshalPayload writes a jsonapi response for one or many records. The
+// related records are sideloaded into the "included" array. If this method is
+// given a struct pointer as an argument it will serialize in the form
+// "data": {...}. If this method is given a slice of pointers, this method will
+// serialize in the form "data": [...]
+//
+// One Example: you could pass it, w, your http.ResponseWriter, and, models, a
+// ptr to a Blog to be written to the response body:
+//
+// func ShowBlog(w http.ResponseWriter, r *http.Request) {
+// blog := &Blog{}
+//
+// w.Header().Set("Content-Type", jsonapi.MediaType)
+// w.WriteHeader(http.StatusOK)
+//
+// if err := jsonapi.MarshalPayload(w, blog); err != nil {
+// http.Error(w, err.Error(), http.StatusInternalServerError)
+// }
+// }
+//
+// Many Example: you could pass it, w, your http.ResponseWriter, and, models, a
+// slice of Blog struct instance pointers to be written to the response body:
+//
+// func ListBlogs(w http.ResponseWriter, r *http.Request) {
+// blogs := []*Blog{}
+//
+// w.Header().Set("Content-Type", jsonapi.MediaType)
+// w.WriteHeader(http.StatusOK)
+//
+// if err := jsonapi.MarshalPayload(w, blogs); err != nil {
+// http.Error(w, err.Error(), http.StatusInternalServerError)
+// }
+// }
+//
+func MarshalPayload(w io.Writer, models interface{}) error {
+ payload, err := Marshal(models)
+ if err != nil {
+ return err
+ }
+
+ return json.NewEncoder(w).Encode(payload)
+}
+
+// Marshal does the same as MarshalPayload except it just returns the payload
+// and doesn't write out results. Useful if you use your own JSON rendering
+// library.
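+//
+// For example (an editorial sketch; blogs and the writer w are hypothetical),
+// the returned payload can be rendered with encoding/json directly:
+//
+//    payload, err := jsonapi.Marshal(blogs)
+//    if err != nil {
+//        return err
+//    }
+//    buf, err := json.MarshalIndent(payload, "", "  ")
+//    if err != nil {
+//        return err
+//    }
+//    _, err = w.Write(buf)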
+func Marshal(models interface{}) (Payloader, error) {
+ switch vals := reflect.ValueOf(models); vals.Kind() {
+ case reflect.Slice:
+ m, err := convertToSliceInterface(&models)
+ if err != nil {
+ return nil, err
+ }
+
+ payload, err := marshalMany(m)
+ if err != nil {
+ return nil, err
+ }
+
+ if linkableModels, isLinkable := models.(Linkable); isLinkable {
+ jl := linkableModels.JSONAPILinks()
+ if er := jl.validate(); er != nil {
+ return nil, er
+ }
+ payload.Links = linkableModels.JSONAPILinks()
+ }
+
+ if metableModels, ok := models.(Metable); ok {
+ payload.Meta = metableModels.JSONAPIMeta()
+ }
+
+ return payload, nil
+ case reflect.Ptr:
+ // Check that the pointer was to a struct
+ if reflect.Indirect(vals).Kind() != reflect.Struct {
+ return nil, ErrUnexpectedType
+ }
+ return marshalOne(models)
+ default:
+ return nil, ErrUnexpectedType
+ }
+}
+
+// MarshalPayloadWithoutIncluded writes a jsonapi response with one or many
+// records, without the related records sideloaded into the "included" array.
+// If you want to serialize the relations into the "included" array see
+// MarshalPayload.
+//
+// models interface{} should be either a struct pointer or a slice of struct
+// pointers.
+func MarshalPayloadWithoutIncluded(w io.Writer, model interface{}) error {
+ payload, err := Marshal(model)
+ if err != nil {
+ return err
+ }
+ payload.clearIncluded()
+
+ return json.NewEncoder(w).Encode(payload)
+}
+
+// marshalOne does the same as MarshalOnePayload except it just returns the
+// payload and doesn't write out results. Useful if you use your own JSON rendering
+// library.
+func marshalOne(model interface{}) (*OnePayload, error) {
+ included := make(map[string]*Node)
+
+ rootNode, err := visitModelNode(model, &included, true)
+ if err != nil {
+ return nil, err
+ }
+ payload := &OnePayload{Data: rootNode}
+
+ payload.Included = nodeMapValues(&included)
+
+ return payload, nil
+}
+
+// marshalMany does the same as MarshalManyPayload except it just returns the
+// payload and doesn't write out results. Useful if you use your own JSON rendering
+// library.
+func marshalMany(models []interface{}) (*ManyPayload, error) {
+ payload := &ManyPayload{
+ Data: []*Node{},
+ }
+ included := map[string]*Node{}
+
+ for _, model := range models {
+ node, err := visitModelNode(model, &included, true)
+ if err != nil {
+ return nil, err
+ }
+ payload.Data = append(payload.Data, node)
+ }
+ payload.Included = nodeMapValues(&included)
+
+ return payload, nil
+}
+
+// MarshalOnePayloadEmbedded - This method is not meant for use in
+// implementation code, although feel free. The purpose of this
+// method is for use in tests. In most cases, your request
+// payloads for create will be embedded rather than sideloaded for
+// related records. This method will serialize a single struct
+// pointer into an embedded json response. In other words, there
+// will be no "included" array in the JSON; all relationships will
+// be serialized inline in the data.
+//
+// However, in tests, you may want to construct payloads to post
+// to create methods that are embedded to most closely resemble
+// the payloads that will be produced by the client. This is what
+// this method is intended for.
+//
+// model interface{} should be a pointer to a struct.
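+//
+// For example (an editorial sketch; testBlog, t, and the request path are
+// hypothetical), a test could build an embedded request body:
+//
+//    out := bytes.NewBuffer(nil)
+//    if err := jsonapi.MarshalOnePayloadEmbedded(out, testBlog); err != nil {
+//        t.Fatal(err)
+//    }
+//    req, _ := http.NewRequest(http.MethodPost, "/blogs", out)
+//    req.Header.Set("Content-Type", jsonapi.MediaType)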
+func MarshalOnePayloadEmbedded(w io.Writer, model interface{}) error {
+ rootNode, err := visitModelNode(model, nil, false)
+ if err != nil {
+ return err
+ }
+
+ payload := &OnePayload{Data: rootNode}
+
+ return json.NewEncoder(w).Encode(payload)
+}
+
+func visitModelNode(model interface{}, included *map[string]*Node,
+ sideload bool) (*Node, error) {
+ node := new(Node)
+
+ var er error
+ value := reflect.ValueOf(model)
+ if value.IsNil() {
+ return nil, nil
+ }
+
+ modelValue := value.Elem()
+ modelType := value.Type().Elem()
+
+ for i := 0; i < modelValue.NumField(); i++ {
+ structField := modelValue.Type().Field(i)
+ tag := structField.Tag.Get(annotationJSONAPI)
+ if tag == "" {
+ continue
+ }
+
+ fieldValue := modelValue.Field(i)
+ fieldType := modelType.Field(i)
+
+ args := strings.Split(tag, annotationSeperator)
+
+ if len(args) < 1 {
+ er = ErrBadJSONAPIStructTag
+ break
+ }
+
+ annotation := args[0]
+
+ if (annotation == annotationClientID && len(args) != 1) ||
+ (annotation != annotationClientID && len(args) < 2) {
+ er = ErrBadJSONAPIStructTag
+ break
+ }
+
+ if annotation == annotationPrimary {
+ v := fieldValue
+
+ // Deal with PTRS
+ var kind reflect.Kind
+ if fieldValue.Kind() == reflect.Ptr {
+ kind = fieldType.Type.Elem().Kind()
+ v = reflect.Indirect(fieldValue)
+ } else {
+ kind = fieldType.Type.Kind()
+ }
+
+ // Handle allowed types
+ switch kind {
+ case reflect.String:
+ node.ID = v.Interface().(string)
+ case reflect.Int:
+ node.ID = strconv.FormatInt(int64(v.Interface().(int)), 10)
+ case reflect.Int8:
+ node.ID = strconv.FormatInt(int64(v.Interface().(int8)), 10)
+ case reflect.Int16:
+ node.ID = strconv.FormatInt(int64(v.Interface().(int16)), 10)
+ case reflect.Int32:
+ node.ID = strconv.FormatInt(int64(v.Interface().(int32)), 10)
+ case reflect.Int64:
+ node.ID = strconv.FormatInt(v.Interface().(int64), 10)
+ case reflect.Uint:
+ node.ID = strconv.FormatUint(uint64(v.Interface().(uint)), 10)
+ case reflect.Uint8:
+ node.ID = strconv.FormatUint(uint64(v.Interface().(uint8)), 10)
+ case reflect.Uint16:
+ node.ID = strconv.FormatUint(uint64(v.Interface().(uint16)), 10)
+ case reflect.Uint32:
+ node.ID = strconv.FormatUint(uint64(v.Interface().(uint32)), 10)
+ case reflect.Uint64:
+ node.ID = strconv.FormatUint(v.Interface().(uint64), 10)
+ default:
+ // We had a JSON float (numeric), but our field was not one of the
+ // allowed numeric types
+ er = ErrBadJSONAPIID
+ }
+
+ if er != nil {
+ break
+ }
+
+ node.Type = args[1]
+ } else if annotation == annotationClientID {
+ clientID := fieldValue.String()
+ if clientID != "" {
+ node.ClientID = clientID
+ }
+ } else if annotation == annotationAttribute {
+ var omitEmpty, iso8601, rfc3339 bool
+
+ if len(args) > 2 {
+ for _, arg := range args[2:] {
+ switch arg {
+ case annotationOmitEmpty:
+ omitEmpty = true
+ case annotationISO8601:
+ iso8601 = true
+ case annotationRFC3339:
+ rfc3339 = true
+ }
+ }
+ }
+
+ if node.Attributes == nil {
+ node.Attributes = make(map[string]interface{})
+ }
+
+ if fieldValue.Type() == reflect.TypeOf(time.Time{}) {
+ t := fieldValue.Interface().(time.Time)
+
+ if t.IsZero() {
+ continue
+ }
+
+ if iso8601 {
+ node.Attributes[args[1]] = t.UTC().Format(iso8601TimeFormat)
+ } else if rfc3339 {
+ node.Attributes[args[1]] = t.UTC().Format(time.RFC3339)
+ } else {
+ node.Attributes[args[1]] = t.Unix()
+ }
+ } else if fieldValue.Type() == reflect.TypeOf(new(time.Time)) {
+ // A time pointer may be nil
+ if fieldValue.IsNil() {
+ if omitEmpty {
+ continue
+ }
+
+ node.Attributes[args[1]] = nil
+ } else {
+ tm := fieldValue.Interface().(*time.Time)
+
+ if tm.IsZero() && omitEmpty {
+ continue
+ }
+
+ if iso8601 {
+ node.Attributes[args[1]] = tm.UTC().Format(iso8601TimeFormat)
+ } else if rfc3339 {
+ node.Attributes[args[1]] = tm.UTC().Format(time.RFC3339)
+ } else {
+ node.Attributes[args[1]] = tm.Unix()
+ }
+ }
+ } else {
+ // Dealing with a fieldValue that is not a time
+ emptyValue := reflect.Zero(fieldValue.Type())
+
+ // See if we need to omit this field
+ if omitEmpty && reflect.DeepEqual(fieldValue.Interface(), emptyValue.Interface()) {
+ continue
+ }
+
+ strAttr, ok := fieldValue.Interface().(string)
+ if ok {
+ node.Attributes[args[1]] = strAttr
+ } else {
+ node.Attributes[args[1]] = fieldValue.Interface()
+ }
+ }
+ } else if annotation == annotationRelation {
+ var omitEmpty bool
+
+ // support the 'omitempty' struct tag so that empty relations are omitted from the output
+ if len(args) > 2 {
+ omitEmpty = args[2] == annotationOmitEmpty
+ }
+
+ isSlice := fieldValue.Type().Kind() == reflect.Slice
+ if omitEmpty &&
+ (isSlice && fieldValue.Len() < 1 ||
+ (!isSlice && fieldValue.IsNil())) {
+ continue
+ }
+
+ if node.Relationships == nil {
+ node.Relationships = make(map[string]interface{})
+ }
+
+ var relLinks *Links
+ if linkableModel, ok := model.(RelationshipLinkable); ok {
+ relLinks = linkableModel.JSONAPIRelationshipLinks(args[1])
+ }
+
+ var relMeta *Meta
+ if metableModel, ok := model.(RelationshipMetable); ok {
+ relMeta = metableModel.JSONAPIRelationshipMeta(args[1])
+ }
+
+ if isSlice {
+ // to-many relationship
+ relationship, err := visitModelNodeRelationships(
+ fieldValue,
+ included,
+ sideload,
+ )
+ if err != nil {
+ er = err
+ break
+ }
+ relationship.Links = relLinks
+ relationship.Meta = relMeta
+
+ if sideload {
+ shallowNodes := []*Node{}
+ for _, n := range relationship.Data {
+ appendIncluded(included, n)
+ shallowNodes = append(shallowNodes, toShallowNode(n))
+ }
+
+ node.Relationships[args[1]] = &RelationshipManyNode{
+ Data: shallowNodes,
+ Links: relationship.Links,
+ Meta: relationship.Meta,
+ }
+ } else {
+ node.Relationships[args[1]] = relationship
+ }
+ } else {
+ // to-one relationships
+
+ // Handle null relationship case
+ if fieldValue.IsNil() {
+ node.Relationships[args[1]] = &RelationshipOneNode{Data: nil}
+ continue
+ }
+
+ relationship, err := visitModelNode(
+ fieldValue.Interface(),
+ included,
+ sideload,
+ )
+ if err != nil {
+ er = err
+ break
+ }
+
+ if sideload {
+ appendIncluded(included, relationship)
+ node.Relationships[args[1]] = &RelationshipOneNode{
+ Data: toShallowNode(relationship),
+ Links: relLinks,
+ Meta: relMeta,
+ }
+ } else {
+ node.Relationships[args[1]] = &RelationshipOneNode{
+ Data: relationship,
+ Links: relLinks,
+ Meta: relMeta,
+ }
+ }
+ }
+
+ } else {
+ er = ErrBadJSONAPIStructTag
+ break
+ }
+ }
+
+ if er != nil {
+ return nil, er
+ }
+
+ if linkableModel, isLinkable := model.(Linkable); isLinkable {
+ jl := linkableModel.JSONAPILinks()
+ if er := jl.validate(); er != nil {
+ return nil, er
+ }
+ node.Links = linkableModel.JSONAPILinks()
+ }
+
+ if metableModel, ok := model.(Metable); ok {
+ node.Meta = metableModel.JSONAPIMeta()
+ }
+
+ return node, nil
+}
+
+func toShallowNode(node *Node) *Node {
+ return &Node{
+ ID: node.ID,
+ Type: node.Type,
+ }
+}
+
+func visitModelNodeRelationships(models reflect.Value, included *map[string]*Node,
+ sideload bool) (*RelationshipManyNode, error) {
+ nodes := []*Node{}
+
+ for i := 0; i < models.Len(); i++ {
+ n := models.Index(i).Interface()
+
+ node, err := visitModelNode(n, included, sideload)
+ if err != nil {
+ return nil, err
+ }
+
+ nodes = append(nodes, node)
+ }
+
+ return &RelationshipManyNode{Data: nodes}, nil
+}
+
+func appendIncluded(m *map[string]*Node, nodes ...*Node) {
+ included := *m
+
+ for _, n := range nodes {
+ k := fmt.Sprintf("%s,%s", n.Type, n.ID)
+
+ if _, hasNode := included[k]; hasNode {
+ continue
+ }
+
+ included[k] = n
+ }
+}
+
+func nodeMapValues(m *map[string]*Node) []*Node {
+ mp := *m
+ nodes := make([]*Node, len(mp))
+
+ i := 0
+ for _, n := range mp {
+ nodes[i] = n
+ i++
+ }
+
+ return nodes
+}
+
+func convertToSliceInterface(i *interface{}) ([]interface{}, error) {
+ vals := reflect.ValueOf(*i)
+ if vals.Kind() != reflect.Slice {
+ return nil, ErrExpectedSlice
+ }
+ var response []interface{}
+ for x := 0; x < vals.Len(); x++ {
+ response = append(response, vals.Index(x).Interface())
+ }
+ return response, nil
+}
diff --git a/vendor/github.com/google/jsonapi/runtime.go b/vendor/github.com/google/jsonapi/runtime.go
new file mode 100644
index 0000000..db2d9f2
--- /dev/null
+++ b/vendor/github.com/google/jsonapi/runtime.go
@@ -0,0 +1,129 @@
+package jsonapi
+
+import (
+ "crypto/rand"
+ "fmt"
+ "io"
+ "reflect"
+ "time"
+)
+
+// Event represents a lifecycle event in the marshaling or unmarshaling
+// process.
+type Event int
+
+const (
+ // UnmarshalStart is the Event that is sent when deserialization of a payload
+ // begins.
+ UnmarshalStart Event = iota
+
+ // UnmarshalStop is the Event that is sent when deserialization of a payload
+ // ends.
+ UnmarshalStop
+
+ // MarshalStart is the Event that is sent when serialization of a payload
+ // begins.
+ MarshalStart
+
+ // MarshalStop is the Event that is sent when serialization of a payload
+ // ends.
+ MarshalStop
+)
+
+// Runtime has the same methods as the jsonapi package for serialization and
+// deserialization but also has a ctx, a map[string]interface{} for storing
+// state, designed for instrumenting serialization timings.
+type Runtime struct {
+ ctx map[string]interface{}
+}
+
+// Events is the func type that provides the callback for handling event timings.
+type Events func(*Runtime, Event, string, time.Duration)
+
+// Instrumentation is a global Events variable. This is the handler for all
+// timing events.
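+//
+// For example (an editorial sketch; the log destination and the "handler" key
+// are hypothetical), a caller could record timings like this:
+//
+//    jsonapi.Instrumentation = func(r *jsonapi.Runtime, e jsonapi.Event, callGUID string, d time.Duration) {
+//        log.Printf("jsonapi event=%d call=%s took=%s", e, callGUID, d)
+//    }
+//
+//    rt := jsonapi.NewRuntime().WithValue("handler", "ShowBlog")
+//    err := rt.MarshalPayload(w, blog)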
+var Instrumentation Events
+
+// NewRuntime creates a Runtime for use in an application.
+func NewRuntime() *Runtime { return &Runtime{make(map[string]interface{})} }
+
+// WithValue adds custom state variables to the runtime context.
+func (r *Runtime) WithValue(key string, value interface{}) *Runtime {
+ r.ctx[key] = value
+
+ return r
+}
+
+// Value returns a state variable in the runtime context.
+func (r *Runtime) Value(key string) interface{} {
+ return r.ctx[key]
+}
+
+// Instrument is deprecated.
+func (r *Runtime) Instrument(key string) *Runtime {
+ return r.WithValue("instrument", key)
+}
+
+func (r *Runtime) shouldInstrument() bool {
+ return Instrumentation != nil
+}
+
+// UnmarshalPayload has docs in request.go for UnmarshalPayload.
+func (r *Runtime) UnmarshalPayload(reader io.Reader, model interface{}) error {
+ return r.instrumentCall(UnmarshalStart, UnmarshalStop, func() error {
+ return UnmarshalPayload(reader, model)
+ })
+}
+
+// UnmarshalManyPayload has docs in request.go for UnmarshalManyPayload.
+func (r *Runtime) UnmarshalManyPayload(reader io.Reader, kind reflect.Type) (elems []interface{}, err error) {
+ r.instrumentCall(UnmarshalStart, UnmarshalStop, func() error {
+ elems, err = UnmarshalManyPayload(reader, kind)
+ return err
+ })
+
+ return
+}
+
+// MarshalPayload has docs in response.go for MarshalPayload.
+func (r *Runtime) MarshalPayload(w io.Writer, model interface{}) error {
+ return r.instrumentCall(MarshalStart, MarshalStop, func() error {
+ return MarshalPayload(w, model)
+ })
+}
+
+func (r *Runtime) instrumentCall(start Event, stop Event, c func() error) error {
+ if !r.shouldInstrument() {
+ return c()
+ }
+
+ instrumentationGUID, err := newUUID()
+ if err != nil {
+ return err
+ }
+
+ begin := time.Now()
+ Instrumentation(r, start, instrumentationGUID, time.Duration(0))
+
+ if err := c(); err != nil {
+ return err
+ }
+
+ diff := time.Duration(time.Now().UnixNano() - begin.UnixNano())
+ Instrumentation(r, stop, instrumentationGUID, diff)
+
+ return nil
+}
+
+// citation: http://play.golang.org/p/4FkNSiUDMg
+func newUUID() (string, error) {
+ uuid := make([]byte, 16)
+ if _, err := io.ReadFull(rand.Reader, uuid); err != nil {
+ return "", err
+ }
+ // variant bits; see section 4.1.1
+ uuid[8] = uuid[8]&^0xc0 | 0x80
+ // version 4 (pseudo-random); see section 4.1.3
+ uuid[6] = uuid[6]&^0xf0 | 0x40
+ return fmt.Sprintf("%x-%x-%x-%x-%x", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:]), nil
+}
diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md
new file mode 100644
index 0000000..7ec5ac7
--- /dev/null
+++ b/vendor/github.com/google/uuid/CHANGELOG.md
@@ -0,0 +1,41 @@
+# Changelog
+
+## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16)
+
+
+### Features
+
+* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3))
+
+
+### Bug Fixes
+
+* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06))
+* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6))
+
+## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12)
+
+
+### Features
+
+* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29))
+
+## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26)
+
+
+### Features
+
+* UUIDs slice type with Strings() convenience method ([#133](https://github.com/google/uuid/issues/133)) ([cd5fbbd](https://github.com/google/uuid/commit/cd5fbbdd02f3e3467ac18940e07e062be1f864b4))
+
+### Fixes
+
+* Clarify that Parse's job is to parse but not necessarily validate strings. (Documents current behavior)
+
+## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18)
+
+
+### Bug Fixes
+
+* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0))
+
+## Changelog
diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md
new file mode 100644
index 0000000..a502fdc
--- /dev/null
+++ b/vendor/github.com/google/uuid/CONTRIBUTING.md
@@ -0,0 +1,26 @@
+# How to contribute
+
+We definitely welcome patches and contribution to this project!
+
+### Tips
+
+Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org).
+
+Always try to include a test case! If it is not possible or not necessary,
+please explain why in the pull request description.
+
+### Releasing
+
+Commits that would precipitate a SemVer change, as described in the Conventional
+Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action)
+to create a release candidate pull request. Once submitted, `release-please`
+will create a release.
+
+For tips on how to work with `release-please`, see its documentation.
+
+### Legal requirements
+
+In order to protect both you and ourselves, you will need to sign the
+[Contributor License Agreement](https://cla.developers.google.com/clas).
+
+You may have already signed it for other Google projects.
diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS
new file mode 100644
index 0000000..b4bb97f
--- /dev/null
+++ b/vendor/github.com/google/uuid/CONTRIBUTORS
@@ -0,0 +1,9 @@
+Paul Borman <borman@google.com>
+bmatsuo
+shawnps
+theory
+jboverfelt
+dsymonds
+cd1
+wallclockbuilder
+dansouza
diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE
new file mode 100644
index 0000000..5dc6826
--- /dev/null
+++ b/vendor/github.com/google/uuid/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009,2014 Google Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md
new file mode 100644
index 0000000..3e9a618
--- /dev/null
+++ b/vendor/github.com/google/uuid/README.md
@@ -0,0 +1,21 @@
+# uuid
+The uuid package generates and inspects UUIDs based on
+[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122)
+and DCE 1.1: Authentication and Security Services.
+
+This package is based on the github.com/pborman/uuid package (previously named
+code.google.com/p/go-uuid). It differs from these earlier packages in that
+a UUID is a 16 byte array rather than a byte slice. One loss due to this
+change is the ability to represent an invalid UUID (vs a NIL UUID).
+
+###### Install
+```sh
+go get github.com/google/uuid
+```
+
+###### Documentation
+[![Go Reference](https://pkg.go.dev/badge/github.com/google/uuid.svg)](https://pkg.go.dev/github.com/google/uuid)
+
+Full `go doc` style documentation for the package can be viewed online without
+installing this package by using the GoDoc site here:
+http://pkg.go.dev/github.com/google/uuid
diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go
new file mode 100644
index 0000000..fa820b9
--- /dev/null
+++ b/vendor/github.com/google/uuid/dce.go
@@ -0,0 +1,80 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "encoding/binary"
+ "fmt"
+ "os"
+)
+
+// A Domain represents a Version 2 domain
+type Domain byte
+
+// Domain constants for DCE Security (Version 2) UUIDs.
+const (
+ Person = Domain(0)
+ Group = Domain(1)
+ Org = Domain(2)
+)
+
+// NewDCESecurity returns a DCE Security (Version 2) UUID.
+//
+// The domain should be one of Person, Group or Org.
+// On a POSIX system the id should be the user's UID for the Person
+// domain and the user's GID for the Group. The meaning of id for
+// the domain Org or on non-POSIX systems is site defined.
+//
+// For a given domain/id pair the same token may be returned for up to
+// 7 minutes and 10 seconds.
+func NewDCESecurity(domain Domain, id uint32) (UUID, error) {
+ uuid, err := NewUUID()
+ if err == nil {
+ uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
+ uuid[9] = byte(domain)
+ binary.BigEndian.PutUint32(uuid[0:], id)
+ }
+ return uuid, err
+}
+
+// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
+// domain with the id returned by os.Getuid.
+//
+// NewDCESecurity(Person, uint32(os.Getuid()))
+func NewDCEPerson() (UUID, error) {
+ return NewDCESecurity(Person, uint32(os.Getuid()))
+}
+
+// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
+// domain with the id returned by os.Getgid.
+//
+// NewDCESecurity(Group, uint32(os.Getgid()))
+func NewDCEGroup() (UUID, error) {
+ return NewDCESecurity(Group, uint32(os.Getgid()))
+}
+
+// Domain returns the domain for a Version 2 UUID. Domains are only defined
+// for Version 2 UUIDs.
+func (uuid UUID) Domain() Domain {
+ return Domain(uuid[9])
+}
+
+// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2
+// UUIDs.
+func (uuid UUID) ID() uint32 {
+ return binary.BigEndian.Uint32(uuid[0:4])
+}
+
+func (d Domain) String() string {
+ switch d {
+ case Person:
+ return "Person"
+ case Group:
+ return "Group"
+ case Org:
+ return "Org"
+ }
+ return fmt.Sprintf("Domain%d", int(d))
+}
diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go
new file mode 100644
index 0000000..5b8a4b9
--- /dev/null
+++ b/vendor/github.com/google/uuid/doc.go
@@ -0,0 +1,12 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package uuid generates and inspects UUIDs.
+//
+// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security
+// Services.
+//
+// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to
+// maps or compared directly.
+package uuid
diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go
new file mode 100644
index 0000000..dc60082
--- /dev/null
+++ b/vendor/github.com/google/uuid/hash.go
@@ -0,0 +1,59 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "crypto/md5"
+ "crypto/sha1"
+ "hash"
+)
+
+// Well known namespace IDs and UUIDs
+var (
+ NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
+ NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
+ NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
+ NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
+ Nil UUID // empty UUID, all zeros
+
+ // The Max UUID is a special form of UUID that is specified to have all 128 bits set to 1.
+ Max = UUID{
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ }
+)
+
+// NewHash returns a new UUID derived from the hash of space concatenated with
+// data generated by h. The hash should be at least 16 bytes in length. The
+// first 16 bytes of the hash are used to form the UUID. The version of the
+// UUID will be the lower 4 bits of version. NewHash is used to implement
+// NewMD5 and NewSHA1.
+func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
+ h.Reset()
+ h.Write(space[:]) //nolint:errcheck
+ h.Write(data) //nolint:errcheck
+ s := h.Sum(nil)
+ var uuid UUID
+ copy(uuid[:], s)
+ uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
+ uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
+ return uuid
+}
+
+// NewMD5 returns a new MD5 (Version 3) UUID based on the
+// supplied name space and data. It is the same as calling:
+//
+// NewHash(md5.New(), space, data, 3)
+func NewMD5(space UUID, data []byte) UUID {
+ return NewHash(md5.New(), space, data, 3)
+}
+
+// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
+// supplied name space and data. It is the same as calling:
+//
+// NewHash(sha1.New(), space, data, 5)
+func NewSHA1(space UUID, data []byte) UUID {
+ return NewHash(sha1.New(), space, data, 5)
+}
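+
+// Editor's note (not part of the upstream file): name-based UUIDs are
+// deterministic, which makes them useful as stable identifiers; the URL below
+// is an illustrative assumption:
+//
+//    id := uuid.NewSHA1(uuid.NameSpaceURL, []byte("https://example.com/resource/42"))
+//    fmt.Println(id) // the same input always yields the same UUID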
diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go
new file mode 100644
index 0000000..14bd340
--- /dev/null
+++ b/vendor/github.com/google/uuid/marshal.go
@@ -0,0 +1,38 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "fmt"
+
+// MarshalText implements encoding.TextMarshaler.
+func (uuid UUID) MarshalText() ([]byte, error) {
+ var js [36]byte
+ encodeHex(js[:], uuid)
+ return js[:], nil
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+func (uuid *UUID) UnmarshalText(data []byte) error {
+ id, err := ParseBytes(data)
+ if err != nil {
+ return err
+ }
+ *uuid = id
+ return nil
+}
+
+// MarshalBinary implements encoding.BinaryMarshaler.
+func (uuid UUID) MarshalBinary() ([]byte, error) {
+ return uuid[:], nil
+}
+
+// UnmarshalBinary implements encoding.BinaryUnmarshaler.
+func (uuid *UUID) UnmarshalBinary(data []byte) error {
+ if len(data) != 16 {
+ return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
+ }
+ copy(uuid[:], data)
+ return nil
+}
diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go
new file mode 100644
index 0000000..d651a2b
--- /dev/null
+++ b/vendor/github.com/google/uuid/node.go
@@ -0,0 +1,90 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "sync"
+)
+
+var (
+ nodeMu sync.Mutex
+ ifname string // name of interface being used
+ nodeID [6]byte // hardware for version 1 UUIDs
+ zeroID [6]byte // nodeID with only 0's
+)
+
+// NodeInterface returns the name of the interface from which the NodeID was
+// derived. The interface "user" is returned if the NodeID was set by
+// SetNodeID.
+func NodeInterface() string {
+ defer nodeMu.Unlock()
+ nodeMu.Lock()
+ return ifname
+}
+
+// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
+// If name is "" then the first usable interface found will be used or a random
+// Node ID will be generated. If a named interface cannot be found then false
+// is returned.
+//
+// SetNodeInterface never fails when name is "".
+func SetNodeInterface(name string) bool {
+ defer nodeMu.Unlock()
+ nodeMu.Lock()
+ return setNodeInterface(name)
+}
+
+func setNodeInterface(name string) bool {
+ iname, addr := getHardwareInterface(name) // null implementation for js
+ if iname != "" && addr != nil {
+ ifname = iname
+ copy(nodeID[:], addr)
+ return true
+ }
+
+ // We found no interfaces with a valid hardware address. If name
+ // does not specify a specific interface generate a random Node ID
+ // (section 4.1.6)
+ if name == "" {
+ ifname = "random"
+ randomBits(nodeID[:])
+ return true
+ }
+ return false
+}
+
+// NodeID returns a slice of a copy of the current Node ID, setting the Node ID
+// if not already set.
+func NodeID() []byte {
+ defer nodeMu.Unlock()
+ nodeMu.Lock()
+ if nodeID == zeroID {
+ setNodeInterface("")
+ }
+ nid := nodeID
+ return nid[:]
+}
+
+// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes
+// of id are used. If id is less than 6 bytes then false is returned and the
+// Node ID is not set.
+func SetNodeID(id []byte) bool {
+ if len(id) < 6 {
+ return false
+ }
+ defer nodeMu.Unlock()
+ nodeMu.Lock()
+ copy(nodeID[:], id)
+ ifname = "user"
+ return true
+}
+
+// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is
+// not valid. The NodeID is only well defined for version 1 and 2 UUIDs.
+func (uuid UUID) NodeID() []byte {
+ var node [6]byte
+ copy(node[:], uuid[10:])
+ return node[:]
+}
diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go
new file mode 100644
index 0000000..b2a0bc8
--- /dev/null
+++ b/vendor/github.com/google/uuid/node_js.go
@@ -0,0 +1,12 @@
+// Copyright 2017 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build js
+
+package uuid
+
+// getHardwareInterface returns nil values for the JS version of the code.
+// This removes the "net" dependency, because it is not used in the browser.
+// Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
+func getHardwareInterface(name string) (string, []byte) { return "", nil }
diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go
new file mode 100644
index 0000000..0cbbcdd
--- /dev/null
+++ b/vendor/github.com/google/uuid/node_net.go
@@ -0,0 +1,33 @@
+// Copyright 2017 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !js
+
+package uuid
+
+import "net"
+
+var interfaces []net.Interface // cached list of interfaces
+
+// getHardwareInterface returns the name and hardware address of interface name.
+// If name is "" then the name and hardware address of one of the system's
+// interfaces is returned. If no interfaces are found (name does not exist or
+// there are no interfaces) then "", nil is returned.
+//
+// Only addresses of at least 6 bytes are returned.
+func getHardwareInterface(name string) (string, []byte) {
+ if interfaces == nil {
+ var err error
+ interfaces, err = net.Interfaces()
+ if err != nil {
+ return "", nil
+ }
+ }
+ for _, ifs := range interfaces {
+ if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
+ return ifs.Name, ifs.HardwareAddr
+ }
+ }
+ return "", nil
+}
diff --git a/vendor/github.com/google/uuid/null.go b/vendor/github.com/google/uuid/null.go
new file mode 100644
index 0000000..d7fcbf2
--- /dev/null
+++ b/vendor/github.com/google/uuid/null.go
@@ -0,0 +1,118 @@
+// Copyright 2021 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "bytes"
+ "database/sql/driver"
+ "encoding/json"
+ "fmt"
+)
+
+var jsonNull = []byte("null")
+
+// NullUUID represents a UUID that may be null.
+// NullUUID implements the SQL driver.Scanner interface so
+// it can be used as a scan destination:
+//
+// var u uuid.NullUUID
+// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u)
+// ...
+// if u.Valid {
+// // use u.UUID
+// } else {
+// // NULL value
+// }
+//
+type NullUUID struct {
+ UUID UUID
+ Valid bool // Valid is true if UUID is not NULL
+}
+
+// Scan implements the SQL driver.Scanner interface.
+func (nu *NullUUID) Scan(value interface{}) error {
+ if value == nil {
+ nu.UUID, nu.Valid = Nil, false
+ return nil
+ }
+
+ err := nu.UUID.Scan(value)
+ if err != nil {
+ nu.Valid = false
+ return err
+ }
+
+ nu.Valid = true
+ return nil
+}
+
+// Value implements the driver Valuer interface.
+func (nu NullUUID) Value() (driver.Value, error) {
+ if !nu.Valid {
+ return nil, nil
+ }
+ // Delegate to UUID Value function
+ return nu.UUID.Value()
+}
+
+// MarshalBinary implements encoding.BinaryMarshaler.
+func (nu NullUUID) MarshalBinary() ([]byte, error) {
+ if nu.Valid {
+ return nu.UUID[:], nil
+ }
+
+ return []byte(nil), nil
+}
+
+// UnmarshalBinary implements encoding.BinaryUnmarshaler.
+func (nu *NullUUID) UnmarshalBinary(data []byte) error {
+ if len(data) != 16 {
+ return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
+ }
+ copy(nu.UUID[:], data)
+ nu.Valid = true
+ return nil
+}
+
+// MarshalText implements encoding.TextMarshaler.
+func (nu NullUUID) MarshalText() ([]byte, error) {
+ if nu.Valid {
+ return nu.UUID.MarshalText()
+ }
+
+ return jsonNull, nil
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+func (nu *NullUUID) UnmarshalText(data []byte) error {
+ id, err := ParseBytes(data)
+ if err != nil {
+ nu.Valid = false
+ return err
+ }
+ nu.UUID = id
+ nu.Valid = true
+ return nil
+}
+
+// MarshalJSON implements json.Marshaler.
+func (nu NullUUID) MarshalJSON() ([]byte, error) {
+ if nu.Valid {
+ return json.Marshal(nu.UUID)
+ }
+
+ return jsonNull, nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (nu *NullUUID) UnmarshalJSON(data []byte) error {
+ if bytes.Equal(data, jsonNull) {
+ *nu = NullUUID{}
+ return nil // valid null UUID
+ }
+ err := json.Unmarshal(data, &nu.UUID)
+ nu.Valid = err == nil
+ return err
+}
diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go
new file mode 100644
index 0000000..2e02ec0
--- /dev/null
+++ b/vendor/github.com/google/uuid/sql.go
@@ -0,0 +1,59 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "database/sql/driver"
+ "fmt"
+)
+
+// Scan implements sql.Scanner so UUIDs can be read from databases transparently.
+// Currently, database types that map to string and []byte are supported. Please
+// consult database-specific driver documentation for matching types.
+func (uuid *UUID) Scan(src interface{}) error {
+ switch src := src.(type) {
+ case nil:
+ return nil
+
+ case string:
+ // if an empty UUID comes from a table, we return a null UUID
+ if src == "" {
+ return nil
+ }
+
+ // see Parse for required string format
+ u, err := Parse(src)
+ if err != nil {
+ return fmt.Errorf("Scan: %v", err)
+ }
+
+ *uuid = u
+
+ case []byte:
+ // if an empty UUID comes from a table, we return a null UUID
+ if len(src) == 0 {
+ return nil
+ }
+
+ // assumes a simple slice of bytes if 16 bytes
+ // otherwise attempts to parse
+ if len(src) != 16 {
+ return uuid.Scan(string(src))
+ }
+ copy((*uuid)[:], src)
+
+ default:
+ return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
+ }
+
+ return nil
+}
+
+// Value implements sql.Valuer so that UUIDs can be written to databases
+// transparently. Currently, UUIDs map to strings. Please consult
+// database-specific driver documentation for matching types.
+func (uuid UUID) Value() (driver.Value, error) {
+ return uuid.String(), nil
+}
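+
+// Editor's note (not part of the upstream file): with Scan and Value in place,
+// a UUID column can be read and written through database/sql directly; the
+// table, db handle, and name variable are illustrative assumptions:
+//
+//    var id uuid.UUID
+//    err := db.QueryRow("SELECT id FROM items WHERE name = ?", name).Scan(&id)
+//    ...
+//    _, err = db.Exec("INSERT INTO items (id, name) VALUES (?, ?)", uuid.New(), name)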
diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go
new file mode 100644
index 0000000..c351129
--- /dev/null
+++ b/vendor/github.com/google/uuid/time.go
@@ -0,0 +1,134 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "encoding/binary"
+ "sync"
+ "time"
+)
+
+// A Time represents a time as the number of 100s of nanoseconds since 15 Oct
+// 1582.
+type Time int64
+
+const (
+ lillian = 2299160 // Julian day of 15 Oct 1582
+ unix = 2440587 // Julian day of 1 Jan 1970
+ epoch = unix - lillian // Days between epochs
+ g1582 = epoch * 86400 // seconds between epochs
+ g1582ns100 = g1582 * 10000000 // 100s of nanoseconds between epochs
+)
+
+var (
+ timeMu sync.Mutex
+ lasttime uint64 // last time we returned
+ clockSeq uint16 // clock sequence for this run
+
+ timeNow = time.Now // for testing
+)
+
+// UnixTime converts t to the number of seconds and nanoseconds since the Unix
+// epoch of 1 Jan 1970.
+func (t Time) UnixTime() (sec, nsec int64) {
+ sec = int64(t - g1582ns100)
+ nsec = (sec % 10000000) * 100
+ sec /= 10000000
+ return sec, nsec
+}
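As a quick editorial sketch of the epoch conversion above (not part of the vendored file), a Version 1 UUID's embedded timestamp can be converted back to a `time.Time` with `UnixTime`:

```go
package main

import (
	"fmt"
	"time"

	"github.com/google/uuid"
)

func main() {
	id, err := uuid.NewUUID() // a Version 1 UUID carries a timestamp
	if err != nil {
		panic(err)
	}
	sec, nsec := id.Time().UnixTime() // seconds and nanoseconds since the Unix epoch
	fmt.Println(time.Unix(sec, nsec).UTC())
}
```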
+
+// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
+// clock sequence as well as adjusting the clock sequence as needed. An error
+// is returned if the current time cannot be determined.
+func GetTime() (Time, uint16, error) {
+ defer timeMu.Unlock()
+ timeMu.Lock()
+ return getTime()
+}
+
+func getTime() (Time, uint16, error) {
+ t := timeNow()
+
+ // If we don't have a clock sequence already, set one.
+ if clockSeq == 0 {
+ setClockSequence(-1)
+ }
+ now := uint64(t.UnixNano()/100) + g1582ns100
+
+ // If time has gone backwards with this clock sequence then we
+ // increment the clock sequence
+ if now <= lasttime {
+ clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000
+ }
+ lasttime = now
+ return Time(now), clockSeq, nil
+}
+
+// ClockSequence returns the current clock sequence, generating one if not
+// already set. The clock sequence is only used for Version 1 UUIDs.
+//
+// The uuid package does not use global static storage for the clock sequence or
+// the last time a UUID was generated. Unless SetClockSequence is used, a new
+// random clock sequence is generated the first time a clock sequence is
+// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1)
+func ClockSequence() int {
+ defer timeMu.Unlock()
+ timeMu.Lock()
+ return clockSequence()
+}
+
+func clockSequence() int {
+ if clockSeq == 0 {
+ setClockSequence(-1)
+ }
+ return int(clockSeq & 0x3fff)
+}
+
+// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to
+// -1 causes a new sequence to be generated.
+func SetClockSequence(seq int) {
+ defer timeMu.Unlock()
+ timeMu.Lock()
+ setClockSequence(seq)
+}
+
+func setClockSequence(seq int) {
+ if seq == -1 {
+ var b [2]byte
+ randomBits(b[:]) // clock sequence
+ seq = int(b[0])<<8 | int(b[1])
+ }
+ oldSeq := clockSeq
+ clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant
+ if oldSeq != clockSeq {
+ lasttime = 0
+ }
+}
+
+// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
+// uuid. The time is only defined for version 1, 2, 6 and 7 UUIDs.
+func (uuid UUID) Time() Time {
+ var t Time
+ switch uuid.Version() {
+ case 6:
+ time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110
+ t = Time(time)
+ case 7:
+ time := binary.BigEndian.Uint64(uuid[:8])
+ t = Time((time>>16)*10000 + g1582ns100)
+ default: // forward compatible
+ time := int64(binary.BigEndian.Uint32(uuid[0:4]))
+ time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
+ time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
+ t = Time(time)
+ }
+ return t
+}
+
+// ClockSequence returns the clock sequence encoded in uuid.
+// The clock sequence is only well defined for version 1 and 2 UUIDs.
+func (uuid UUID) ClockSequence() int {
+ return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff
+}
diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go
new file mode 100644
index 0000000..5ea6c73
--- /dev/null
+++ b/vendor/github.com/google/uuid/util.go
@@ -0,0 +1,43 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "io"
+)
+
+// randomBits completely fills slice b with random data.
+func randomBits(b []byte) {
+ if _, err := io.ReadFull(rander, b); err != nil {
+ panic(err.Error()) // rand should never fail
+ }
+}
+
+// xvalues returns the value of a byte as a hexadecimal digit or 255.
+var xvalues = [256]byte{
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
+ 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+}
+
+// xtob converts hex characters x1 and x2 into a byte.
+func xtob(x1, x2 byte) (byte, bool) {
+ b1 := xvalues[x1]
+ b2 := xvalues[x2]
+ return (b1 << 4) | b2, b1 != 255 && b2 != 255
+}
diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go
new file mode 100644
index 0000000..5232b48
--- /dev/null
+++ b/vendor/github.com/google/uuid/uuid.go
@@ -0,0 +1,365 @@
+// Copyright 2018 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "bytes"
+ "crypto/rand"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+ "sync"
+)
+
+// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
+// 4122.
+type UUID [16]byte
+
+// A Version represents a UUID's version.
+type Version byte
+
+// A Variant represents a UUID's variant.
+type Variant byte
+
+// Constants returned by Variant.
+const (
+ Invalid = Variant(iota) // Invalid UUID
+ RFC4122 // The variant specified in RFC4122
+ Reserved // Reserved, NCS backward compatibility.
+ Microsoft // Reserved, Microsoft Corporation backward compatibility.
+ Future // Reserved for future definition.
+)
+
+const randPoolSize = 16 * 16
+
+var (
+ rander = rand.Reader // random function
+ poolEnabled = false
+ poolMu sync.Mutex
+ poolPos = randPoolSize // protected with poolMu
+ pool [randPoolSize]byte // protected with poolMu
+)
+
+type invalidLengthError struct{ len int }
+
+func (err invalidLengthError) Error() string {
+ return fmt.Sprintf("invalid UUID length: %d", err.len)
+}
+
+// IsInvalidLengthError is a matcher function for the custom error invalidLengthError.
+func IsInvalidLengthError(err error) bool {
+ _, ok := err.(invalidLengthError)
+ return ok
+}
+
+// Parse decodes s into a UUID or returns an error if it cannot be parsed. Both
+// the standard UUID forms defined in RFC 4122
+// (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) are decoded. In addition,
+// Parse accepts non-standard strings such as the raw hex encoding
+// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and 38 byte "Microsoft style" encodings,
+// e.g. {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}. Only the middle 36 bytes are
+// examined in the latter case. Parse should not be used to validate strings as
+// it parses non-standard encodings as indicated above.
+func Parse(s string) (UUID, error) {
+ var uuid UUID
+ switch len(s) {
+ // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ case 36:
+
+ // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ case 36 + 9:
+ if !strings.EqualFold(s[:9], "urn:uuid:") {
+ return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
+ }
+ s = s[9:]
+
+ // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
+ case 36 + 2:
+ s = s[1:]
+
+ // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+ case 32:
+ var ok bool
+ for i := range uuid {
+ uuid[i], ok = xtob(s[i*2], s[i*2+1])
+ if !ok {
+ return uuid, errors.New("invalid UUID format")
+ }
+ }
+ return uuid, nil
+ default:
+ return uuid, invalidLengthError{len(s)}
+ }
+ // s is now at least 36 bytes long
+ // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
+ return uuid, errors.New("invalid UUID format")
+ }
+ for i, x := range [16]int{
+ 0, 2, 4, 6,
+ 9, 11,
+ 14, 16,
+ 19, 21,
+ 24, 26, 28, 30, 32, 34,
+ } {
+ v, ok := xtob(s[x], s[x+1])
+ if !ok {
+ return uuid, errors.New("invalid UUID format")
+ }
+ uuid[i] = v
+ }
+ return uuid, nil
+}
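Editorial sketch of the four input encodings accepted by Parse, per the doc comment above; all decode to the same UUID value.

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	inputs := []string{
		"f47ac10b-58cc-4372-a567-0e02b2c3d479",          // canonical RFC 4122 form
		"urn:uuid:f47ac10b-58cc-4372-a567-0e02b2c3d479", // URN form
		"{f47ac10b-58cc-4372-a567-0e02b2c3d479}",        // "Microsoft style" braces
		"f47ac10b58cc4372a5670e02b2c3d479",              // raw hex, no hyphens
	}
	for _, s := range inputs {
		id, err := uuid.Parse(s)
		fmt.Println(id, err) // every line prints the same UUID and <nil>
	}
}
```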
+
+// ParseBytes is like Parse, except it parses a byte slice instead of a string.
+func ParseBytes(b []byte) (UUID, error) {
+ var uuid UUID
+ switch len(b) {
+ case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) {
+ return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
+ }
+ b = b[9:]
+ case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
+ b = b[1:]
+ case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+ var ok bool
+ for i := 0; i < 32; i += 2 {
+ uuid[i/2], ok = xtob(b[i], b[i+1])
+ if !ok {
+ return uuid, errors.New("invalid UUID format")
+ }
+ }
+ return uuid, nil
+ default:
+ return uuid, invalidLengthError{len(b)}
+ }
+ // s is now at least 36 bytes long
+ // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' {
+ return uuid, errors.New("invalid UUID format")
+ }
+ for i, x := range [16]int{
+ 0, 2, 4, 6,
+ 9, 11,
+ 14, 16,
+ 19, 21,
+ 24, 26, 28, 30, 32, 34,
+ } {
+ v, ok := xtob(b[x], b[x+1])
+ if !ok {
+ return uuid, errors.New("invalid UUID format")
+ }
+ uuid[i] = v
+ }
+ return uuid, nil
+}
+
+// MustParse is like Parse but panics if the string cannot be parsed.
+// It simplifies safe initialization of global variables holding compiled UUIDs.
+func MustParse(s string) UUID {
+ uuid, err := Parse(s)
+ if err != nil {
+ panic(`uuid: Parse(` + s + `): ` + err.Error())
+ }
+ return uuid
+}
+
+// FromBytes creates a new UUID from a byte slice. Returns an error if the slice
+// does not have a length of 16. The bytes are copied from the slice.
+func FromBytes(b []byte) (uuid UUID, err error) {
+ err = uuid.UnmarshalBinary(b)
+ return uuid, err
+}
+
+// Must returns uuid if err is nil and panics otherwise.
+func Must(uuid UUID, err error) UUID {
+ if err != nil {
+ panic(err)
+ }
+ return uuid
+}
+
+// Validate returns an error if s is not a properly formatted UUID in one of the following formats:
+// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
+// It returns an error if the format is invalid, otherwise nil.
+func Validate(s string) error {
+ switch len(s) {
+ // Standard UUID format
+ case 36:
+
+ // UUID with "urn:uuid:" prefix
+ case 36 + 9:
+ if !strings.EqualFold(s[:9], "urn:uuid:") {
+ return fmt.Errorf("invalid urn prefix: %q", s[:9])
+ }
+ s = s[9:]
+
+ // UUID enclosed in braces
+ case 36 + 2:
+ if s[0] != '{' || s[len(s)-1] != '}' {
+ return fmt.Errorf("invalid bracketed UUID format")
+ }
+ s = s[1 : len(s)-1]
+
+ // UUID without hyphens
+ case 32:
+ for i := 0; i < len(s); i += 2 {
+ _, ok := xtob(s[i], s[i+1])
+ if !ok {
+ return errors.New("invalid UUID format")
+ }
+ }
+
+ default:
+ return invalidLengthError{len(s)}
+ }
+
+ // Check for standard UUID format
+ if len(s) == 36 {
+ if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
+ return errors.New("invalid UUID format")
+ }
+ for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} {
+ if _, ok := xtob(s[x], s[x+1]); !ok {
+ return errors.New("invalid UUID format")
+ }
+ }
+ }
+
+ return nil
+}
+
+// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx,
+// or "" if uuid is invalid.
+func (uuid UUID) String() string {
+ var buf [36]byte
+ encodeHex(buf[:], uuid)
+ return string(buf[:])
+}
+
+// URN returns the RFC 2141 URN form of uuid,
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
+func (uuid UUID) URN() string {
+ var buf [36 + 9]byte
+ copy(buf[:], "urn:uuid:")
+ encodeHex(buf[9:], uuid)
+ return string(buf[:])
+}
+
+func encodeHex(dst []byte, uuid UUID) {
+ hex.Encode(dst, uuid[:4])
+ dst[8] = '-'
+ hex.Encode(dst[9:13], uuid[4:6])
+ dst[13] = '-'
+ hex.Encode(dst[14:18], uuid[6:8])
+ dst[18] = '-'
+ hex.Encode(dst[19:23], uuid[8:10])
+ dst[23] = '-'
+ hex.Encode(dst[24:], uuid[10:])
+}
+
+// Variant returns the variant encoded in uuid.
+func (uuid UUID) Variant() Variant {
+ switch {
+ case (uuid[8] & 0xc0) == 0x80:
+ return RFC4122
+ case (uuid[8] & 0xe0) == 0xc0:
+ return Microsoft
+ case (uuid[8] & 0xe0) == 0xe0:
+ return Future
+ default:
+ return Reserved
+ }
+}
+
+// Version returns the version of uuid.
+func (uuid UUID) Version() Version {
+ return Version(uuid[6] >> 4)
+}
+
+func (v Version) String() string {
+ if v > 15 {
+ return fmt.Sprintf("BAD_VERSION_%d", v)
+ }
+ return fmt.Sprintf("VERSION_%d", v)
+}
+
+func (v Variant) String() string {
+ switch v {
+ case RFC4122:
+ return "RFC4122"
+ case Reserved:
+ return "Reserved"
+ case Microsoft:
+ return "Microsoft"
+ case Future:
+ return "Future"
+ case Invalid:
+ return "Invalid"
+ }
+ return fmt.Sprintf("BadVariant%d", int(v))
+}
+
+// SetRand sets the random number generator to r, which implements io.Reader.
+// If r.Read returns an error when the package requests random data then
+// a panic will be issued.
+//
+// Calling SetRand with nil sets the random number generator to the default
+// generator.
+func SetRand(r io.Reader) {
+ if r == nil {
+ rander = rand.Reader
+ return
+ }
+ rander = r
+}
+
+// EnableRandPool enables the internal randomness pool used for Random
+// (Version 4) UUID generation. The pool contains random bytes read from
+// the random number generator on demand in batches. Enabling the pool
+// may improve the UUID generation throughput significantly.
+//
+// Since the pool is stored on the Go heap, this feature may be a bad fit
+// for security sensitive applications.
+//
+// Both EnableRandPool and DisableRandPool are not thread-safe and should
+// only be called when there is no possibility that New or any other
+// UUID Version 4 generation function will be called concurrently.
+func EnableRandPool() {
+ poolEnabled = true
+}
+
+// DisableRandPool disables the randomness pool if it was previously
+// enabled with EnableRandPool.
+//
+// Both EnableRandPool and DisableRandPool are not thread-safe and should
+// only be called when there is no possibility that New or any other
+// UUID Version 4 generation function will be called concurrently.
+func DisableRandPool() {
+ poolEnabled = false
+ defer poolMu.Unlock()
+ poolMu.Lock()
+ poolPos = randPoolSize
+}
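Editorial sketch of the EnableRandPool workflow described above: enable the pool once, before any concurrent v4 generation begins.

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	uuid.EnableRandPool() // amortizes crypto/rand reads across 16 UUIDs at a time

	ids := make([]uuid.UUID, 0, 1000)
	for i := 0; i < 1000; i++ {
		ids = append(ids, uuid.New())
	}
	fmt.Println(len(ids), ids[0])
}
```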
+
+// UUIDs is a slice of UUID types.
+type UUIDs []UUID
+
+// Strings returns a string slice containing the string form of each UUID in uuids.
+func (uuids UUIDs) Strings() []string {
+ var uuidStrs = make([]string, len(uuids))
+ for i, uuid := range uuids {
+ uuidStrs[i] = uuid.String()
+ }
+ return uuidStrs
+}
diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go
new file mode 100644
index 0000000..4631096
--- /dev/null
+++ b/vendor/github.com/google/uuid/version1.go
@@ -0,0 +1,44 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "encoding/binary"
+)
+
+// NewUUID returns a Version 1 UUID based on the current NodeID and clock
+// sequence, and the current time. If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically. If the NodeID cannot
+// be set, NewUUID returns Nil. If the clock sequence has not been set by
+// SetClockSequence then it will be set automatically. If GetTime fails to
+// return the current time, NewUUID returns Nil and an error.
+//
+// In most cases, New should be used.
+func NewUUID() (UUID, error) {
+ var uuid UUID
+ now, seq, err := GetTime()
+ if err != nil {
+ return uuid, err
+ }
+
+ timeLow := uint32(now & 0xffffffff)
+ timeMid := uint16((now >> 32) & 0xffff)
+ timeHi := uint16((now >> 48) & 0x0fff)
+ timeHi |= 0x1000 // Version 1
+
+ binary.BigEndian.PutUint32(uuid[0:], timeLow)
+ binary.BigEndian.PutUint16(uuid[4:], timeMid)
+ binary.BigEndian.PutUint16(uuid[6:], timeHi)
+ binary.BigEndian.PutUint16(uuid[8:], seq)
+
+ nodeMu.Lock()
+ if nodeID == zeroID {
+ setNodeInterface("")
+ }
+ copy(uuid[10:], nodeID[:])
+ nodeMu.Unlock()
+
+ return uuid, nil
+}
diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go
new file mode 100644
index 0000000..7697802
--- /dev/null
+++ b/vendor/github.com/google/uuid/version4.go
@@ -0,0 +1,76 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "io"
+
+// New creates a new random UUID or panics. New is equivalent to
+// the expression
+//
+// uuid.Must(uuid.NewRandom())
+func New() UUID {
+ return Must(NewRandom())
+}
+
+// NewString creates a new random UUID and returns it as a string or panics.
+// NewString is equivalent to the expression
+//
+// uuid.New().String()
+func NewString() string {
+ return Must(NewRandom()).String()
+}
+
+// NewRandom returns a Random (Version 4) UUID.
+//
+// The strength of the UUIDs is based on the strength of the crypto/rand
+// package.
+//
+// Uses the randomness pool if it was enabled with EnableRandPool.
+//
+// A note about uniqueness derived from the UUID Wikipedia entry:
+//
+// Randomly generated UUIDs have 122 random bits. One's annual risk of being
+// hit by a meteorite is estimated to be one chance in 17 billion, which
+// means the probability is about 0.00000000006 (6 × 10^-11),
+// equivalent to the odds of creating a few tens of trillions of UUIDs in a
+// year and having one duplicate.
+func NewRandom() (UUID, error) {
+ if !poolEnabled {
+ return NewRandomFromReader(rander)
+ }
+ return newRandomFromPool()
+}
+
+// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader.
+func NewRandomFromReader(r io.Reader) (UUID, error) {
+ var uuid UUID
+ _, err := io.ReadFull(r, uuid[:])
+ if err != nil {
+ return Nil, err
+ }
+ uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+ uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+ return uuid, nil
+}
+
+func newRandomFromPool() (UUID, error) {
+ var uuid UUID
+ poolMu.Lock()
+ if poolPos == randPoolSize {
+ _, err := io.ReadFull(rander, pool[:])
+ if err != nil {
+ poolMu.Unlock()
+ return Nil, err
+ }
+ poolPos = 0
+ }
+ copy(uuid[:], pool[poolPos:(poolPos+16)])
+ poolPos += 16
+ poolMu.Unlock()
+
+ uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+ uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+ return uuid, nil
+}
diff --git a/vendor/github.com/google/uuid/version6.go b/vendor/github.com/google/uuid/version6.go
new file mode 100644
index 0000000..339a959
--- /dev/null
+++ b/vendor/github.com/google/uuid/version6.go
@@ -0,0 +1,56 @@
+// Copyright 2023 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "encoding/binary"
+
+// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality.
+// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs.
+// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead.
+//
+// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6
+//
+// NewV6 returns a Version 6 UUID based on the current NodeID and clock
+// sequence, and the current time. If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically. If the NodeID cannot
+// be set, NewV6 sets the NodeID to random bits automatically. If the clock sequence has not been set by
+// SetClockSequence then it will be set automatically. If GetTime fails to
+// return the current time, NewV6 returns Nil and an error.
+func NewV6() (UUID, error) {
+ var uuid UUID
+ now, seq, err := GetTime()
+ if err != nil {
+ return uuid, err
+ }
+
+ /*
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | time_high |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | time_mid | time_low_and_version |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |clk_seq_hi_res | clk_seq_low | node (0-1) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | node (2-5) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+
+ binary.BigEndian.PutUint64(uuid[0:], uint64(now))
+ binary.BigEndian.PutUint16(uuid[8:], seq)
+
+ uuid[6] = 0x60 | (uuid[6] & 0x0F)
+ uuid[8] = 0x80 | (uuid[8] & 0x3F)
+
+ nodeMu.Lock()
+ if nodeID == zeroID {
+ setNodeInterface("")
+ }
+ copy(uuid[10:], nodeID[:])
+ nodeMu.Unlock()
+
+ return uuid, nil
+}
diff --git a/vendor/github.com/google/uuid/version7.go b/vendor/github.com/google/uuid/version7.go
new file mode 100644
index 0000000..3167b64
--- /dev/null
+++ b/vendor/github.com/google/uuid/version7.go
@@ -0,0 +1,104 @@
+// Copyright 2023 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "io"
+)
+
+// UUID version 7 features a time-ordered value field derived from the widely
+// implemented and well known Unix Epoch timestamp source,
+// the number of milliseconds since midnight 1 Jan 1970 UTC, leap seconds excluded,
+// as well as improved entropy characteristics over versions 1 or 6.
+//
+// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7
+//
+// Implementations SHOULD utilize UUID version 7 over UUID version 1 and 6 if possible.
+//
+// NewV7 returns a Version 7 UUID based on the current time (Unix Epoch).
+// Uses the randomness pool if it was enabled with EnableRandPool.
+// On error, NewV7 returns Nil and an error.
+func NewV7() (UUID, error) {
+ uuid, err := NewRandom()
+ if err != nil {
+ return uuid, err
+ }
+ makeV7(uuid[:])
+ return uuid, nil
+}
+
+// NewV7FromReader returns a Version 7 UUID based on the current time (Unix Epoch).
+// It uses NewRandomFromReader to fill the random bits.
+// On error, NewV7FromReader returns Nil and an error.
+func NewV7FromReader(r io.Reader) (UUID, error) {
+ uuid, err := NewRandomFromReader(r)
+ if err != nil {
+ return uuid, err
+ }
+
+ makeV7(uuid[:])
+ return uuid, nil
+}
+
+// makeV7 fills in the 48-bit time (uuid[0] - uuid[5]) and sets the version to b0111 (uuid[6]).
+// uuid[8] already has the right variant bits set (Variant is 10).
+// See NewV7 and NewV7FromReader.
+func makeV7(uuid []byte) {
+ /*
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | unix_ts_ms |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | unix_ts_ms | ver | rand_a (12 bit seq) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |var| rand_b |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | rand_b |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+ _ = uuid[15] // bounds check
+
+ t, s := getV7Time()
+
+ uuid[0] = byte(t >> 40)
+ uuid[1] = byte(t >> 32)
+ uuid[2] = byte(t >> 24)
+ uuid[3] = byte(t >> 16)
+ uuid[4] = byte(t >> 8)
+ uuid[5] = byte(t)
+
+ uuid[6] = 0x70 | (0x0F & byte(s>>8))
+ uuid[7] = byte(s)
+}
+
+// lastV7time is the last time we returned stored as:
+//
+// 52 bits of time in milliseconds since epoch
+// 12 bits of (fractional nanoseconds) >> 8
+var lastV7time int64
+
+const nanoPerMilli = 1000000
+
+// getV7Time returns the time in milliseconds and nanoseconds / 256.
+// The returned (milli << 12 + seq) is guaranteed to be greater than
+// (milli << 12 + seq) returned by any previous call to getV7Time.
+func getV7Time() (milli, seq int64) {
+ timeMu.Lock()
+ defer timeMu.Unlock()
+
+ nano := timeNow().UnixNano()
+ milli = nano / nanoPerMilli
+ // Sequence number is between 0 and 3906 (nanoPerMilli>>8)
+ seq = (nano - milli*nanoPerMilli) >> 8
+ now := milli<<12 + seq
+ if now <= lastV7time {
+ now = lastV7time + 1
+ milli = now >> 12
+ seq = now & 0xfff
+ }
+ lastV7time = now
+ return milli, seq
+}
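Editorial sketch tying NewV7 and UUID.Time together: the millisecond timestamp written by makeV7 can be read back with Time().UnixTime().

```go
package main

import (
	"fmt"
	"time"

	"github.com/google/uuid"
)

func main() {
	id, err := uuid.NewV7()
	if err != nil {
		panic(err)
	}
	sec, nsec := id.Time().UnixTime()
	fmt.Println(id, time.Unix(sec, nsec).UTC()) // roughly "now", at millisecond resolution
}
```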
diff --git a/vendor/github.com/google/yamlfmt/.gitignore b/vendor/github.com/google/yamlfmt/.gitignore
new file mode 100644
index 0000000..6c713e5
--- /dev/null
+++ b/vendor/github.com/google/yamlfmt/.gitignore
@@ -0,0 +1,14 @@
+# Sometimes use these for testing
+.yamlfmt
+!integrationtest/**/.yamlfmt
+tmp
+
+# Goreleaser build folder
+dist/
+
+# build file with `make build`
+yamlfmt
+yamlfmt.exe
+
+# vscode settings
+.vscode
diff --git a/vendor/github.com/google/yamlfmt/.goreleaser.yaml b/vendor/github.com/google/yamlfmt/.goreleaser.yaml
new file mode 100644
index 0000000..d6b2757
--- /dev/null
+++ b/vendor/github.com/google/yamlfmt/.goreleaser.yaml
@@ -0,0 +1,63 @@
+# Copyright 2022 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+version: 2
+
+before:
+ hooks:
+ - go mod tidy
+builds:
+ - id: yamlfmt
+ main: ./cmd/yamlfmt
+ binary: yamlfmt
+ env:
+ - CGO_ENABLED=0
+ goos:
+ - linux
+ - windows
+ - darwin
+ ldflags:
+ - '-s -w -X main.version={{.Version}} -X main.commit={{.ShortCommit}}'
+archives:
+ - name_template: >-
+ {{ .ProjectName }}_
+ {{- .Version }}_
+ {{- title .Os }}_
+ {{- if eq .Arch "amd64" }}x86_64
+ {{- else if eq .Arch "386" }}i386
+ {{- else }}{{- .Arch }}{{- end }}
+ {{- if .Arm }}v{{- .Arm }}{{- end }}
+checksum:
+ name_template: 'checksums.txt'
+snapshot:
+ version_template: "{{ incpatch .Version }}-next"
+changelog:
+ sort: asc
+ filters:
+ exclude:
+ - '^docs:'
+ - '^test:'
+ - '^ci:'
+signs:
+ - cmd: cosign
+ signature: "${artifact}.sig"
+ certificate: "${artifact}.pem"
+ args:
+ - "sign-blob"
+ - "--oidc-issuer=https://token.actions.githubusercontent.com"
+ - "--output-certificate=${certificate}"
+ - "--output-signature=${signature}"
+ - "${artifact}"
+ - "--yes"
+ artifacts: checksum
diff --git a/vendor/github.com/google/yamlfmt/.pre-commit-hooks.yaml b/vendor/github.com/google/yamlfmt/.pre-commit-hooks.yaml
new file mode 100644
index 0000000..e888624
--- /dev/null
+++ b/vendor/github.com/google/yamlfmt/.pre-commit-hooks.yaml
@@ -0,0 +1,20 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+- id: yamlfmt
+ name: yamlfmt
+ description: This hook uses github.com/google/yamlfmt to format yaml files. Requires Go >1.18 to be installed.
+ entry: yamlfmt
+ language: golang
+ types: [yaml]
diff --git a/vendor/github.com/google/yamlfmt/CONTRIBUTING.md b/vendor/github.com/google/yamlfmt/CONTRIBUTING.md
new file mode 100644
index 0000000..9d7656b
--- /dev/null
+++ b/vendor/github.com/google/yamlfmt/CONTRIBUTING.md
@@ -0,0 +1,29 @@
+# How to Contribute
+
+We'd love to accept your patches and contributions to this project. There are
+just a few small guidelines you need to follow.
+
+## Contributor License Agreement
+
+Contributions to this project must be accompanied by a Contributor License
+Agreement (CLA). You (or your employer) retain the copyright to your
+contribution; this simply gives us permission to use and redistribute your
+contributions as part of the project. Head over to
+<https://cla.developers.google.com/> to see your current agreements on file or
+to sign a new one.
+
+You generally only need to submit a CLA once, so if you've already submitted one
+(even if it was for a different project), you probably don't need to do it
+again.
+
+## Code Reviews
+
+All submissions, including submissions by project members, require review. We
+use GitHub pull requests for this purpose. Consult
+[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
+information on using pull requests.
+
+## Community Guidelines
+
+This project follows
+[Google's Open Source Community Guidelines](https://opensource.google/conduct/). \ No newline at end of file
diff --git a/vendor/github.com/google/yamlfmt/Dockerfile b/vendor/github.com/google/yamlfmt/Dockerfile
new file mode 100644
index 0000000..a5dbd81
--- /dev/null
+++ b/vendor/github.com/google/yamlfmt/Dockerfile
@@ -0,0 +1,25 @@
+# Copyright 2024 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM golang:alpine AS build
+RUN apk add --no-cache git make
+WORKDIR /build
+COPY . .
+ENV CGO_ENABLED=0
+RUN make build
+
+FROM alpine:latest
+COPY --from=build /build/dist/yamlfmt /bin/yamlfmt
+WORKDIR /project
+ENTRYPOINT ["/bin/yamlfmt"]
diff --git a/vendor/github.com/google/yamlfmt/LICENSE b/vendor/github.com/google/yamlfmt/LICENSE
new file mode 100644
index 0000000..7a4a3ea
--- /dev/null
+++ b/vendor/github.com/google/yamlfmt/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. \ No newline at end of file
diff --git a/vendor/github.com/google/yamlfmt/Makefile b/vendor/github.com/google/yamlfmt/Makefile
new file mode 100644
index 0000000..2790ae3
--- /dev/null
+++ b/vendor/github.com/google/yamlfmt/Makefile
@@ -0,0 +1,62 @@
+.EXPORT_ALL_VARIABLES:
+
+VERSION := $(shell git describe --abbrev=0 --tags | tr -d v)
+COMMIT := $(shell git rev-parse --short HEAD)
+LDFLAGS := -X 'main.version=$(VERSION)' \
+ -X 'main.commit=$(COMMIT)'
+
+.PHONY: build
+build:
+ go build -ldflags "$(LDFLAGS)" -o dist/yamlfmt ./cmd/yamlfmt
+
+.PHONY: test
+test:
+ go test ./...
+
+.PHONY: test_v
+test_v:
+ go test -v ./...
+
+YAMLFMT_BIN ?= $(shell pwd)/dist/yamlfmt
+.PHONY: integrationtest
+integrationtest:
+ $(MAKE) build
+ go test -v -tags=integration_test ./integrationtest/command
+
+.PHONY: integrationtest_v
+integrationtest_v:
+ $(MAKE) build
+ go test -v -tags=integration_test ./integrationtest/command
+
+.PHONY: integrationtest_stdout
+integrationtest_stdout:
+ $(MAKE) build
+ go test -v -tags=integration_test ./integrationtest/command -stdout
+
+.PHONY: integrationtest_update
+integrationtest_update:
+ $(MAKE) build
+ go test -tags=integration_test ./integrationtest/command -update
+
+.PHONY: command_test_case
+command_test_case:
+ifndef TESTNAME
+ $(error "TESTNAME undefined")
+endif
+ ./integrationtest/command/new_test_case.sh "$(TESTNAME)"
+
+.PHONY: install
+install:
+ go install -ldflags "$(LDFLAGS)" ./cmd/yamlfmt
+
+.PHONY: install_tools
+install_tools:
+ go install github.com/google/addlicense@latest
+
+.PHONY: addlicense
+addlicense:
+ addlicense -ignore "**/testdata/**" -c "Google LLC" -l apache .
+
+.PHONY: addlicense_check
+addlicense_check:
+ addlicense -check -ignore "**/testdata/**" -c "Google LLC" -l apache .
diff --git a/vendor/github.com/google/yamlfmt/README.md b/vendor/github.com/google/yamlfmt/README.md
new file mode 100644
index 0000000..6052124
--- /dev/null
+++ b/vendor/github.com/google/yamlfmt/README.md
@@ -0,0 +1,112 @@
+# yamlfmt
+
+`yamlfmt` is an extensible command line tool or library to format yaml files.
+
+## Goals
+
+* Create a command line yaml formatting tool that is easy to distribute (single binary)
+* Make it simple to extend with new custom formatters
+* Enable alternative use as a library, providing a foundation for users to create a tool that meets specific needs
+
+## Maintainers
+
+This tool is not yet officially supported by Google. It is currently maintained solely by @braydonk and, unless something changes, primarily in spare time.
+
+## Blog
+
+I'm going to use these links to GitHub Discussions as a blog of sorts, until I can set up something more proper:
+* yamlfmt's recent slow development [#149](https://github.com/google/yamlfmt/discussions/149)
+* Issues related to the yaml.v3 library [#148](https://github.com/google/yamlfmt/discussions/148)
+
+## Installation
+
+To download the `yamlfmt` command, you can download the desired binary from releases or install the module directly:
+```
+go install github.com/google/yamlfmt/cmd/yamlfmt@latest
+```
+This currently requires Go version 1.21 or greater.
+
+NOTE: If this is your first time installing Go, a recommended setup is described in [this DigitalOcean blog post](https://www.digitalocean.com/community/tutorials/how-to-build-and-install-go-programs).
+
+You can also download the binary you want from releases. The binary is self-sufficient with no dependencies, and can simply be put somewhere on your PATH and run with the command `yamlfmt`. Read more about verifying the authenticity of released artifacts [here](#verifying-release-artifacts).
+
+You can also install the command as a [pre-commit](https://pre-commit.com/) hook. See the [pre-commit hook](./docs/pre-commit.md) docs for instructions.
+
+## Basic Usage
+
+See [Command Usage](./docs/command-usage.md) for in-depth information and available flags.
+
+To run the tool with all default settings, run the command with a path argument:
+```bash
+yamlfmt x.yaml y.yaml <...>
+```
+You can specify as many paths as you want. You can also specify a directory which will be searched recursively for any files with the extension `.yaml` or `.yml`.
+```bash
+yamlfmt .
+```
+
+You can also use an alternate mode that will search paths with doublestar globs by supplying the `-dstar` flag.
+```bash
+yamlfmt -dstar **/*.{yaml,yml}
+```
+See the [doublestar](https://github.com/bmatcuk/doublestar) package for more information on this format.
+
+Yamlfmt can also be used in CI/CD pipelines that support running containers. The following snippet shows an example job for GitLab CI:
+```yaml
+yaml lint:
+ image: ghcr.io/google/yamlfmt:latest
+ before_script:
+ - apk add git
+ script:
+ - yamlfmt .
+ - git diff --exit-code
+```
+The Docker image can also be used to run yamlfmt without installing it on your system. Just mount the directory you want to format as a volume (`/project` is used by default):
+```bash
+docker run -v "$(pwd):/project" ghcr.io/google/yamlfmt:latest <yamlfmt args>
+```
+
+# Configuration File
+
+The `yamlfmt` command can be configured through a yaml file called `.yamlfmt`. This file can live in your working directory, a path specified through a [CLI flag](./docs/command-usage.md#operation-flags), or in the standard global config path on your system (see docs for specifics).
+For in-depth configuration documentation see [Config](docs/config-file.md).
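As an illustrative (editorial) sketch, a `.yamlfmt` file is itself YAML; the keys below mirror the `Config` struct vendored in `command/command.go` later in this commit, while the values are assumptions rather than project defaults:

```yaml
# Illustrative only; values are assumptions, not recommendations.
line_ending: lf
gitignore_excludes: true
extensions:
  - yaml
  - yml
exclude:
  - vendor/**
```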
+
+## Verifying release artifacts
+
+NOTE: Support for verifying with cosign is present from v0.14.0 onward.
+
+In case you get the `yamlfmt` binary directly from a release, you may want to verify its authenticity. Checksums are applied to all released artifacts, and the resulting checksum file is signed using [cosign](https://docs.sigstore.dev/cosign/installation/).
+
+Steps to verify (replace `A.B.C` in the commands listed below with the version you want):
+
+1. Download the following files from the release:
+
+ ```text
+ curl -sfLO https://github.com/google/yamlfmt/releases/download/vA.B.C/checksums.txt
+ curl -sfLO https://github.com/google/yamlfmt/releases/download/vA.B.C/checksums.txt.pem
+ curl -sfLO https://github.com/google/yamlfmt/releases/download/vA.B.C/checksums.txt.sig
+ ```
+
+2. Verify the signature:
+
+ ```shell
+ cosign verify-blob checksums.txt \
+ --certificate checksums.txt.pem \
+ --signature checksums.txt.sig \
+ --certificate-identity-regexp 'https://github\.com/google/yamlfmt/\.github/workflows/.+' \
+ --certificate-oidc-issuer "https://token.actions.githubusercontent.com"
+ ```
+
+3. Download the compressed archive you want, and validate its checksum:
+
+ ```shell
+ curl -sfLO https://github.com/google/yamlfmt/releases/download/vA.B.C/yamlfmt_A.B.C_Linux_x86_64.tar.gz
+ sha256sum --ignore-missing -c checksums.txt
+ ```
+
+4. If checksum validation goes through, uncompress the archive:
+
+ ```shell
+ tar -xzf yamlfmt_A.B.C_Linux_x86_64.tar.gz
+ ./yamlfmt
+ ```
diff --git a/vendor/github.com/google/yamlfmt/command/command.go b/vendor/github.com/google/yamlfmt/command/command.go
new file mode 100644
index 0000000..3224f46
--- /dev/null
+++ b/vendor/github.com/google/yamlfmt/command/command.go
@@ -0,0 +1,258 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package command
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+
+ "github.com/google/yamlfmt"
+ "github.com/google/yamlfmt/engine"
+ "github.com/mitchellh/mapstructure"
+
+ "github.com/braydonk/yaml"
+)
+
+type FormatterConfig struct {
+ Type string `mapstructure:"type"`
+ FormatterSettings map[string]any `mapstructure:",remain"`
+}
+
+// NewFormatterConfig returns an empty formatter config with all fields initialized.
+func NewFormatterConfig() *FormatterConfig {
+ return &FormatterConfig{FormatterSettings: make(map[string]any)}
+}
+
+type Config struct {
+ Extensions []string `mapstructure:"extensions"`
+ MatchType yamlfmt.MatchType `mapstructure:"match_type"`
+ Include []string `mapstructure:"include"`
+ Exclude []string `mapstructure:"exclude"`
+ RegexExclude []string `mapstructure:"regex_exclude"`
+ FormatterConfig *FormatterConfig `mapstructure:"formatter,omitempty"`
+ Doublestar bool `mapstructure:"doublestar"`
+ ContinueOnError bool `mapstructure:"continue_on_error"`
+ LineEnding yamlfmt.LineBreakStyle `mapstructure:"line_ending"`
+ GitignoreExcludes bool `mapstructure:"gitignore_excludes"`
+ GitignorePath string `mapstructure:"gitignore_path"`
+ OutputFormat engine.EngineOutputFormat `mapstructure:"output_format"`
+}
+
+type Command struct {
+ Operation yamlfmt.Operation
+ Registry *yamlfmt.Registry
+ Config *Config
+ Quiet bool
+}
+
+func (c *Command) Run() error {
+ formatter, err := c.getFormatter()
+ if err != nil {
+ return err
+ }
+
+ lineSepChar, err := c.Config.LineEnding.Separator()
+ if err != nil {
+ return err
+ }
+
+ eng := &engine.ConsecutiveEngine{
+ LineSepCharacter: lineSepChar,
+ Formatter: formatter,
+ Quiet: c.Quiet,
+ ContinueOnError: c.Config.ContinueOnError,
+ OutputFormat: c.Config.OutputFormat,
+ }
+
+ collectedPaths, err := c.collectPaths()
+ if err != nil {
+ return err
+ }
+ if c.Config.GitignoreExcludes {
+ newPaths, err := yamlfmt.ExcludeWithGitignore(c.Config.GitignorePath, collectedPaths)
+ if err != nil {
+ return err
+ }
+ collectedPaths = newPaths
+ }
+
+ paths, err := c.analyzePaths(collectedPaths)
+ if err != nil {
+ fmt.Printf("path analysis found the following errors:\n%v", err)
+ fmt.Println("Continuing...")
+ }
+
+ switch c.Operation {
+ case yamlfmt.OperationFormat:
+ out, err := eng.Format(paths)
+ if out != nil {
+ fmt.Print(out)
+ }
+ if err != nil {
+ return err
+ }
+ case yamlfmt.OperationLint:
+ out, err := eng.Lint(paths)
+ if err != nil {
+ return err
+ }
+ if out != nil {
+ // This will be picked up by log.Fatal in main() and
+ // cause an exit code of 1, which is a critical
+ // component of the lint functionality.
+ return errors.New(out.String())
+ }
+ case yamlfmt.OperationDry:
+ out, err := eng.DryRun(paths)
+ if err != nil {
+ return err
+ }
+ if out != nil {
+ fmt.Print(out)
+ } else if !c.Quiet {
+ fmt.Println("No files will be changed.")
+ }
+ case yamlfmt.OperationStdin:
+ stdinYaml, err := readFromStdin()
+ if err != nil {
+ return err
+ }
+ out, err := eng.FormatContent(stdinYaml)
+ if err != nil {
+ return err
+ }
+ fmt.Print(string(out))
+ case yamlfmt.OperationPrintConfig:
+ commandConfig := map[string]any{}
+ err = mapstructure.Decode(c.Config, &commandConfig)
+ if err != nil {
+ return err
+ }
+ delete(commandConfig, "formatter")
+ out, err := yaml.Marshal(commandConfig)
+ if err != nil {
+ return err
+ }
+ fmt.Print(string(out))
+
+ formatterConfigMap, err := formatter.ConfigMap()
+ if err != nil {
+ return err
+ }
+ out, err = yaml.Marshal(map[string]any{
+ "formatter": formatterConfigMap,
+ })
+ if err != nil {
+ return err
+ }
+ fmt.Print(string(out))
+ }
+
+ return nil
+}
+
+func (c *Command) getFormatter() (yamlfmt.Formatter, error) {
+ var factoryType string
+
+ // In the existing codepaths, this value is always set. But
+ // it's a habit of mine to check anything that can possibly be nil
+ // if I remember that to be the case. :)
+ if c.Config.FormatterConfig != nil {
+ factoryType = c.Config.FormatterConfig.Type
+
+ // The line ending set within the formatter settings takes precedence over setting
+ // it from the top level config. If it's not set in formatter settings, then
+ // we use the value from the top level.
+ if _, ok := c.Config.FormatterConfig.FormatterSettings["line_ending"]; !ok {
+ c.Config.FormatterConfig.FormatterSettings["line_ending"] = c.Config.LineEnding
+ }
+ }
+
+ factory, err := c.Registry.GetFactory(factoryType)
+ if err != nil {
+ return nil, err
+ }
+ return factory.NewFormatter(c.Config.FormatterConfig.FormatterSettings)
+}
+
+func (c *Command) collectPaths() ([]string, error) {
+ collector, err := c.makePathCollector()
+ if err != nil {
+ return nil, err
+ }
+
+ return collector.CollectPaths()
+}
+
+func (c *Command) analyzePaths(paths []string) ([]string, error) {
+ analyzer, err := c.makeAnalyzer()
+ if err != nil {
+ return nil, err
+ }
+ includePaths, _, err := analyzer.ExcludePathsByContent(paths)
+ return includePaths, err
+}
+
+func (c *Command) makePathCollector() (yamlfmt.PathCollector, error) {
+ switch c.Config.MatchType {
+ case yamlfmt.MatchTypeDoublestar:
+ return &yamlfmt.DoublestarCollector{
+ Include: c.Config.Include,
+ Exclude: c.Config.Exclude,
+ }, nil
+ case yamlfmt.MatchTypeGitignore:
+ files := c.Config.Include
+ if len(files) == 0 {
+ files = []string{yamlfmt.DefaultPatternFile}
+ }
+
+ patternFile, err := yamlfmt.NewPatternFileCollector(files...)
+ if err != nil {
+ return nil, fmt.Errorf("NewPatternFile(%q): %w", files, err)
+ }
+
+ return patternFile, nil
+ default:
+ return &yamlfmt.FilepathCollector{
+ Include: c.Config.Include,
+ Exclude: c.Config.Exclude,
+ Extensions: c.Config.Extensions,
+ }, nil
+ }
+}
+
+func (c *Command) makeAnalyzer() (yamlfmt.ContentAnalyzer, error) {
+ return yamlfmt.NewBasicContentAnalyzer(c.Config.RegexExclude)
+}
+
+func readFromStdin() ([]byte, error) {
+ stdin := bufio.NewReader(os.Stdin)
+ data := []byte{}
+ for {
+ b, err := stdin.ReadByte()
+ if err != nil {
+ if err == io.EOF {
+ break
+ } else {
+ return nil, err
+ }
+ }
+ data = append(data, b)
+ }
+ return data, nil
+}
diff --git a/vendor/github.com/google/yamlfmt/content_analyzer.go b/vendor/github.com/google/yamlfmt/content_analyzer.go
new file mode 100644
index 0000000..4083e6b
--- /dev/null
+++ b/vendor/github.com/google/yamlfmt/content_analyzer.go
@@ -0,0 +1,90 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yamlfmt
+
+import (
+ "os"
+ "regexp"
+
+ "github.com/google/yamlfmt/internal/collections"
+)
+
+type ContentAnalyzer interface {
+ ExcludePathsByContent(paths []string) ([]string, []string, error)
+}
+
+type BasicContentAnalyzer struct {
+ RegexPatterns []*regexp.Regexp
+}
+
+func NewBasicContentAnalyzer(patterns []string) (BasicContentAnalyzer, error) {
+ analyzer := BasicContentAnalyzer{RegexPatterns: []*regexp.Regexp{}}
+ compileErrs := collections.Errors{}
+ for _, pattern := range patterns {
+ re, err := regexp.Compile(pattern)
+ if err != nil {
+ compileErrs = append(compileErrs, err)
+ continue
+ }
+ analyzer.RegexPatterns = append(analyzer.RegexPatterns, re)
+ }
+ return analyzer, compileErrs.Combine()
+}
+
+func (a BasicContentAnalyzer) ExcludePathsByContent(paths []string) ([]string, []string, error) {
+ pathsToFormat := collections.SliceToSet(paths)
+ pathsExcluded := []string{}
+ pathErrs := collections.Errors{}
+
+ for _, path := range paths {
+ content, err := os.ReadFile(path)
+ if err != nil {
+ pathErrs = append(pathErrs, err)
+ continue
+ }
+
+ // Search metadata for ignore
+ metadata, mdErrs := ReadMetadata(content, path)
+ if len(mdErrs) != 0 {
+ pathErrs = append(pathErrs, mdErrs...)
+ }
+ ignoreFound := false
+ for md := range metadata {
+ if md.Type == MetadataIgnore {
+ ignoreFound = true
+ break
+ }
+ }
+ if ignoreFound {
+ pathsExcluded = append(pathsExcluded, path)
+ pathsToFormat.Remove(path)
+ continue
+ }
+
+ // Check if content matches any regex
+ matched := false
+ for _, pattern := range a.RegexPatterns {
+ if pattern.Match(content) {
+ matched = true
+ }
+ }
+ if matched {
+ pathsExcluded = append(pathsExcluded, path)
+ pathsToFormat.Remove(path)
+ }
+ }
+
+ return pathsToFormat.ToSlice(), pathsExcluded, pathErrs.Combine()
+}
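
A minimal usage sketch of the analyzer above (editor's illustration, not part of the vendored file; the file names and the regex pattern are hypothetical). Paths whose content matches any pattern, or that carry ignore metadata, end up in the excluded slice.

package main

import (
	"fmt"

	"github.com/google/yamlfmt"
)

func main() {
	// Exclude any YAML file whose content matches this pattern.
	analyzer, err := yamlfmt.NewBasicContentAnalyzer([]string{`generated by .*`})
	if err != nil {
		panic(err) // one or more patterns failed to compile
	}
	include, excluded, err := analyzer.ExcludePathsByContent([]string{"a.yaml", "b.yaml"})
	if err != nil {
		fmt.Println("warnings:", err) // unreadable paths or malformed metadata
	}
	fmt.Println("format:", include)
	fmt.Println("skip:", excluded)
}
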
diff --git a/vendor/github.com/google/yamlfmt/engine.go b/vendor/github.com/google/yamlfmt/engine.go
new file mode 100644
index 0000000..b98ee89
--- /dev/null
+++ b/vendor/github.com/google/yamlfmt/engine.go
@@ -0,0 +1,127 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yamlfmt
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/google/yamlfmt/internal/collections"
+ "github.com/google/yamlfmt/internal/multilinediff"
+)
+
+type Operation int
+
+const (
+ OperationFormat Operation = iota
+ OperationLint
+ OperationDry
+ OperationStdin
+ OperationPrintConfig
+)
+
+type Engine interface {
+ FormatContent(content []byte) ([]byte, error)
+ Format(paths []string) (fmt.Stringer, error)
+ Lint(paths []string) (fmt.Stringer, error)
+ DryRun(paths []string) (fmt.Stringer, error)
+}
+
+type FormatDiff struct {
+ Original string
+ Formatted string
+ LineSep string
+}
+
+func (d *FormatDiff) MultilineDiff() (string, int) {
+ return multilinediff.Diff(d.Original, d.Formatted, d.LineSep)
+}
+
+func (d *FormatDiff) Changed() bool {
+ return d.Original != d.Formatted
+}
+
+type FileDiff struct {
+ Path string
+ Diff *FormatDiff
+}
+
+func (fd *FileDiff) StrOutput() string {
+ diffStr, _ := fd.Diff.MultilineDiff()
+ return fmt.Sprintf("%s:\n%s\n", fd.Path, diffStr)
+}
+
+func (fd *FileDiff) StrOutputQuiet() string {
+ return fd.Path + "\n"
+}
+
+func (fd *FileDiff) Apply() error {
+ // If there is no diff in the format, there is no need to write the file.
+ if !fd.Diff.Changed() {
+ return nil
+ }
+ return os.WriteFile(fd.Path, []byte(fd.Diff.Formatted), 0644)
+}
+
+type FileDiffs map[string]*FileDiff
+
+func (fds FileDiffs) Add(diff *FileDiff) error {
+ if _, ok := fds[diff.Path]; ok {
+ return fmt.Errorf("a diff for %s already exists", diff.Path)
+ }
+
+ fds[diff.Path] = diff
+ return nil
+}
+
+func (fds FileDiffs) StrOutput() string {
+ result := ""
+ for _, fd := range fds {
+ if fd.Diff.Changed() {
+ result += fd.StrOutput()
+ }
+ }
+ return result
+}
+
+func (fds FileDiffs) StrOutputQuiet() string {
+ result := ""
+ for _, fd := range fds {
+ if fd.Diff.Changed() {
+ result += fd.StrOutputQuiet()
+ }
+ }
+ return result
+}
+
+func (fds FileDiffs) ApplyAll() error {
+ applyErrs := make(collections.Errors, len(fds))
+ i := 0
+ for _, diff := range fds {
+ applyErrs[i] = diff.Apply()
+ i++
+ }
+ return applyErrs.Combine()
+}
+
+func (fds FileDiffs) ChangedCount() int {
+ changed := 0
+ for _, fd := range fds {
+ if fd.Diff.Changed() {
+ changed++
+ }
+ }
+ return changed
+}
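
A small sketch of how FileDiff and FileDiffs fit together (illustrative only; the path and content are made up). Only entries whose Original and Formatted differ count as changed and appear in the output.

package main

import (
	"fmt"

	"github.com/google/yamlfmt"
)

func main() {
	diffs := yamlfmt.FileDiffs{}
	_ = diffs.Add(&yamlfmt.FileDiff{
		Path: "example.yaml",
		Diff: &yamlfmt.FormatDiff{
			Original:  "key:   value\n",
			Formatted: "key: value\n",
			LineSep:   "\n",
		},
	})
	fmt.Printf("%d file(s) changed\n", diffs.ChangedCount())
	fmt.Print(diffs.StrOutput()) // per-file multiline diff rendering
}
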
diff --git a/vendor/github.com/google/yamlfmt/engine/consecutive_engine.go b/vendor/github.com/google/yamlfmt/engine/consecutive_engine.go
new file mode 100644
index 0000000..650d1b7
--- /dev/null
+++ b/vendor/github.com/google/yamlfmt/engine/consecutive_engine.go
@@ -0,0 +1,103 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package engine
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/google/yamlfmt"
+)
+
+// ConsecutiveEngine is an Engine implementation that processes each file one at a time, consecutively.
+type ConsecutiveEngine struct {
+ LineSepCharacter string
+ Formatter yamlfmt.Formatter
+ Quiet bool
+ ContinueOnError bool
+ OutputFormat EngineOutputFormat
+}
+
+func (e *ConsecutiveEngine) FormatContent(content []byte) ([]byte, error) {
+ return e.Formatter.Format(content)
+}
+
+func (e *ConsecutiveEngine) Format(paths []string) (fmt.Stringer, error) {
+ formatDiffs, formatErrs := e.formatAll(paths)
+ if len(formatErrs) > 0 {
+ if e.ContinueOnError {
+ fmt.Print(formatErrs)
+ fmt.Println("Continuing...")
+ } else {
+ return nil, formatErrs
+ }
+ }
+ return nil, formatDiffs.ApplyAll()
+}
+
+func (e *ConsecutiveEngine) Lint(paths []string) (fmt.Stringer, error) {
+ formatDiffs, formatErrs := e.formatAll(paths)
+ if len(formatErrs) > 0 {
+ return nil, formatErrs
+ }
+ if formatDiffs.ChangedCount() == 0 {
+ return nil, nil
+ }
+ return getEngineOutput(e.OutputFormat, yamlfmt.OperationLint, formatDiffs, e.Quiet)
+}
+
+func (e *ConsecutiveEngine) DryRun(paths []string) (fmt.Stringer, error) {
+ formatDiffs, formatErrs := e.formatAll(paths)
+ if len(formatErrs) > 0 {
+ return nil, formatErrs
+ }
+ if formatDiffs.ChangedCount() == 0 {
+ return nil, nil
+ }
+ return getEngineOutput(e.OutputFormat, yamlfmt.OperationDry, formatDiffs, e.Quiet)
+}
+
+func (e *ConsecutiveEngine) formatAll(paths []string) (yamlfmt.FileDiffs, FormatErrors) {
+ formatDiffs := yamlfmt.FileDiffs{}
+ formatErrs := FormatErrors{}
+ for _, path := range paths {
+ fileDiff, err := e.formatFileContent(path)
+ if err != nil {
+ formatErrs = append(formatErrs, wrapFormatError(path, err))
+ continue
+ }
+ formatDiffs.Add(fileDiff)
+ }
+ return formatDiffs, formatErrs
+}
+
+func (e *ConsecutiveEngine) formatFileContent(path string) (*yamlfmt.FileDiff, error) {
+ content, err := os.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+ formatted, err := e.FormatContent(content)
+ if err != nil {
+ return nil, err
+ }
+ return &yamlfmt.FileDiff{
+ Path: path,
+ Diff: &yamlfmt.FormatDiff{
+ Original: string(content),
+ Formatted: string(formatted),
+ LineSep: e.LineSepCharacter,
+ },
+ }, nil
+}
diff --git a/vendor/github.com/google/yamlfmt/engine/errors.go b/vendor/github.com/google/yamlfmt/engine/errors.go
new file mode 100644
index 0000000..27dbba1
--- /dev/null
+++ b/vendor/github.com/google/yamlfmt/engine/errors.go
@@ -0,0 +1,40 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package engine
+
+import "fmt"
+
+type FormatErrors []*FormatError
+
+func (e FormatErrors) Error() string {
+ errStr := "encountered the following formatting errors:\n"
+ for _, err := range e {
+ errStr += fmt.Sprintf("%s\n", err.Error())
+ }
+ return errStr
+}
+
+type FormatError struct {
+ path string
+ err error
+}
+
+func wrapFormatError(path string, err error) *FormatError {
+ return &FormatError{path: path, err: err}
+}
+
+func (e *FormatError) Error() string {
+ return fmt.Sprintf("%s: %v", e.path, e.err)
+}
diff --git a/vendor/github.com/google/yamlfmt/engine/output.go b/vendor/github.com/google/yamlfmt/engine/output.go
new file mode 100644
index 0000000..2d35e84
--- /dev/null
+++ b/vendor/github.com/google/yamlfmt/engine/output.go
@@ -0,0 +1,138 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package engine
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/google/yamlfmt"
+ "github.com/google/yamlfmt/internal/gitlab"
+)
+
+type EngineOutputFormat string
+
+const (
+ EngineOutputDefault EngineOutputFormat = "default"
+ EngineOutputSingeLine EngineOutputFormat = "line"
+ EngineOutputGitlab EngineOutputFormat = "gitlab"
+)
+
+func getEngineOutput(t EngineOutputFormat, operation yamlfmt.Operation, files yamlfmt.FileDiffs, quiet bool) (fmt.Stringer, error) {
+ switch t {
+ case EngineOutputDefault:
+ return engineOutput{Operation: operation, Files: files, Quiet: quiet}, nil
+ case EngineOutputSingeLine:
+ return engineOutputSingleLine{Operation: operation, Files: files, Quiet: quiet}, nil
+ case EngineOutputGitlab:
+ return engineOutputGitlab{Operation: operation, Files: files, Compact: quiet}, nil
+
+ }
+ return nil, fmt.Errorf("unknown output type: %s", t)
+}
+
+type engineOutput struct {
+ Operation yamlfmt.Operation
+ Files yamlfmt.FileDiffs
+ Quiet bool
+}
+
+func (eo engineOutput) String() string {
+ var msg string
+ switch eo.Operation {
+ case yamlfmt.OperationLint:
+ msg = "The following formatting differences were found:"
+ if eo.Quiet {
+ msg = "The following files had formatting differences:"
+ }
+ case yamlfmt.OperationDry:
+ if len(eo.Files) > 0 {
+ if eo.Quiet {
+ msg = "The following files would be formatted:"
+ }
+ } else {
+			return "No files will be formatted."
+ }
+ }
+ var result string
+ if msg != "" {
+ result += fmt.Sprintf("%s\n\n", msg)
+ }
+ if eo.Quiet {
+ result += eo.Files.StrOutputQuiet()
+ } else {
+ result += fmt.Sprintf("%s\n", eo.Files.StrOutput())
+ }
+ return result
+}
+
+type engineOutputSingleLine struct {
+ Operation yamlfmt.Operation
+ Files yamlfmt.FileDiffs
+ Quiet bool
+}
+
+func (eosl engineOutputSingleLine) String() string {
+ var msg string
+ for _, fileDiff := range eosl.Files {
+ msg += fmt.Sprintf("%s: formatting difference found\n", fileDiff.Path)
+ }
+ return msg
+}
+
+type engineOutputGitlab struct {
+ Operation yamlfmt.Operation
+ Files yamlfmt.FileDiffs
+ Compact bool
+}
+
+func (eo engineOutputGitlab) String() string {
+ var findings []gitlab.CodeQuality
+
+ for _, file := range eo.Files {
+ if cq, ok := gitlab.NewCodeQuality(*file); ok {
+ findings = append(findings, cq)
+ }
+ }
+
+ if len(findings) == 0 {
+ return ""
+ }
+
+ sort.Sort(byPath(findings))
+
+ var b strings.Builder
+ enc := json.NewEncoder(&b)
+
+ if !eo.Compact {
+ enc.SetIndent("", " ")
+ }
+
+ if err := enc.Encode(findings); err != nil {
+ panic(err)
+ }
+ return b.String()
+}
+
+// byPath is used to sort by Location.Path.
+type byPath []gitlab.CodeQuality
+
+func (b byPath) Len() int { return len(b) }
+func (b byPath) Less(i, j int) bool { return b[i].Location.Path < b[j].Location.Path }
+func (b byPath) Swap(i, j int) {
+ b[i].Location.Path, b[j].Location.Path = b[j].Location.Path, b[i].Location.Path
+}
diff --git a/vendor/github.com/google/yamlfmt/feature.go b/vendor/github.com/google/yamlfmt/feature.go
new file mode 100644
index 0000000..af56dda
--- /dev/null
+++ b/vendor/github.com/google/yamlfmt/feature.go
@@ -0,0 +1,78 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yamlfmt
+
+import (
+ "context"
+ "fmt"
+)
+
+type FeatureFunc func(context.Context, []byte) (context.Context, []byte, error)
+
+type Feature struct {
+ Name string
+ BeforeAction FeatureFunc
+ AfterAction FeatureFunc
+}
+
+type FeatureList []Feature
+
+type FeatureApplyMode string
+
+var (
+ FeatureApplyBefore FeatureApplyMode = "Before"
+ FeatureApplyAfter FeatureApplyMode = "After"
+)
+
+type FeatureApplyError struct {
+ err error
+ featureName string
+ mode FeatureApplyMode
+}
+
+func (e *FeatureApplyError) Error() string {
+ return fmt.Sprintf("Feature %s %sAction failed with error: %v", e.featureName, e.mode, e.err)
+}
+
+func (e *FeatureApplyError) Unwrap() error {
+ return e.err
+}
+
+func (fl FeatureList) ApplyFeatures(ctx context.Context, input []byte, mode FeatureApplyMode) (context.Context, []byte, error) {
+ // Declare err here so the result variable doesn't get shadowed in the loop
+ var err error
+ result := make([]byte, len(input))
+ copy(result, input)
+ for _, feature := range fl {
+ if mode == FeatureApplyBefore {
+ if feature.BeforeAction != nil {
+ ctx, result, err = feature.BeforeAction(ctx, result)
+ }
+ } else {
+ if feature.AfterAction != nil {
+ ctx, result, err = feature.AfterAction(ctx, result)
+ }
+ }
+
+ if err != nil {
+ return ctx, nil, &FeatureApplyError{
+ err: err,
+ featureName: feature.Name,
+ mode: mode,
+ }
+ }
+ }
+ return ctx, result, nil
+}
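
A sketch of wiring a custom Feature into a FeatureList (the feature itself is a made-up example, not one shipped with yamlfmt). Its BeforeAction runs when the list is applied with FeatureApplyBefore.

package main

import (
	"context"
	"fmt"

	"github.com/google/yamlfmt"
)

func main() {
	stamp := yamlfmt.Feature{
		Name: "Stamp",
		BeforeAction: func(ctx context.Context, content []byte) (context.Context, []byte, error) {
			// Prepend a marker comment before formatting.
			return ctx, append([]byte("# stamped\n"), content...), nil
		},
	}
	features := yamlfmt.FeatureList{stamp}
	_, out, err := features.ApplyFeatures(context.Background(), []byte("key: value\n"), yamlfmt.FeatureApplyBefore)
	if err != nil {
		panic(err) // would be a *FeatureApplyError naming the failed feature
	}
	fmt.Print(string(out))
}
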
diff --git a/vendor/github.com/google/yamlfmt/formatter.go b/vendor/github.com/google/yamlfmt/formatter.go
new file mode 100644
index 0000000..ce432ee
--- /dev/null
+++ b/vendor/github.com/google/yamlfmt/formatter.go
@@ -0,0 +1,65 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yamlfmt
+
+import "fmt"
+
+type Formatter interface {
+ Type() string
+ Format(yamlContent []byte) ([]byte, error)
+ ConfigMap() (map[string]any, error)
+}
+
+type Factory interface {
+ Type() string
+ NewFormatter(config map[string]interface{}) (Formatter, error)
+}
+
+type Registry struct {
+ registry map[string]Factory
+ defaultType string
+}
+
+func NewFormatterRegistry(defaultFactory Factory) *Registry {
+ return &Registry{
+ registry: map[string]Factory{
+ defaultFactory.Type(): defaultFactory,
+ },
+ defaultType: defaultFactory.Type(),
+ }
+}
+
+func (r *Registry) Add(f Factory) {
+ r.registry[f.Type()] = f
+}
+
+func (r *Registry) GetFactory(fType string) (Factory, error) {
+ if fType == "" {
+ return r.GetDefaultFactory()
+ }
+ factory, ok := r.registry[fType]
+ if !ok {
+ return nil, fmt.Errorf("no formatter registered with type \"%s\"", fType)
+ }
+ return factory, nil
+}
+
+func (r *Registry) GetDefaultFactory() (Factory, error) {
+ factory, ok := r.registry[r.defaultType]
+ if !ok {
+ return nil, fmt.Errorf("no default formatter registered for type \"%s\"", r.defaultType)
+ }
+ return factory, nil
+}
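
A sketch of the registry in use, assuming the basic formatter defined later in this change is registered as the default; an empty type string falls back to the default factory.

package main

import (
	"fmt"

	"github.com/google/yamlfmt"
	"github.com/google/yamlfmt/formatters/basic"
)

func main() {
	registry := yamlfmt.NewFormatterRegistry(&basic.BasicFormatterFactory{})
	factory, err := registry.GetFactory("") // empty type resolves to the default factory
	if err != nil {
		panic(err)
	}
	formatter, err := factory.NewFormatter(map[string]interface{}{"indent": 2})
	if err != nil {
		panic(err)
	}
	fmt.Println(formatter.Type()) // basic
}
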
diff --git a/vendor/github.com/google/yamlfmt/formatters/basic/README.md b/vendor/github.com/google/yamlfmt/formatters/basic/README.md
new file mode 100644
index 0000000..af50a64
--- /dev/null
+++ b/vendor/github.com/google/yamlfmt/formatters/basic/README.md
@@ -0,0 +1,3 @@
+# Basic Formatter
+
+For formatter settings, see [the configuration docs](../../docs/config-file.md).
diff --git a/vendor/github.com/google/yamlfmt/formatters/basic/anchors/check.go b/vendor/github.com/google/yamlfmt/formatters/basic/anchors/check.go
new file mode 100644
index 0000000..aef3070
--- /dev/null
+++ b/vendor/github.com/google/yamlfmt/formatters/basic/anchors/check.go
@@ -0,0 +1,37 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package anchors
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/braydonk/yaml"
+)
+
+func Check(n yaml.Node) error {
+ if n.Kind == yaml.AliasNode {
+ return errors.New("alias node found")
+ }
+ if n.Anchor != "" {
+ return fmt.Errorf("node references anchor %q", n.Anchor)
+ }
+ for _, c := range n.Content {
+ if err := Check(*c); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/google/yamlfmt/formatters/basic/config.go b/vendor/github.com/google/yamlfmt/formatters/basic/config.go
new file mode 100644
index 0000000..82e67eb
--- /dev/null
+++ b/vendor/github.com/google/yamlfmt/formatters/basic/config.go
@@ -0,0 +1,52 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package basic
+
+import (
+ "runtime"
+
+ "github.com/google/yamlfmt"
+)
+
+type Config struct {
+ Indent int `mapstructure:"indent"`
+ IncludeDocumentStart bool `mapstructure:"include_document_start"`
+ LineEnding yamlfmt.LineBreakStyle `mapstructure:"line_ending"`
+ LineLength int `mapstructure:"max_line_length"`
+ RetainLineBreaks bool `mapstructure:"retain_line_breaks"`
+ RetainLineBreaksSingle bool `mapstructure:"retain_line_breaks_single"`
+ DisallowAnchors bool `mapstructure:"disallow_anchors"`
+ ScanFoldedAsLiteral bool `mapstructure:"scan_folded_as_literal"`
+ IndentlessArrays bool `mapstructure:"indentless_arrays"`
+ DropMergeTag bool `mapstructure:"drop_merge_tag"`
+ PadLineComments int `mapstructure:"pad_line_comments"`
+ TrimTrailingWhitespace bool `mapstructure:"trim_trailing_whitespace"`
+ EOFNewline bool `mapstructure:"eof_newline"`
+ StripDirectives bool `mapstructure:"strip_directives"`
+ ArrayIndent int `mapstructure:"array_indent"`
+ IndentRootArray bool `mapstructure:"indent_root_array"`
+}
+
+func DefaultConfig() *Config {
+ lineBreakStyle := yamlfmt.LineBreakStyleLF
+ if runtime.GOOS == "windows" {
+ lineBreakStyle = yamlfmt.LineBreakStyleCRLF
+ }
+ return &Config{
+ Indent: 2,
+ LineEnding: lineBreakStyle,
+ PadLineComments: 1,
+ }
+}
diff --git a/vendor/github.com/google/yamlfmt/formatters/basic/errors.go b/vendor/github.com/google/yamlfmt/formatters/basic/errors.go
new file mode 100644
index 0000000..02c0a89
--- /dev/null
+++ b/vendor/github.com/google/yamlfmt/formatters/basic/errors.go
@@ -0,0 +1,33 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package basic
+
+import "fmt"
+
+type BasicFormatterError struct {
+ err error
+}
+
+func (e BasicFormatterError) Error() string {
+ return fmt.Sprintf("basic formatter error: %v", e.err)
+}
+
+func (e BasicFormatterError) Unwrap() error {
+ return e.err
+}
+
+// func wrapBasicFormatterError(err error) error {
+// return BasicFormatterError{err: err}
+// }
diff --git a/vendor/github.com/google/yamlfmt/formatters/basic/factory.go b/vendor/github.com/google/yamlfmt/formatters/basic/factory.go
new file mode 100644
index 0000000..eb536b0
--- /dev/null
+++ b/vendor/github.com/google/yamlfmt/formatters/basic/factory.go
@@ -0,0 +1,45 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package basic
+
+import (
+ "github.com/google/yamlfmt"
+ "github.com/mitchellh/mapstructure"
+)
+
+type BasicFormatterFactory struct{}
+
+func (f *BasicFormatterFactory) Type() string {
+ return BasicFormatterType
+}
+
+func (f *BasicFormatterFactory) NewFormatter(configData map[string]interface{}) (yamlfmt.Formatter, error) {
+ config := DefaultConfig()
+ if configData != nil {
+ err := mapstructure.Decode(configData, &config)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return newFormatter(config), nil
+}
+
+func newFormatter(config *Config) yamlfmt.Formatter {
+ return &BasicFormatter{
+ Config: config,
+ Features: ConfigureFeaturesFromConfig(config),
+ YAMLFeatures: ConfigureYAMLFeaturesFromConfig(config),
+ }
+}
diff --git a/vendor/github.com/google/yamlfmt/formatters/basic/features.go b/vendor/github.com/google/yamlfmt/formatters/basic/features.go
new file mode 100644
index 0000000..de736f4
--- /dev/null
+++ b/vendor/github.com/google/yamlfmt/formatters/basic/features.go
@@ -0,0 +1,78 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package basic
+
+import (
+ "github.com/braydonk/yaml"
+ "github.com/google/yamlfmt"
+ "github.com/google/yamlfmt/formatters/basic/anchors"
+ "github.com/google/yamlfmt/internal/features"
+ "github.com/google/yamlfmt/internal/hotfix"
+)
+
+func ConfigureFeaturesFromConfig(config *Config) yamlfmt.FeatureList {
+ lineSep, err := config.LineEnding.Separator()
+ if err != nil {
+ lineSep = "\n"
+ }
+ configuredFeatures := []yamlfmt.Feature{}
+ if config.RetainLineBreaks || config.RetainLineBreaksSingle {
+ configuredFeatures = append(
+ configuredFeatures,
+ hotfix.MakeFeatureRetainLineBreak(lineSep, config.RetainLineBreaksSingle),
+ )
+ }
+ if config.TrimTrailingWhitespace {
+ configuredFeatures = append(
+ configuredFeatures,
+ features.MakeFeatureTrimTrailingWhitespace(lineSep),
+ )
+ }
+ if config.EOFNewline {
+ configuredFeatures = append(
+ configuredFeatures,
+ features.MakeFeatureEOFNewline(lineSep),
+ )
+ }
+ if config.StripDirectives {
+ configuredFeatures = append(
+ configuredFeatures,
+ hotfix.MakeFeatureStripDirectives(lineSep),
+ )
+ }
+ return configuredFeatures
+}
+
+// These features will directly use the `yaml.Node` type and
+// as such are specific to this formatter.
+type YAMLFeatureFunc func(yaml.Node) error
+type YAMLFeatureList []YAMLFeatureFunc
+
+func (y YAMLFeatureList) ApplyFeatures(node yaml.Node) error {
+ for _, f := range y {
+ if err := f(node); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func ConfigureYAMLFeaturesFromConfig(config *Config) YAMLFeatureList {
+ var features YAMLFeatureList
+ if config.DisallowAnchors {
+ features = append(features, anchors.Check)
+ }
+ return features
+}
diff --git a/vendor/github.com/google/yamlfmt/formatters/basic/formatter.go b/vendor/github.com/google/yamlfmt/formatters/basic/formatter.go
new file mode 100644
index 0000000..26bcc45
--- /dev/null
+++ b/vendor/github.com/google/yamlfmt/formatters/basic/formatter.go
@@ -0,0 +1,133 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package basic
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "io"
+
+ "github.com/braydonk/yaml"
+ "github.com/google/yamlfmt"
+ "github.com/mitchellh/mapstructure"
+)
+
+const BasicFormatterType string = "basic"
+
+type BasicFormatter struct {
+ Config *Config
+ Features yamlfmt.FeatureList
+ YAMLFeatures YAMLFeatureList
+}
+
+// yamlfmt.Formatter interface
+
+func (f *BasicFormatter) Type() string {
+ return BasicFormatterType
+}
+
+func (f *BasicFormatter) Format(input []byte) ([]byte, error) {
+ // Run all features with BeforeActions
+ ctx := context.Background()
+ ctx, yamlContent, err := f.Features.ApplyFeatures(ctx, input, yamlfmt.FeatureApplyBefore)
+ if err != nil {
+ return nil, err
+ }
+
+ // Format the yaml content
+ reader := bytes.NewReader(yamlContent)
+ decoder := f.getNewDecoder(reader)
+ documents := []yaml.Node{}
+ for {
+ var docNode yaml.Node
+ err := decoder.Decode(&docNode)
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ return nil, err
+ }
+ documents = append(documents, docNode)
+ }
+
+ // Run all YAML features.
+ for _, d := range documents {
+ if err := f.YAMLFeatures.ApplyFeatures(d); err != nil {
+ return nil, err
+ }
+ }
+
+ var b bytes.Buffer
+ e := f.getNewEncoder(&b)
+ for _, doc := range documents {
+ err := e.Encode(&doc)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Run all features with AfterActions
+ _, resultYaml, err := f.Features.ApplyFeatures(ctx, b.Bytes(), yamlfmt.FeatureApplyAfter)
+ if err != nil {
+ return nil, err
+ }
+
+ return resultYaml, nil
+}
+
+func (f *BasicFormatter) getNewDecoder(reader io.Reader) *yaml.Decoder {
+ d := yaml.NewDecoder(reader)
+ if f.Config.ScanFoldedAsLiteral {
+ d.SetScanBlockScalarAsLiteral(true)
+ }
+ return d
+}
+
+func (f *BasicFormatter) getNewEncoder(buf *bytes.Buffer) *yaml.Encoder {
+ e := yaml.NewEncoder(buf)
+ e.SetIndent(f.Config.Indent)
+
+ if f.Config.LineLength > 0 {
+ e.SetWidth(f.Config.LineLength)
+ }
+
+ if f.Config.LineEnding == yamlfmt.LineBreakStyleCRLF {
+ e.SetLineBreakStyle(yaml.LineBreakStyleCRLF)
+ }
+
+ e.SetExplicitDocumentStart(f.Config.IncludeDocumentStart)
+ e.SetAssumeBlockAsLiteral(f.Config.ScanFoldedAsLiteral)
+ e.SetIndentlessBlockSequence(f.Config.IndentlessArrays)
+ e.SetDropMergeTag(f.Config.DropMergeTag)
+ e.SetPadLineComments(f.Config.PadLineComments)
+
+ if f.Config.ArrayIndent > 0 {
+ e.SetArrayIndent(f.Config.ArrayIndent)
+ }
+ e.SetIndentRootArray(f.Config.IndentRootArray)
+
+ return e
+}
+
+func (f *BasicFormatter) ConfigMap() (map[string]any, error) {
+ configMap := map[string]any{}
+ err := mapstructure.Decode(f.Config, &configMap)
+ if err != nil {
+ return nil, err
+ }
+ configMap["type"] = BasicFormatterType
+ return configMap, err
+}
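
A sketch of formatting content directly through the basic formatter's factory. The input YAML and settings are illustrative; the exact output depends on the braydonk/yaml encoder and the options passed.

package main

import (
	"fmt"

	"github.com/google/yamlfmt/formatters/basic"
)

func main() {
	factory := &basic.BasicFormatterFactory{}
	formatter, err := factory.NewFormatter(map[string]interface{}{
		"indent":             2,
		"retain_line_breaks": true,
	})
	if err != nil {
		panic(err)
	}
	out, err := formatter.Format([]byte("a:   1\n\nb:\n    - x\n"))
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}
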
diff --git a/vendor/github.com/google/yamlfmt/internal/collections/errors.go b/vendor/github.com/google/yamlfmt/internal/collections/errors.go
new file mode 100644
index 0000000..c800700
--- /dev/null
+++ b/vendor/github.com/google/yamlfmt/internal/collections/errors.go
@@ -0,0 +1,34 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package collections
+
+import "errors"
+
+type Errors []error
+
+func (errs Errors) Combine() error {
+ errMessage := ""
+
+ for _, err := range errs {
+ if err != nil {
+ errMessage += err.Error() + "\n"
+ }
+ }
+
+ if len(errMessage) == 0 {
+ return nil
+ }
+ return errors.New(errMessage)
+}
diff --git a/vendor/github.com/google/yamlfmt/internal/collections/set.go b/vendor/github.com/google/yamlfmt/internal/collections/set.go
new file mode 100644
index 0000000..97f70bc
--- /dev/null
+++ b/vendor/github.com/google/yamlfmt/internal/collections/set.go
@@ -0,0 +1,71 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package collections
+
+type Set[T comparable] map[T]struct{}
+
+func (s Set[T]) Add(el ...T) {
+ for _, el := range el {
+ s[el] = struct{}{}
+ }
+}
+
+func (s Set[T]) Remove(el T) bool {
+ if !s.Contains(el) {
+ return false
+ }
+ delete(s, el)
+ return true
+}
+
+func (s Set[T]) Contains(el T) bool {
+ _, ok := s[el]
+ return ok
+}
+
+func (s Set[T]) ToSlice() []T {
+ sl := []T{}
+ for el := range s {
+ sl = append(sl, el)
+ }
+ return sl
+}
+
+func (s Set[T]) Clone() Set[T] {
+ newSet := Set[T]{}
+ for el := range s {
+ newSet.Add(el)
+ }
+ return newSet
+}
+
+func (s Set[T]) Equals(rhs Set[T]) bool {
+ if len(s) != len(rhs) {
+ return false
+ }
+ rhsClone := rhs.Clone()
+ for el := range s {
+ rhsClone.Remove(el)
+ }
+ return len(rhsClone) == 0
+}
+
+func SliceToSet[T comparable](sl []T) Set[T] {
+ set := Set[T]{}
+ for _, el := range sl {
+ set.Add(el)
+ }
+ return set
+}
diff --git a/vendor/github.com/google/yamlfmt/internal/collections/slice.go b/vendor/github.com/google/yamlfmt/internal/collections/slice.go
new file mode 100644
index 0000000..b4a9f3b
--- /dev/null
+++ b/vendor/github.com/google/yamlfmt/internal/collections/slice.go
@@ -0,0 +1,24 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package collections
+
+func SliceContains[T comparable](haystack []T, needle T) bool {
+ for _, e := range haystack {
+ if e == needle {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/google/yamlfmt/internal/features/eof_newline.go b/vendor/github.com/google/yamlfmt/internal/features/eof_newline.go
new file mode 100644
index 0000000..d77c390
--- /dev/null
+++ b/vendor/github.com/google/yamlfmt/internal/features/eof_newline.go
@@ -0,0 +1,39 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package features
+
+import (
+ "context"
+
+ "github.com/google/yamlfmt"
+)
+
+func MakeFeatureEOFNewline(linebreakStr string) yamlfmt.Feature {
+ return yamlfmt.Feature{
+ Name: "EOF Newline",
+ AfterAction: eofNewlineFeature(linebreakStr),
+ }
+}
+
+func eofNewlineFeature(linebreakStr string) yamlfmt.FeatureFunc {
+ return func(_ context.Context, content []byte) (context.Context, []byte, error) {
+ // This check works in both linebreak modes.
+ if len(content) == 0 || content[len(content)-1] != '\n' {
+ linebreakBytes := []byte(linebreakStr)
+ content = append(content, linebreakBytes...)
+ }
+ return nil, content, nil
+ }
+}
diff --git a/vendor/github.com/google/yamlfmt/internal/features/trim_whitespace.go b/vendor/github.com/google/yamlfmt/internal/features/trim_whitespace.go
new file mode 100644
index 0000000..7b5bd63
--- /dev/null
+++ b/vendor/github.com/google/yamlfmt/internal/features/trim_whitespace.go
@@ -0,0 +1,43 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package features
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "strings"
+
+ "github.com/google/yamlfmt"
+)
+
+func MakeFeatureTrimTrailingWhitespace(linebreakStr string) yamlfmt.Feature {
+ return yamlfmt.Feature{
+ Name: "Trim Trailing Whitespace",
+ BeforeAction: trimTrailingWhitespaceFeature(linebreakStr),
+ }
+}
+
+func trimTrailingWhitespaceFeature(linebreakStr string) yamlfmt.FeatureFunc {
+ return func(_ context.Context, content []byte) (context.Context, []byte, error) {
+ buf := bytes.NewBuffer(content)
+ s := bufio.NewScanner(buf)
+ newLines := []string{}
+ for s.Scan() {
+ newLines = append(newLines, strings.TrimRight(s.Text(), " "))
+ }
+ return nil, []byte(strings.Join(newLines, linebreakStr)), nil
+ }
+}
diff --git a/vendor/github.com/google/yamlfmt/internal/gitlab/codequality.go b/vendor/github.com/google/yamlfmt/internal/gitlab/codequality.go
new file mode 100644
index 0000000..e03de2d
--- /dev/null
+++ b/vendor/github.com/google/yamlfmt/internal/gitlab/codequality.go
@@ -0,0 +1,79 @@
+// Copyright 2024 GitLab, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package gitlab generates GitLab Code Quality reports.
+package gitlab
+
+import (
+ "crypto/sha256"
+ "fmt"
+
+ "github.com/google/yamlfmt"
+)
+
+// CodeQuality represents a single code quality finding.
+//
+// Documentation: https://docs.gitlab.com/ee/ci/testing/code_quality.html#code-quality-report-format
+type CodeQuality struct {
+ Description string `json:"description,omitempty"`
+ Name string `json:"check_name,omitempty"`
+ Fingerprint string `json:"fingerprint,omitempty"`
+ Severity Severity `json:"severity,omitempty"`
+ Location Location `json:"location,omitempty"`
+}
+
+// Location is the location of a Code Quality finding.
+type Location struct {
+ Path string `json:"path,omitempty"`
+}
+
+// NewCodeQuality creates a new CodeQuality object from a yamlfmt.FileDiff.
+//
+// If the file did not change, i.e. the diff is empty, an empty struct and false are returned.
+func NewCodeQuality(diff yamlfmt.FileDiff) (CodeQuality, bool) {
+ if !diff.Diff.Changed() {
+ return CodeQuality{}, false
+ }
+
+ return CodeQuality{
+ Description: "Not formatted correctly, run yamlfmt to resolve.",
+ Name: "yamlfmt",
+ Fingerprint: fingerprint(diff),
+ Severity: Major,
+ Location: Location{
+ Path: diff.Path,
+ },
+ }, true
+}
+
+// fingerprint returns a 256-bit SHA256 hash of the original unformatted file.
+// This is used to uniquely identify a code quality finding.
+func fingerprint(diff yamlfmt.FileDiff) string {
+ hash := sha256.New()
+
+ fmt.Fprint(hash, diff.Diff.Original)
+
+ return fmt.Sprintf("%x", hash.Sum(nil)) //nolint:perfsprint
+}
+
+// Severity is the severity of a code quality finding.
+type Severity string
+
+const (
+ Info Severity = "info"
+ Minor Severity = "minor"
+ Major Severity = "major"
+ Critical Severity = "critical"
+ Blocker Severity = "blocker"
+)
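
Since gitlab is an internal package, the sketch below would only build inside the yamlfmt module (for example as a test beside codequality.go); it is meant purely to illustrate the shape of a single finding in the report, with an invented path and diff.

package gitlab_test

import (
	"encoding/json"
	"fmt"

	"github.com/google/yamlfmt"
	"github.com/google/yamlfmt/internal/gitlab"
)

func ExampleNewCodeQuality() {
	diff := yamlfmt.FileDiff{
		Path: "example.yaml",
		Diff: &yamlfmt.FormatDiff{Original: "a:  1\n", Formatted: "a: 1\n", LineSep: "\n"},
	}
	if cq, ok := gitlab.NewCodeQuality(diff); ok {
		out, _ := json.MarshalIndent([]gitlab.CodeQuality{cq}, "", "  ")
		fmt.Println(string(out))
	}
}
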
diff --git a/vendor/github.com/google/yamlfmt/internal/hotfix/retain_line_break.go b/vendor/github.com/google/yamlfmt/internal/hotfix/retain_line_break.go
new file mode 100644
index 0000000..a7a139e
--- /dev/null
+++ b/vendor/github.com/google/yamlfmt/internal/hotfix/retain_line_break.go
@@ -0,0 +1,104 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The features in this file retain line breaks through the formatting process.
+// The basic idea is to insert placeholder comments into the YAML document before formatting and remove them afterwards.
+
+package hotfix
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "strings"
+
+ "github.com/google/yamlfmt"
+)
+
+const lineBreakPlaceholder = "#magic___^_^___line"
+
+type paddinger struct {
+ strings.Builder
+}
+
+func (p *paddinger) adjust(txt string) {
+ var indentSize int
+ for i := 0; i < len(txt) && txt[i] == ' '; i++ { // yaml only allows space to indent.
+ indentSize++
+ }
+	// Grow the builder when the given indent is wider than the current padding, so it always holds the maximum padding seen.
+ for diff := indentSize - p.Len(); diff > 0; diff-- {
+ p.WriteByte(' ')
+ }
+}
+
+func MakeFeatureRetainLineBreak(linebreakStr string, chomp bool) yamlfmt.Feature {
+ return yamlfmt.Feature{
+ Name: "Retain Line Breaks",
+ BeforeAction: replaceLineBreakFeature(linebreakStr, chomp),
+ AfterAction: restoreLineBreakFeature(linebreakStr),
+ }
+}
+
+func replaceLineBreakFeature(newlineStr string, chomp bool) yamlfmt.FeatureFunc {
+ return func(_ context.Context, content []byte) (context.Context, []byte, error) {
+ var buf bytes.Buffer
+ reader := bytes.NewReader(content)
+ scanner := bufio.NewScanner(reader)
+ var inLineBreaks bool
+ var padding paddinger
+ for scanner.Scan() {
+ txt := scanner.Text()
+ padding.adjust(txt)
+ if strings.TrimSpace(txt) == "" { // line break or empty space line.
+ if chomp && inLineBreaks {
+ continue
+ }
+				buf.WriteString(padding.String()) // prepend padding in case we are inside a literal multiline string.
+ buf.WriteString(lineBreakPlaceholder)
+ buf.WriteString(newlineStr)
+ inLineBreaks = true
+ } else {
+ buf.WriteString(txt)
+ buf.WriteString(newlineStr)
+ inLineBreaks = false
+ }
+ }
+ return nil, buf.Bytes(), scanner.Err()
+ }
+}
+
+func restoreLineBreakFeature(newlineStr string) yamlfmt.FeatureFunc {
+ return func(_ context.Context, content []byte) (context.Context, []byte, error) {
+ var buf bytes.Buffer
+ reader := bytes.NewReader(content)
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() {
+ txt := scanner.Text()
+ if strings.TrimSpace(txt) == "" {
+				// The underlying yaml library inserts a newline when a comment (either our
+				// placeholder or a user's) is followed by optional line breaks and a `---`
+				// document separator. Blank lines should only come from our placeholder, so skip it.
+ continue
+ }
+ if strings.HasPrefix(strings.TrimLeft(txt, " "), lineBreakPlaceholder) {
+ buf.WriteString(newlineStr)
+ continue
+ }
+ buf.WriteString(txt)
+ buf.WriteString(newlineStr)
+ }
+ return nil, buf.Bytes(), scanner.Err()
+ }
+}
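
A sketch of the retain-line-break round trip in isolation (again only buildable inside the module, since hotfix is internal): the Before pass swaps blank lines for the placeholder comment and the After pass restores them.

package hotfix_test

import (
	"context"
	"fmt"

	"github.com/google/yamlfmt"
	"github.com/google/yamlfmt/internal/hotfix"
)

func ExampleMakeFeatureRetainLineBreak() {
	features := yamlfmt.FeatureList{hotfix.MakeFeatureRetainLineBreak("\n", false)}
	in := []byte("a: 1\n\nb: 2\n")

	ctx, marked, err := features.ApplyFeatures(context.Background(), in, yamlfmt.FeatureApplyBefore)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", marked) // blank line replaced with the placeholder comment

	_, restored, err := features.ApplyFeatures(ctx, marked, yamlfmt.FeatureApplyAfter)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", restored) // placeholder turned back into a blank line
}
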
diff --git a/vendor/github.com/google/yamlfmt/internal/hotfix/strip_directives.go b/vendor/github.com/google/yamlfmt/internal/hotfix/strip_directives.go
new file mode 100644
index 0000000..63e1c6e
--- /dev/null
+++ b/vendor/github.com/google/yamlfmt/internal/hotfix/strip_directives.go
@@ -0,0 +1,101 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package hotfix
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "strings"
+
+ "github.com/google/yamlfmt"
+)
+
+type directiveKey string
+
+var contextDirectivesKey directiveKey = "directives"
+
+type Directive struct {
+ line int
+ content string
+}
+
+func ContextWithDirectives(ctx context.Context, directives []Directive) context.Context {
+ return context.WithValue(ctx, contextDirectivesKey, directives)
+}
+
+func DirectivesFromContext(ctx context.Context) []Directive {
+ return ctx.Value(contextDirectivesKey).([]Directive)
+}
+
+func MakeFeatureStripDirectives(lineSepChar string) yamlfmt.Feature {
+ return yamlfmt.Feature{
+ Name: "Strip Directives",
+ BeforeAction: stripDirectivesFeature(lineSepChar),
+ AfterAction: restoreDirectivesFeature(lineSepChar),
+ }
+}
+
+func stripDirectivesFeature(lineSepChar string) yamlfmt.FeatureFunc {
+ return func(ctx context.Context, content []byte) (context.Context, []byte, error) {
+ directives := []Directive{}
+ reader := bytes.NewReader(content)
+ scanner := bufio.NewScanner(reader)
+ result := ""
+ currLine := 1
+ for scanner.Scan() {
+ line := scanner.Text()
+ if strings.HasPrefix(line, "%") {
+ directives = append(directives, Directive{
+ line: currLine,
+ content: line,
+ })
+ } else {
+ result += line + lineSepChar
+ }
+ currLine++
+ }
+ return ContextWithDirectives(ctx, directives), []byte(result), nil
+ }
+}
+
+func restoreDirectivesFeature(lineSepChar string) yamlfmt.FeatureFunc {
+ return func(ctx context.Context, content []byte) (context.Context, []byte, error) {
+ directives := DirectivesFromContext(ctx)
+ directiveIdx := 0
+ doneDirectives := directiveIdx == len(directives)
+ reader := bytes.NewReader(content)
+ scanner := bufio.NewScanner(reader)
+ result := ""
+ currLine := 1
+ for scanner.Scan() {
+ if !doneDirectives && currLine == directives[directiveIdx].line {
+ result += directives[directiveIdx].content + lineSepChar
+ currLine++
+ directiveIdx++
+ doneDirectives = directiveIdx == len(directives)
+ }
+ result += scanner.Text() + lineSepChar
+ currLine++
+ }
+		// Edge case: a directive can technically appear as the final line of the file.
+		// It serves no purpose there, but rather than dropping it, it is preserved and
+		// re-emitted after the last scanned line.
+ if !doneDirectives && currLine == directives[directiveIdx].line {
+ result += directives[directiveIdx].content + lineSepChar
+ }
+ return ctx, []byte(result), nil
+ }
+}
diff --git a/vendor/github.com/google/yamlfmt/internal/logger/debug.go b/vendor/github.com/google/yamlfmt/internal/logger/debug.go
new file mode 100644
index 0000000..a75463c
--- /dev/null
+++ b/vendor/github.com/google/yamlfmt/internal/logger/debug.go
@@ -0,0 +1,51 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logger
+
+import (
+ "fmt"
+
+ "github.com/google/yamlfmt/internal/collections"
+)
+
+type DebugCode int
+
+const (
+ DebugCodeAny DebugCode = iota
+ DebugCodeConfig
+ DebugCodePaths
+)
+
+var (
+ supportedDebugCodes = map[string][]DebugCode{
+ "config": {DebugCodeConfig},
+ "paths": {DebugCodePaths},
+ "all": {DebugCodeConfig, DebugCodePaths},
+ }
+ activeDebugCodes = collections.Set[DebugCode]{}
+)
+
+func ActivateDebugCode(code string) {
+ if debugCodes, ok := supportedDebugCodes[code]; ok {
+ activeDebugCodes.Add(debugCodes...)
+ }
+}
+
+// Debug prints a message if the given debug code is active.
+func Debug(code DebugCode, msg string, args ...any) {
+ if activeDebugCodes.Contains(code) {
+ fmt.Printf("[DEBUG]: %s\n", fmt.Sprintf(msg, args...))
+ }
+}
diff --git a/vendor/github.com/google/yamlfmt/internal/multilinediff/multilinediff.go b/vendor/github.com/google/yamlfmt/internal/multilinediff/multilinediff.go
new file mode 100644
index 0000000..dea8cc6
--- /dev/null
+++ b/vendor/github.com/google/yamlfmt/internal/multilinediff/multilinediff.go
@@ -0,0 +1,130 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package multilinediff
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/google/go-cmp/cmp"
+ "github.com/google/go-cmp/cmp/cmpopts"
+)
+
+// Get the diff between two strings.
+func Diff(a, b, lineSep string) (string, int) {
+ reporter := Reporter{LineSep: lineSep}
+ cmp.Diff(
+ a, b,
+ cmpopts.AcyclicTransformer("multiline", func(s string) []string {
+ return strings.Split(s, lineSep)
+ }),
+ cmp.Reporter(&reporter),
+ )
+ return reporter.String(), reporter.DiffCount
+}
+
+type diffType int
+
+const (
+ diffTypeEqual diffType = iota
+ diffTypeChange
+ diffTypeAdd
+)
+
+type diffLine struct {
+ diff diffType
+ old string
+ new string
+}
+
+func (l diffLine) toLine(length int) string {
+ line := ""
+
+ switch l.diff {
+ case diffTypeChange:
+ line += "- "
+ case diffTypeAdd:
+ line += "+ "
+ default:
+ line += " "
+ }
+
+ line += l.old
+
+ for i := 0; i < length-len(l.old); i++ {
+ line += " "
+ }
+
+ line += " "
+
+ line += l.new
+
+ return line
+}
+
+// A pretty reporter to pass into cmp.Diff using the cmd.Reporter function.
+type Reporter struct {
+ LineSep string
+ DiffCount int
+
+ path cmp.Path
+ lines []diffLine
+}
+
+func (r *Reporter) PushStep(ps cmp.PathStep) {
+ r.path = append(r.path, ps)
+}
+
+func (r *Reporter) Report(rs cmp.Result) {
+ line := diffLine{}
+ vOld, vNew := r.path.Last().Values()
+ if !rs.Equal() {
+ r.DiffCount++
+ if vOld.IsValid() {
+ line.diff = diffTypeChange
+ line.old = fmt.Sprintf("%+v", vOld)
+ }
+ if vNew.IsValid() {
+ if line.diff == diffTypeEqual {
+ line.diff = diffTypeAdd
+ }
+ line.new = fmt.Sprintf("%+v", vNew)
+ }
+ } else {
+ line.old = fmt.Sprintf("%+v", vOld)
+ line.new = fmt.Sprintf("%+v", vOld)
+ }
+ r.lines = append(r.lines, line)
+}
+
+func (r *Reporter) PopStep() {
+ r.path = r.path[:len(r.path)-1]
+}
+
+func (r *Reporter) String() string {
+ maxLen := 0
+ for _, l := range r.lines {
+ if len(l.old) > maxLen {
+ maxLen = len(l.old)
+ }
+ }
+
+ diffLines := []string{}
+ for _, l := range r.lines {
+ diffLines = append(diffLines, l.toLine(maxLen))
+ }
+
+ return strings.Join(diffLines, r.LineSep)
+}
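
A tiny sketch of calling the differ directly (internal package, so only buildable inside the module): the returned count is the number of reported differences and the string is the two-column rendering.

package multilinediff_test

import (
	"fmt"

	"github.com/google/yamlfmt/internal/multilinediff"
)

func ExampleDiff() {
	report, count := multilinediff.Diff("a: 1\nb: 2\n", "a: 1\nb: 3\n", "\n")
	fmt.Println(count)  // number of reported differences
	fmt.Println(report) // old lines on the left, new lines on the right
}
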
diff --git a/vendor/github.com/google/yamlfmt/linebreak.go b/vendor/github.com/google/yamlfmt/linebreak.go
new file mode 100644
index 0000000..2012aab
--- /dev/null
+++ b/vendor/github.com/google/yamlfmt/linebreak.go
@@ -0,0 +1,42 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yamlfmt
+
+import "fmt"
+
+type LineBreakStyle string
+
+const (
+ LineBreakStyleLF LineBreakStyle = "lf"
+ LineBreakStyleCRLF LineBreakStyle = "crlf"
+)
+
+type UnsupportedLineBreakError struct {
+ style LineBreakStyle
+}
+
+func (e UnsupportedLineBreakError) Error() string {
+ return fmt.Sprintf("unsupported line break style %s, see package documentation for supported styles", e.style)
+}
+
+func (s LineBreakStyle) Separator() (string, error) {
+ switch s {
+ case LineBreakStyleLF:
+ return "\n", nil
+ case LineBreakStyleCRLF:
+ return "\r\n", nil
+ }
+ return "", UnsupportedLineBreakError{style: s}
+}
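
A short sketch of resolving a separator from a configured style; any value other than "lf" or "crlf" yields an UnsupportedLineBreakError.

package main

import (
	"fmt"

	"github.com/google/yamlfmt"
)

func main() {
	sep, err := yamlfmt.LineBreakStyleCRLF.Separator()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", sep) // "\r\n"

	if _, err := yamlfmt.LineBreakStyle("cr").Separator(); err != nil {
		fmt.Println(err) // unsupported line break style cr, ...
	}
}
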
diff --git a/vendor/github.com/google/yamlfmt/metadata.go b/vendor/github.com/google/yamlfmt/metadata.go
new file mode 100644
index 0000000..82abfc9
--- /dev/null
+++ b/vendor/github.com/google/yamlfmt/metadata.go
@@ -0,0 +1,114 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yamlfmt
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+ "unicode"
+
+ "github.com/google/yamlfmt/internal/collections"
+)
+
+const MetadataIdentifier = "!yamlfmt!"
+
+type MetadataType string
+
+const (
+ MetadataIgnore MetadataType = "ignore"
+)
+
+func IsMetadataType(mdValueStr string) bool {
+ mdTypes := collections.Set[MetadataType]{}
+ mdTypes.Add(MetadataIgnore)
+ return mdTypes.Contains(MetadataType(mdValueStr))
+}
+
+type Metadata struct {
+ Type MetadataType
+ LineNum int
+}
+
+var (
+ ErrMalformedMetadata = errors.New("metadata: malformed string")
+ ErrUnrecognizedMetadata = errors.New("metadata: unrecognized type")
+)
+
+type MetadataError struct {
+ err error
+ path string
+ lineNum int
+ lineStr string
+}
+
+func (e *MetadataError) Error() string {
+ return fmt.Sprintf(
+ "%v: %s:%d:%s",
+ e.err,
+ e.path,
+ e.lineNum,
+ e.lineStr,
+ )
+}
+
+func (e *MetadataError) Unwrap() error {
+ return e.err
+}
+
+func ReadMetadata(content []byte, path string) (collections.Set[Metadata], collections.Errors) {
+ metadata := collections.Set[Metadata]{}
+ mdErrs := collections.Errors{}
+ // This could be `\r\n` but it won't affect the outcome of this operation.
+ contentLines := strings.Split(string(content), "\n")
+ for i, line := range contentLines {
+ mdidIndex := strings.Index(line, MetadataIdentifier)
+ if mdidIndex == -1 {
+ continue
+ }
+ mdStr := scanMetadata(line, mdidIndex)
+ mdComponents := strings.Split(mdStr, ":")
+ if len(mdComponents) != 2 {
+ mdErrs = append(mdErrs, &MetadataError{
+ path: path,
+ lineNum: i + 1,
+ err: ErrMalformedMetadata,
+ lineStr: line,
+ })
+ continue
+ }
+ if IsMetadataType(mdComponents[1]) {
+ metadata.Add(Metadata{LineNum: i + 1, Type: MetadataType(mdComponents[1])})
+ } else {
+ mdErrs = append(mdErrs, &MetadataError{
+ path: path,
+ lineNum: i + 1,
+ err: ErrUnrecognizedMetadata,
+ lineStr: line,
+ })
+ }
+ }
+ return metadata, mdErrs
+}
+
+func scanMetadata(line string, index int) string {
+ mdBytes := []byte{}
+ i := index
+ for i < len(line) && !unicode.IsSpace(rune(line[i])) {
+ mdBytes = append(mdBytes, line[i])
+ i++
+ }
+ return string(mdBytes)
+}
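
A sketch of the ignore-metadata flow (the file content is invented): a line containing `!yamlfmt!:ignore` is picked up by ReadMetadata, which is how the content analyzer earlier in this change decides to skip a file.

package main

import (
	"fmt"

	"github.com/google/yamlfmt"
)

func main() {
	content := []byte("# !yamlfmt!:ignore\nkey: value\n")
	metadata, mdErrs := yamlfmt.ReadMetadata(content, "example.yaml")
	if err := mdErrs.Combine(); err != nil {
		fmt.Println("metadata warnings:", err)
	}
	for md := range metadata {
		fmt.Printf("line %d: %s\n", md.LineNum, md.Type) // line 1: ignore
	}
}
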
diff --git a/vendor/github.com/google/yamlfmt/path_collector.go b/vendor/github.com/google/yamlfmt/path_collector.go
new file mode 100644
index 0000000..7967f3c
--- /dev/null
+++ b/vendor/github.com/google/yamlfmt/path_collector.go
@@ -0,0 +1,336 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yamlfmt
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/bmatcuk/doublestar/v4"
+ "github.com/google/yamlfmt/internal/collections"
+ "github.com/google/yamlfmt/internal/logger"
+ ignore "github.com/sabhiram/go-gitignore"
+)
+
+type MatchType string
+
+const (
+ MatchTypeStandard MatchType = "standard"
+ MatchTypeDoublestar MatchType = "doublestar"
+ MatchTypeGitignore MatchType = "gitignore"
+)
+
+type PathCollector interface {
+ CollectPaths() ([]string, error)
+}
+
+type FilepathCollector struct {
+ Include []string
+ Exclude []string
+ Extensions []string
+}
+
+func (c *FilepathCollector) CollectPaths() ([]string, error) {
+ logger.Debug(logger.DebugCodePaths, "using file path matching. include patterns: %s", c.Include)
+ pathsFound := []string{}
+ for _, inclPath := range c.Include {
+ info, err := os.Stat(inclPath)
+ if err != nil {
+ if !os.IsNotExist(err) {
+ return nil, err
+ }
+ continue
+ }
+ if !info.IsDir() {
+ pathsFound = append(pathsFound, inclPath)
+ continue
+ }
+ paths, err := c.walkDirectoryForYaml(inclPath)
+ if err != nil {
+ return nil, err
+ }
+ pathsFound = append(pathsFound, paths...)
+ }
+ logger.Debug(logger.DebugCodePaths, "found paths: %s", pathsFound)
+
+ pathsFoundSet := collections.SliceToSet(pathsFound)
+ pathsToFormat := collections.SliceToSet(pathsFound)
+ for _, exclPath := range c.Exclude {
+ info, err := os.Stat(exclPath)
+ if err != nil {
+ if !os.IsNotExist(err) {
+ return nil, err
+ }
+ continue
+ }
+
+ if info.IsDir() {
+ logger.Debug(logger.DebugCodePaths, "for exclude dir: %s", exclPath)
+ for foundPath := range pathsFoundSet {
+ if strings.HasPrefix(foundPath, exclPath) {
+ logger.Debug(logger.DebugCodePaths, "excluding %s", foundPath)
+ pathsToFormat.Remove(foundPath)
+ }
+ }
+ } else {
+ logger.Debug(logger.DebugCodePaths, "for exclude file: %s", exclPath)
+ removed := pathsToFormat.Remove(exclPath)
+ if removed {
+ logger.Debug(logger.DebugCodePaths, "found in paths, excluding")
+ }
+ }
+ }
+
+ pathsToFormatSlice := pathsToFormat.ToSlice()
+ logger.Debug(logger.DebugCodePaths, "paths to format: %s", pathsToFormat)
+ return pathsToFormatSlice, nil
+}
+
+func (c *FilepathCollector) walkDirectoryForYaml(dir string) ([]string, error) {
+ var paths []string
+ err := filepath.Walk(dir, func(path string, info fs.FileInfo, err error) error {
+ // If Walk could not stat this entry, info may be nil; return the error
+ // instead of dereferencing it.
+ if err != nil {
+ return err
+ }
+ if info.IsDir() {
+ return nil
+ }
+
+ if c.extensionMatches(info.Name()) {
+ paths = append(paths, path)
+ }
+
+ return nil
+ })
+ return paths, err
+}
+
+func (c *FilepathCollector) extensionMatches(name string) bool {
+ for _, ext := range c.Extensions {
+ // Users may specify "yaml", but we only want to match ".yaml", not "buyaml".
+ if !strings.HasPrefix(ext, ".") {
+ ext = "." + ext
+ }
+
+ if strings.HasSuffix(name, ext) {
+ return true
+ }
+ }
+ return false
+}
+
+type DoublestarCollector struct {
+ Include []string
+ Exclude []string
+}
+
+func (c *DoublestarCollector) CollectPaths() ([]string, error) {
+ logger.Debug(logger.DebugCodePaths, "using doublestar path matching. include patterns: %s", c.Include)
+ includedPaths := []string{}
+ for _, pattern := range c.Include {
+ logger.Debug(logger.DebugCodePaths, "trying pattern: %s", pattern)
+ globMatches, err := doublestar.FilepathGlob(pattern)
+ if err != nil {
+ return nil, err
+ }
+ logger.Debug(logger.DebugCodePaths, "pattern %s matches: %s", pattern, globMatches)
+ includedPaths = append(includedPaths, globMatches...)
+ }
+
+ pathsToFormatSet := collections.Set[string]{}
+ for _, path := range includedPaths {
+ if len(c.Exclude) == 0 {
+ pathsToFormatSet.Add(path)
+ continue
+ }
+ excluded := false
+ logger.Debug(logger.DebugCodePaths, "calculating excludes for %s", path)
+ for _, pattern := range c.Exclude {
+ match, err := doublestar.PathMatch(filepath.Clean(pattern), path)
+ if err != nil {
+ return nil, err
+ }
+ if match {
+ logger.Debug(logger.DebugCodePaths, "pattern %s matched, excluding", pattern)
+ excluded = true
+ break
+ }
+ logger.Debug(logger.DebugCodePaths, "pattern %s did not match path", pattern)
+ }
+ if !excluded {
+ logger.Debug(logger.DebugCodePaths, "path %s included", path)
+ pathsToFormatSet.Add(path)
+ }
+ }
+
+ pathsToFormat := pathsToFormatSet.ToSlice()
+ logger.Debug(logger.DebugCodePaths, "paths to format: %s", pathsToFormat)
+ return pathsToFormat, nil
+}
+
+func findGitIgnorePath(gitignorePath string) (string, error) {
+ // if path is absolute, check if exists and return
+ if filepath.IsAbs(gitignorePath) {
+ _, err := os.Stat(gitignorePath)
+ return gitignorePath, err
+ }
+
+ // if path is relative, search for it until the git root
+ dir, err := os.Getwd()
+ if err != nil {
+ return gitignorePath, fmt.Errorf("cannot get current working directory: %w", err)
+ }
+ for {
+ // check if gitignore is there
+ gitIgnore := filepath.Join(dir, gitignorePath)
+ if _, err := os.Stat(gitIgnore); err == nil {
+ return gitIgnore, nil
+ }
+
+ // check if we are at the git root directory
+ gitRoot := filepath.Join(dir, ".git")
+ if _, err := os.Stat(gitRoot); err == nil {
+ return gitignorePath, errors.New("gitignore not found")
+ }
+
+ // check if we are at the root of the filesystem
+ parent := filepath.Dir(dir)
+ if parent == dir {
+ return gitignorePath, errors.New("no git repository found")
+ }
+
+ // level up
+ dir = parent
+ }
+}
+
+func ExcludeWithGitignore(gitignorePath string, paths []string) ([]string, error) {
+ gitignorePath, err := findGitIgnorePath(gitignorePath)
+ if err != nil {
+ return nil, err
+ }
+ logger.Debug(logger.DebugCodePaths, "excluding paths with gitignore: %s", gitignorePath)
+ ignorer, err := ignore.CompileIgnoreFile(gitignorePath)
+ if err != nil {
+ return nil, err
+ }
+ pathsToFormat := []string{}
+ for _, path := range paths {
+ if ok, pattern := ignorer.MatchesPathHow(path); !ok {
+ pathsToFormat = append(pathsToFormat, path)
+ } else {
+ logger.Debug(logger.DebugCodePaths, "pattern %s matches %s, excluding", pattern.Line, path)
+ }
+ }
+ logger.Debug(logger.DebugCodePaths, "paths to format: %s", pathsToFormat)
+ return pathsToFormat, nil
+}
+
+const DefaultPatternFile = "yamlfmt.patterns"
+
+// PatternFileCollector determines which files to format and which to ignore based on a pattern file in gitignore(5) syntax.
+type PatternFileCollector struct {
+ fs fs.FS
+ matcher *ignore.GitIgnore
+}
+
+// NewPatternFileCollector initializes a new PatternFileCollector using the provided file(s).
+// If multiple files are provided, their content is concatenated in order.
+// All patterns are relative to the current working directory.
+func NewPatternFileCollector(files ...string) (*PatternFileCollector, error) {
+ r, err := cat(files...)
+ if err != nil {
+ return nil, err
+ }
+
+ wd, err := os.Getwd()
+ if err != nil {
+ return nil, fmt.Errorf("os.Getwd: %w", err)
+ }
+
+ return NewPatternFileCollectorFS(r, os.DirFS(wd)), nil
+}
+
+// cat concatenates the contents of all files in its argument list.
+func cat(files ...string) (io.Reader, error) {
+ var b bytes.Buffer
+
+ for _, f := range files {
+ fh, err := os.Open(f)
+ if err != nil {
+ return nil, err
+ }
+ defer fh.Close()
+
+ if _, err := io.Copy(&b, fh); err != nil {
+ return nil, fmt.Errorf("copying %q: %w", f, err)
+ }
+ fh.Close()
+
+ // Append a newline to avoid issues with files lacking a newline at end-of-file.
+ fmt.Fprintln(&b)
+ }
+
+ return &b, nil
+}
+
+// NewPatternFileCollectorFS reads a pattern file from r and uses fs for file lookups.
+// It is used by NewPatternFileCollector and is exported primarily because it is useful for testing.
+func NewPatternFileCollectorFS(r io.Reader, fs fs.FS) *PatternFileCollector {
+ var lines []string
+
+ s := bufio.NewScanner(r)
+ for s.Scan() {
+ lines = append(lines, s.Text())
+ }
+
+ return &PatternFileCollector{
+ fs: fs,
+ matcher: ignore.CompileIgnoreLines(lines...),
+ }
+}
+
+// CollectPaths implements the PathCollector interface.
+func (c *PatternFileCollector) CollectPaths() ([]string, error) {
+ var files []string
+
+ err := fs.WalkDir(c.fs, ".", func(path string, d fs.DirEntry, err error) error {
+ if err != nil {
+ return err
+ }
+
+ ok, pattern := c.matcher.MatchesPathHow(path)
+ switch {
+ case ok && pattern.Negate && d.IsDir():
+ return fs.SkipDir
+ case ok && pattern.Negate:
+ return nil
+ case ok && d.Type().IsRegular():
+ files = append(files, path)
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ return nil, fmt.Errorf("WalkDir: %w", err)
+ }
+
+ return files, nil
+}
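
A short sketch of how the standard match type might be used through the PathCollector interface, assuming the github.com/google/yamlfmt import path; the include, exclude, and extension values are illustrative only:

package main

import (
	"fmt"
	"log"

	"github.com/google/yamlfmt"
)

func main() {
	// Walk the current directory for *.yaml / *.yml files, skipping anything
	// under a hypothetical "vendor" directory.
	var collector yamlfmt.PathCollector = &yamlfmt.FilepathCollector{
		Include:    []string{"."},
		Exclude:    []string{"vendor"},
		Extensions: []string{"yaml", "yml"},
	}

	paths, err := collector.CollectPaths()
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range paths {
		fmt.Println(p)
	}
}

DoublestarCollector and PatternFileCollector satisfy the same interface, so a caller can choose a match type at runtime without changing the code that consumes the collected paths.
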
diff --git a/vendor/github.com/google/yamlfmt/schema.json b/vendor/github.com/google/yamlfmt/schema.json
new file mode 100644
index 0000000..65f5038
--- /dev/null
+++ b/vendor/github.com/google/yamlfmt/schema.json
@@ -0,0 +1,93 @@
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "$id": "https://raw.githubusercontent.com/google/yamlfmt/main/schema.json",
+ "title": "yamlfmt config file",
+ "description": "The yamlfmt config file. For details, see https://github.com/google/yamlfmt/blob/main/docs/config-file.md.",
+ "type": "object",
+ "properties": {
+ "line_ending": {
+ "type": "string",
+ "enum": [
+ "lf",
+ "crlf"
+ ],
+ "default": "lf",
+ "description": "Parse and write the file with 'lf' or 'crlf' line endings. This global setting will override any formatter line_ending options."
+ },
+ "doublestar": {
+ "type": "boolean",
+ "default": false,
+ "description": "Use doublestar for include and exclude paths. (This was the default before 0.7.0)"
+ },
+ "continue_on_error": {
+ "type": "boolean",
+ "default": false,
+ "description": "Continue formatting and don't exit with code 1 when there is an invalid yaml file found."
+ },
+ "include": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "default": [],
+ "description": "The paths for the command to include for formatting. See Specifying Paths for more details."
+ },
+ "exclude": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "default": [],
+ "description": "The paths for the command to exclude from formatting. See Specifying Paths for more details."
+ },
+ "gitignore_excludes": {
+ "type": "boolean",
+ "default": false,
+ "description": "Use gitignore files for exclude paths. This is in addition to the patterns from the exclude option."
+ },
+ "gitignore_path": {
+ "type": "string",
+ "default": ".gitignore",
+ "description": "The path to the gitignore file to use."
+ },
+ "regex_exclude": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "default": [],
+ "description": "Regex patterns to match file contents for, if the file content matches the regex the file will be excluded. Use Go regexes."
+ },
+ "extensions": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "default": [],
+ "description": "The extensions to use for standard mode path collection. See Specifying Paths for more details."
+ },
+ "formatter": {
+ "type": "object",
+ "default": {
+ "type": "basic"
+ },
+ "description": "Formatter settings. See Formatter for more details.",
+ "properties": {
+ "type": {
+ "type": "string",
+ "default": "basic"
+ }
+ }
+ },
+ "output_format": {
+ "type": "string",
+ "enum": [
+ "default",
+ "line"
+ ],
+ "default": "default",
+ "description": "The output format to use. See Output docs for more details."
+ }
+ },
+ "additionalProperties": false
+}
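
For illustration, a hedged Go sketch that parses a config document shaped by this schema; gopkg.in/yaml.v3 is an assumed dependency for the example, and the option values are placeholders rather than a recommended configuration:

package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v3"
)

func main() {
	// Option names follow the schema's top-level properties.
	cfg := []byte(`
line_ending: lf
continue_on_error: true
doublestar: true
include:
  - "**/*.yaml"
exclude:
  - "vendor/**"
formatter:
  type: basic
`)

	var parsed map[string]any
	if err := yaml.Unmarshal(cfg, &parsed); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("parsed config: %v\n", parsed)
}

Because the schema sets additionalProperties to false, a schema-aware editor would flag any top-level key not listed in its properties.
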