summaryrefslogtreecommitdiff
path: root/vendor/github.com/jhump/protoreflect
diff options
context:
space:
mode:
authormo khan <mo@mokhan.ca>2025-05-20 14:28:06 -0600
committermo khan <mo@mokhan.ca>2025-05-23 14:49:19 -0600
commit4beee46dc6c7642316e118a4d3aa51e4b407256e (patch)
tree039bdf57b99061844aeb0fe55ad0bc1c864166af /vendor/github.com/jhump/protoreflect
parent0ba49bfbde242920d8675a193d7af89420456fc0 (diff)
feat: add external authorization service (authzd) with JWT authentication
- Add new authzd gRPC service implementing Envoy's external authorization API - Integrate JWT authentication filter in Envoy configuration with claim extraction - Update middleware to support both cookie-based and header-based user authentication - Add comprehensive test coverage for authorization service and server - Configure proper service orchestration with authzd, sparkled, and Envoy - Update build system and Docker configuration for multi-service deployment - Add grpcurl tool for gRPC service debugging and testing This enables fine-grained authorization control through Envoy's ext_authz filter while maintaining backward compatibility with existing cookie-based authentication.
Diffstat (limited to 'vendor/github.com/jhump/protoreflect')
-rw-r--r--vendor/github.com/jhump/protoreflect/LICENSE202
-rw-r--r--vendor/github.com/jhump/protoreflect/codec/codec.go218
-rw-r--r--vendor/github.com/jhump/protoreflect/codec/decode_fields.go318
-rw-r--r--vendor/github.com/jhump/protoreflect/codec/doc.go7
-rw-r--r--vendor/github.com/jhump/protoreflect/codec/encode_fields.go288
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/cache.go48
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/convert.go294
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/descriptor.go1847
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/descriptor_no_unsafe.go30
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/descriptor_unsafe.go59
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/doc.go70
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/imports.go324
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/internal/proto3_optional.go75
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/internal/registry.go67
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/internal/source_info.go107
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/internal/util.go296
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/load.go258
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/protoparse/ast.go716
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/protoparse/ast/doc.go27
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/protoparse/ast/enum.go154
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/protoparse/ast/field.go659
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/protoparse/ast/file.go236
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/protoparse/ast/identifiers.go134
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/protoparse/ast/message.go199
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/protoparse/ast/no_source.go103
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/protoparse/ast/node.go200
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/protoparse/ast/options.go361
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/protoparse/ast/print.go86
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/protoparse/ast/ranges.go305
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/protoparse/ast/service.go273
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/protoparse/ast/source_pos.go29
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/protoparse/ast/values.go575
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/protoparse/ast/walk.go497
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/protoparse/doc.go16
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/protoparse/errors.go122
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/protoparse/parser.go804
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/protoparse/resolve_files.go175
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/protoparse/test-source-info.txt6401
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/protoprint/doc.go7
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/protoprint/message_literal.go315
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/protoprint/print.go2744
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/protoprint/sort.go439
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/sourceinfo/locations.go207
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/sourceinfo/registry.go340
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/sourceinfo/update.go314
-rw-r--r--vendor/github.com/jhump/protoreflect/desc/wrap.go211
-rw-r--r--vendor/github.com/jhump/protoreflect/dynamic/binary.go193
-rw-r--r--vendor/github.com/jhump/protoreflect/dynamic/doc.go167
-rw-r--r--vendor/github.com/jhump/protoreflect/dynamic/dynamic_message.go2830
-rw-r--r--vendor/github.com/jhump/protoreflect/dynamic/equal.go157
-rw-r--r--vendor/github.com/jhump/protoreflect/dynamic/extension.go46
-rw-r--r--vendor/github.com/jhump/protoreflect/dynamic/extension_registry.go241
-rw-r--r--vendor/github.com/jhump/protoreflect/dynamic/grpcdynamic/stub.go310
-rw-r--r--vendor/github.com/jhump/protoreflect/dynamic/indent.go76
-rw-r--r--vendor/github.com/jhump/protoreflect/dynamic/json.go1256
-rw-r--r--vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go131
-rw-r--r--vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go139
-rw-r--r--vendor/github.com/jhump/protoreflect/dynamic/merge.go100
-rw-r--r--vendor/github.com/jhump/protoreflect/dynamic/message_factory.go207
-rw-r--r--vendor/github.com/jhump/protoreflect/dynamic/text.go1177
-rw-r--r--vendor/github.com/jhump/protoreflect/grpcreflect/adapt.go137
-rw-r--r--vendor/github.com/jhump/protoreflect/grpcreflect/client.go1018
-rw-r--r--vendor/github.com/jhump/protoreflect/grpcreflect/doc.go10
-rw-r--r--vendor/github.com/jhump/protoreflect/grpcreflect/server.go67
-rw-r--r--vendor/github.com/jhump/protoreflect/internal/codec/buffer.go118
-rw-r--r--vendor/github.com/jhump/protoreflect/internal/codec/decode.go346
-rw-r--r--vendor/github.com/jhump/protoreflect/internal/codec/encode.go147
-rw-r--r--vendor/github.com/jhump/protoreflect/internal/standard_files.go127
-rw-r--r--vendor/github.com/jhump/protoreflect/internal/unrecognized.go20
69 files changed, 30177 insertions, 0 deletions
diff --git a/vendor/github.com/jhump/protoreflect/LICENSE b/vendor/github.com/jhump/protoreflect/LICENSE
new file mode 100644
index 0000000..b53b91d
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2017 Joshua Humphries
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/jhump/protoreflect/codec/codec.go b/vendor/github.com/jhump/protoreflect/codec/codec.go
new file mode 100644
index 0000000..7e5c568
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/codec/codec.go
@@ -0,0 +1,218 @@
+package codec
+
+import (
+ "io"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/jhump/protoreflect/internal/codec"
+)
+
+// ErrOverflow is returned when an integer is too large to be represented.
+var ErrOverflow = codec.ErrOverflow
+
+// ErrBadWireType is returned when decoding a wire-type from a buffer that
+// is not valid.
+var ErrBadWireType = codec.ErrBadWireType
+
+// NB: much of the implementation is in an internal package, to avoid an import
+// cycle between this codec package and the desc package. We export it from
+// this package, but we can't use a type alias because we also need to add
+// methods to it, to broaden the exposed API.
+
+// Buffer is a reader and a writer that wraps a slice of bytes and also
+// provides API for decoding and encoding the protobuf binary format.
+//
+// Its operation is similar to that of a bytes.Buffer: writing pushes
+// data to the end of the buffer while reading pops data from the head
+// of the buffer. So the same buffer can be used to both read and write.
+type Buffer codec.Buffer
+
+// NewBuffer creates a new buffer with the given slice of bytes as the
+// buffer's initial contents.
+func NewBuffer(buf []byte) *Buffer {
+ return (*Buffer)(codec.NewBuffer(buf))
+}
+
+// SetDeterministic sets this buffer to encode messages deterministically. This
+// is useful for tests. But the overhead is non-zero, so it should not likely be
+// used outside of tests. When true, map fields in a message must have their
+// keys sorted before serialization to ensure deterministic output. Otherwise,
+// values in a map field will be serialized in map iteration order.
+func (cb *Buffer) SetDeterministic(deterministic bool) {
+ (*codec.Buffer)(cb).SetDeterministic(deterministic)
+}
+
+// IsDeterministic returns whether or not this buffer is configured to encode
+// messages deterministically.
+func (cb *Buffer) IsDeterministic() bool {
+ return (*codec.Buffer)(cb).IsDeterministic()
+}
+
+// Reset resets this buffer back to empty. Any subsequent writes/encodes
+// to the buffer will allocate a new backing slice of bytes.
+func (cb *Buffer) Reset() {
+ (*codec.Buffer)(cb).Reset()
+}
+
+// Bytes returns the slice of bytes remaining in the buffer. Note that
+// this does not perform a copy: if the contents of the returned slice
+// are modified, the modifications will be visible to subsequent reads
+// via the buffer.
+func (cb *Buffer) Bytes() []byte {
+ return (*codec.Buffer)(cb).Bytes()
+}
+
+// String returns the remaining bytes in the buffer as a string.
+func (cb *Buffer) String() string {
+ return (*codec.Buffer)(cb).String()
+}
+
+// EOF returns true if there are no more bytes remaining to read.
+func (cb *Buffer) EOF() bool {
+ return (*codec.Buffer)(cb).EOF()
+}
+
+// Skip attempts to skip the given number of bytes in the input. If
+// the input has fewer bytes than the given count, io.ErrUnexpectedEOF
+// is returned and the buffer is unchanged. Otherwise, the given number
+// of bytes are skipped and nil is returned.
+func (cb *Buffer) Skip(count int) error {
+ return (*codec.Buffer)(cb).Skip(count)
+
+}
+
+// Len returns the remaining number of bytes in the buffer.
+func (cb *Buffer) Len() int {
+ return (*codec.Buffer)(cb).Len()
+}
+
+// Read implements the io.Reader interface. If there are no bytes
+// remaining in the buffer, it will return 0, io.EOF. Otherwise,
+// it reads max(len(dest), cb.Len()) bytes from input and copies
+// them into dest. It returns the number of bytes copied and a nil
+// error in this case.
+func (cb *Buffer) Read(dest []byte) (int, error) {
+ return (*codec.Buffer)(cb).Read(dest)
+}
+
+var _ io.Reader = (*Buffer)(nil)
+
+// Write implements the io.Writer interface. It always returns
+// len(data), nil.
+func (cb *Buffer) Write(data []byte) (int, error) {
+ return (*codec.Buffer)(cb).Write(data)
+}
+
+var _ io.Writer = (*Buffer)(nil)
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (cb *Buffer) DecodeVarint() (uint64, error) {
+ return (*codec.Buffer)(cb).DecodeVarint()
+}
+
+// DecodeTagAndWireType decodes a field tag and wire type from input.
+// This reads a varint and then extracts the two fields from the varint
+// value read.
+func (cb *Buffer) DecodeTagAndWireType() (tag int32, wireType int8, err error) {
+ return (*codec.Buffer)(cb).DecodeTagAndWireType()
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (cb *Buffer) DecodeFixed64() (x uint64, err error) {
+ return (*codec.Buffer)(cb).DecodeFixed64()
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (cb *Buffer) DecodeFixed32() (x uint64, err error) {
+ return (*codec.Buffer)(cb).DecodeFixed32()
+}
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (cb *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+ return (*codec.Buffer)(cb).DecodeRawBytes(alloc)
+}
+
+// ReadGroup reads the input until a "group end" tag is found
+// and returns the data up to that point. Subsequent reads from
+// the buffer will read data after the group end tag. If alloc
+// is true, the data is copied to a new slice before being returned.
+// Otherwise, the returned slice is a view into the buffer's
+// underlying byte slice.
+//
+// This function correctly handles nested groups: if a "group start"
+// tag is found, then that group's end tag will be included in the
+// returned data.
+func (cb *Buffer) ReadGroup(alloc bool) ([]byte, error) {
+ return (*codec.Buffer)(cb).ReadGroup(alloc)
+}
+
+// SkipGroup is like ReadGroup, except that it discards the
+// data and just advances the buffer to point to the input
+// right *after* the "group end" tag.
+func (cb *Buffer) SkipGroup() error {
+ return (*codec.Buffer)(cb).SkipGroup()
+}
+
+// SkipField attempts to skip the value of a field with the given wire
+// type. When consuming a protobuf-encoded stream, it can be called immediately
+// after DecodeTagAndWireType to discard the subsequent data for the field.
+func (cb *Buffer) SkipField(wireType int8) error {
+ return (*codec.Buffer)(cb).SkipField(wireType)
+}
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (cb *Buffer) EncodeVarint(x uint64) error {
+ return (*codec.Buffer)(cb).EncodeVarint(x)
+}
+
+// EncodeTagAndWireType encodes the given field tag and wire type to the
+// buffer. This combines the two values and then writes them as a varint.
+func (cb *Buffer) EncodeTagAndWireType(tag int32, wireType int8) error {
+ return (*codec.Buffer)(cb).EncodeTagAndWireType(tag, wireType)
+}
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (cb *Buffer) EncodeFixed64(x uint64) error {
+ return (*codec.Buffer)(cb).EncodeFixed64(x)
+
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (cb *Buffer) EncodeFixed32(x uint64) error {
+ return (*codec.Buffer)(cb).EncodeFixed32(x)
+}
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (cb *Buffer) EncodeRawBytes(b []byte) error {
+ return (*codec.Buffer)(cb).EncodeRawBytes(b)
+}
+
+// EncodeMessage writes the given message to the buffer.
+func (cb *Buffer) EncodeMessage(pm proto.Message) error {
+ return (*codec.Buffer)(cb).EncodeMessage(pm)
+}
+
+// EncodeDelimitedMessage writes the given message to the buffer with a
+// varint-encoded length prefix (the delimiter).
+func (cb *Buffer) EncodeDelimitedMessage(pm proto.Message) error {
+ return (*codec.Buffer)(cb).EncodeDelimitedMessage(pm)
+}
diff --git a/vendor/github.com/jhump/protoreflect/codec/decode_fields.go b/vendor/github.com/jhump/protoreflect/codec/decode_fields.go
new file mode 100644
index 0000000..0edb817
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/codec/decode_fields.go
@@ -0,0 +1,318 @@
+package codec
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "math"
+
+ "github.com/golang/protobuf/proto"
+ "google.golang.org/protobuf/types/descriptorpb"
+
+ "github.com/jhump/protoreflect/desc"
+)
+
+var varintTypes = map[descriptorpb.FieldDescriptorProto_Type]bool{}
+var fixed32Types = map[descriptorpb.FieldDescriptorProto_Type]bool{}
+var fixed64Types = map[descriptorpb.FieldDescriptorProto_Type]bool{}
+
+func init() {
+ varintTypes[descriptorpb.FieldDescriptorProto_TYPE_BOOL] = true
+ varintTypes[descriptorpb.FieldDescriptorProto_TYPE_INT32] = true
+ varintTypes[descriptorpb.FieldDescriptorProto_TYPE_INT64] = true
+ varintTypes[descriptorpb.FieldDescriptorProto_TYPE_UINT32] = true
+ varintTypes[descriptorpb.FieldDescriptorProto_TYPE_UINT64] = true
+ varintTypes[descriptorpb.FieldDescriptorProto_TYPE_SINT32] = true
+ varintTypes[descriptorpb.FieldDescriptorProto_TYPE_SINT64] = true
+ varintTypes[descriptorpb.FieldDescriptorProto_TYPE_ENUM] = true
+
+ fixed32Types[descriptorpb.FieldDescriptorProto_TYPE_FIXED32] = true
+ fixed32Types[descriptorpb.FieldDescriptorProto_TYPE_SFIXED32] = true
+ fixed32Types[descriptorpb.FieldDescriptorProto_TYPE_FLOAT] = true
+
+ fixed64Types[descriptorpb.FieldDescriptorProto_TYPE_FIXED64] = true
+ fixed64Types[descriptorpb.FieldDescriptorProto_TYPE_SFIXED64] = true
+ fixed64Types[descriptorpb.FieldDescriptorProto_TYPE_DOUBLE] = true
+}
+
// ErrWireTypeEndGroup is returned from DecodeFieldValue if the tag and wire-type
// it reads indicates an end-group marker.
var ErrWireTypeEndGroup = errors.New("unexpected wire type: end group")

// MessageFactory is used to instantiate messages when DecodeFieldValue needs to
// decode a message value.
//
// Also see MessageFactory in "github.com/jhump/protoreflect/dynamic", which
// implements this interface.
type MessageFactory interface {
	// NewMessage returns a new, empty message for the given descriptor.
	NewMessage(md *desc.MessageDescriptor) proto.Message
}
+
// UnknownField represents a field that was parsed from the binary wire
// format for a message, but was not a recognized field number. Enough
// information is preserved so that re-serializing the message won't lose
// any of the unrecognized data.
type UnknownField struct {
	// Tag is the tag number for the unrecognized field.
	Tag int32

	// Encoding indicates how the unknown field was encoded on the wire. If it
	// is proto.WireBytes or proto.WireGroupStart then Contents will be set to
	// the raw bytes. If it is proto.WireTypeFixed32 then the data is in the least
	// significant 32 bits of Value. Otherwise, the data is in all 64 bits of
	// Value.
	Encoding int8
	// Contents holds raw bytes for length-delimited and group encodings.
	Contents []byte
	// Value holds the numeric payload for varint and fixed-width encodings.
	Value uint64
}
+
// DecodeZigZag32 decodes a signed 32-bit integer from the given
// zig-zag encoded value.
func DecodeZigZag32(v uint64) int32 {
	// The low bit carries the sign: xor with all-ones (negative) or
	// zero (non-negative) undoes the zig-zag transform.
	mask := -int32(v & 1)
	return int32(uint32(v)>>1) ^ mask
}
+
// DecodeZigZag64 decodes a signed 64-bit integer from the given
// zig-zag encoded value.
func DecodeZigZag64(v uint64) int64 {
	// The low bit carries the sign: xor with all-ones (negative) or
	// zero (non-negative) undoes the zig-zag transform.
	mask := -int64(v & 1)
	return int64(v>>1) ^ mask
}
+
// DecodeFieldValue will read a field value from the buffer and return its
// value and the corresponding field descriptor. The given function is used
// to lookup a field descriptor by tag number. The given factory is used to
// instantiate a message if the field value is (or contains) a message value.
//
// On error, the field descriptor and value are typically nil. However, if the
// error returned is ErrWireTypeEndGroup, the returned value will indicate any
// tag number encoded in the end-group marker.
//
// If the field descriptor returned is nil, that means that the given function
// returned nil. This is expected to happen for unrecognized tag numbers. In
// that case, no error is returned, and the value will be an UnknownField.
func (cb *Buffer) DecodeFieldValue(fieldFinder func(int32) *desc.FieldDescriptor, fact MessageFactory) (*desc.FieldDescriptor, interface{}, error) {
	if cb.EOF() {
		return nil, nil, io.EOF
	}
	tagNumber, wireType, err := cb.DecodeTagAndWireType()
	if err != nil {
		return nil, nil, err
	}
	if wireType == proto.WireEndGroup {
		// report which group ended via the value (see doc comment above)
		return nil, tagNumber, ErrWireTypeEndGroup
	}
	fd := fieldFinder(tagNumber)
	if fd == nil {
		// unrecognized tag number: capture the raw data as an UnknownField
		val, err := cb.decodeUnknownField(tagNumber, wireType)
		return nil, val, err
	}
	val, err := cb.decodeKnownField(fd, wireType, fact)
	return fd, val, err
}
+
// DecodeScalarField extracts a properly-typed value from v. The returned value's
// type depends on the given field descriptor type. It will be the same type as
// generated structs use for the field descriptor's type. Enum types will return
// an int32. If the given field type uses length-delimited encoding (nested
// messages, bytes, and strings), an error is returned.
func DecodeScalarField(fd *desc.FieldDescriptor, v uint64) (interface{}, error) {
	switch fd.GetType() {
	case descriptorpb.FieldDescriptorProto_TYPE_BOOL:
		return v != 0, nil
	case descriptorpb.FieldDescriptorProto_TYPE_UINT32,
		descriptorpb.FieldDescriptorProto_TYPE_FIXED32:
		// value must fit in 32 bits
		if v > math.MaxUint32 {
			return nil, ErrOverflow
		}
		return uint32(v), nil

	case descriptorpb.FieldDescriptorProto_TYPE_INT32,
		descriptorpb.FieldDescriptorProto_TYPE_ENUM:
		// check the signed interpretation against the int32 range
		s := int64(v)
		if s > math.MaxInt32 || s < math.MinInt32 {
			return nil, ErrOverflow
		}
		return int32(s), nil

	case descriptorpb.FieldDescriptorProto_TYPE_SFIXED32:
		if v > math.MaxUint32 {
			return nil, ErrOverflow
		}
		return int32(v), nil

	case descriptorpb.FieldDescriptorProto_TYPE_SINT32:
		if v > math.MaxUint32 {
			return nil, ErrOverflow
		}
		return DecodeZigZag32(v), nil

	case descriptorpb.FieldDescriptorProto_TYPE_UINT64,
		descriptorpb.FieldDescriptorProto_TYPE_FIXED64:
		return v, nil

	case descriptorpb.FieldDescriptorProto_TYPE_INT64,
		descriptorpb.FieldDescriptorProto_TYPE_SFIXED64:
		return int64(v), nil

	case descriptorpb.FieldDescriptorProto_TYPE_SINT64:
		return DecodeZigZag64(v), nil

	case descriptorpb.FieldDescriptorProto_TYPE_FLOAT:
		// the 32 bits are the IEEE-754 representation of the float
		if v > math.MaxUint32 {
			return nil, ErrOverflow
		}
		return math.Float32frombits(uint32(v)), nil

	case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE:
		return math.Float64frombits(v), nil

	default:
		// bytes, string, message, and group cannot be represented as a simple numeric value
		return nil, fmt.Errorf("bad input; field %s requires length-delimited wire type", fd.GetFullyQualifiedName())
	}
}
+
// DecodeLengthDelimitedField extracts a properly-typed value from bytes. The
// returned value's type will usually be []byte, string, or, for nested messages,
// the type returned from the given message factory. However, since repeated
// scalar fields can be length-delimited, when they used packed encoding, it can
// also return an []interface{}, where each element is a scalar value. Furthermore,
// it could return a scalar type, not in a slice, if the given field descriptor is
// not repeated. This is to support cases where a field is changed from optional
// to repeated. New code may emit a packed repeated representation, but old code
// still expects a single scalar value. In this case, if the actual data in bytes
// contains multiple values, only the last value is returned.
func DecodeLengthDelimitedField(fd *desc.FieldDescriptor, bytes []byte, mf MessageFactory) (interface{}, error) {
	switch {
	case fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_BYTES:
		return bytes, nil

	case fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_STRING:
		return string(bytes), nil

	case fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_MESSAGE ||
		fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP:
		// instantiate via the factory, then unmarshal the payload into it
		msg := mf.NewMessage(fd.GetMessageType())
		err := proto.Unmarshal(bytes, msg)
		if err != nil {
			return nil, err
		} else {
			return msg, nil
		}

	default:
		// even if the field is not repeated or not packed, we still parse it as such for
		// backwards compatibility (e.g. message we are de-serializing could have been both
		// repeated and packed at the time of serialization)
		packedBuf := NewBuffer(bytes)
		var slice []interface{}
		var val interface{}
		for !packedBuf.EOF() {
			var v uint64
			var err error
			// pick the decoder that matches the field's wire encoding
			if varintTypes[fd.GetType()] {
				v, err = packedBuf.DecodeVarint()
			} else if fixed32Types[fd.GetType()] {
				v, err = packedBuf.DecodeFixed32()
			} else if fixed64Types[fd.GetType()] {
				v, err = packedBuf.DecodeFixed64()
			} else {
				return nil, fmt.Errorf("bad input; cannot parse length-delimited wire type for field %s", fd.GetFullyQualifiedName())
			}
			if err != nil {
				return nil, err
			}
			val, err = DecodeScalarField(fd, v)
			if err != nil {
				return nil, err
			}
			if fd.IsRepeated() {
				slice = append(slice, val)
			}
		}
		if fd.IsRepeated() {
			return slice, nil
		} else {
			// if not a repeated field, last value wins
			return val, nil
		}
	}
}
+
// decodeKnownField reads a single value for the given field descriptor from
// the buffer, using the wire type actually observed on the wire (encoding).
// The factory is used to instantiate messages for message, group, and other
// length-delimited values that contain messages.
func (b *Buffer) decodeKnownField(fd *desc.FieldDescriptor, encoding int8, fact MessageFactory) (interface{}, error) {
	var val interface{}
	var err error
	switch encoding {
	case proto.WireFixed32:
		var num uint64
		num, err = b.DecodeFixed32()
		if err == nil {
			val, err = DecodeScalarField(fd, num)
		}
	case proto.WireFixed64:
		var num uint64
		num, err = b.DecodeFixed64()
		if err == nil {
			val, err = DecodeScalarField(fd, num)
		}
	case proto.WireVarint:
		var num uint64
		num, err = b.DecodeVarint()
		if err == nil {
			val, err = DecodeScalarField(fd, num)
		}

	case proto.WireBytes:
		// only copy out the raw bytes when the field actually keeps them
		alloc := fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_BYTES
		var raw []byte
		raw, err = b.DecodeRawBytes(alloc)
		if err == nil {
			val, err = DecodeLengthDelimitedField(fd, raw, fact)
		}

	case proto.WireStartGroup:
		if fd.GetMessageType() == nil {
			return nil, fmt.Errorf("cannot parse field %s from group-encoded wire type", fd.GetFullyQualifiedName())
		}
		msg := fact.NewMessage(fd.GetMessageType())
		var data []byte
		data, err = b.ReadGroup(false)
		if err == nil {
			err = proto.Unmarshal(data, msg)
			if err == nil {
				val = msg
			}
		}

	default:
		return nil, ErrBadWireType
	}
	if err != nil {
		return nil, err
	}

	return val, nil
}
+
// decodeUnknownField reads the value for an unrecognized tag number from
// the buffer and preserves it as an UnknownField so that the data can be
// re-serialized later without loss.
func (b *Buffer) decodeUnknownField(tagNumber int32, encoding int8) (interface{}, error) {
	u := UnknownField{Tag: tagNumber, Encoding: encoding}
	var err error
	switch encoding {
	case proto.WireFixed32:
		u.Value, err = b.DecodeFixed32()
	case proto.WireFixed64:
		u.Value, err = b.DecodeFixed64()
	case proto.WireVarint:
		u.Value, err = b.DecodeVarint()
	case proto.WireBytes:
		u.Contents, err = b.DecodeRawBytes(true)
	case proto.WireStartGroup:
		u.Contents, err = b.ReadGroup(true)
	default:
		err = ErrBadWireType
	}
	if err != nil {
		return nil, err
	}
	return u, nil
}
diff --git a/vendor/github.com/jhump/protoreflect/codec/doc.go b/vendor/github.com/jhump/protoreflect/codec/doc.go
new file mode 100644
index 0000000..f76499f
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/codec/doc.go
@@ -0,0 +1,7 @@
+// Package codec contains a reader/write type that assists with encoding
+// and decoding protobuf's binary representation.
+//
+// The code in this package began as a fork of proto.Buffer but provides
+// additional API to make it more useful to code that needs to dynamically
+// process or produce the protobuf binary format.
+package codec
diff --git a/vendor/github.com/jhump/protoreflect/codec/encode_fields.go b/vendor/github.com/jhump/protoreflect/codec/encode_fields.go
new file mode 100644
index 0000000..280f730
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/codec/encode_fields.go
@@ -0,0 +1,288 @@
+package codec
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+
+ "github.com/golang/protobuf/proto"
+ "google.golang.org/protobuf/types/descriptorpb"
+
+ "github.com/jhump/protoreflect/desc"
+)
+
// EncodeZigZag64 does zig-zag encoding to convert the given
// signed 64-bit integer into a form that can be expressed
// efficiently as a varint, even for negative values.
func EncodeZigZag64(v int64) uint64 {
	// arithmetic shift yields a sign mask (0 or all ones) that is
	// xor-ed with the left-shifted value
	return uint64(v<<1) ^ uint64(v>>63)
}
+
// EncodeZigZag32 does zig-zag encoding to convert the given
// signed 32-bit integer into a form that can be expressed
// efficiently as a varint, even for negative values.
func EncodeZigZag32(v int32) uint64 {
	// arithmetic shift yields a sign mask (0 or all ones) that is
	// xor-ed with the left-shifted value
	return uint64(uint32(v<<1) ^ uint32(v>>31))
}
+
// EncodeFieldValue writes the given field value to the buffer, including the
// tag and wire type for each emitted element. Map fields are written as a
// sequence of length-delimited entry messages (sorted by key when the buffer
// is in deterministic mode); repeated fields are written packed when the
// field's options and wire type allow; all other fields are written as a
// single element.
func (cb *Buffer) EncodeFieldValue(fd *desc.FieldDescriptor, val interface{}) error {
	if fd.IsMap() {
		mp := val.(map[interface{}]interface{})
		// map entries are synthetic messages with key = field 1, value = field 2
		entryType := fd.GetMessageType()
		keyType := entryType.FindFieldByNumber(1)
		valType := entryType.FindFieldByNumber(2)
		var entryBuffer Buffer
		if cb.IsDeterministic() {
			entryBuffer.SetDeterministic(true)
			// sort the keys so output is reproducible
			keys := make([]interface{}, 0, len(mp))
			for k := range mp {
				keys = append(keys, k)
			}
			sort.Sort(sortable(keys))
			for _, k := range keys {
				v := mp[k]
				entryBuffer.Reset()
				if err := entryBuffer.encodeFieldElement(keyType, k); err != nil {
					return err
				}
				rv := reflect.ValueOf(v)
				// skip nil message values; everything else gets encoded
				if rv.Kind() != reflect.Ptr || !rv.IsNil() {
					if err := entryBuffer.encodeFieldElement(valType, v); err != nil {
						return err
					}
				}
				if err := cb.EncodeTagAndWireType(fd.GetNumber(), proto.WireBytes); err != nil {
					return err
				}
				if err := cb.EncodeRawBytes(entryBuffer.Bytes()); err != nil {
					return err
				}
			}
		} else {
			// non-deterministic: emit entries in map iteration order
			for k, v := range mp {
				entryBuffer.Reset()
				if err := entryBuffer.encodeFieldElement(keyType, k); err != nil {
					return err
				}
				rv := reflect.ValueOf(v)
				// skip nil message values; everything else gets encoded
				if rv.Kind() != reflect.Ptr || !rv.IsNil() {
					if err := entryBuffer.encodeFieldElement(valType, v); err != nil {
						return err
					}
				}
				if err := cb.EncodeTagAndWireType(fd.GetNumber(), proto.WireBytes); err != nil {
					return err
				}
				if err := cb.EncodeRawBytes(entryBuffer.Bytes()); err != nil {
					return err
				}
			}
		}
		return nil
	} else if fd.IsRepeated() {
		sl := val.([]interface{})
		wt, err := getWireType(fd.GetType())
		if err != nil {
			return err
		}
		if isPacked(fd) && len(sl) > 0 &&
			(wt == proto.WireVarint || wt == proto.WireFixed32 || wt == proto.WireFixed64) {
			// packed repeated field
			var packedBuffer Buffer
			for _, v := range sl {
				if err := packedBuffer.encodeFieldValue(fd, v); err != nil {
					return err
				}
			}
			if err := cb.EncodeTagAndWireType(fd.GetNumber(), proto.WireBytes); err != nil {
				return err
			}
			return cb.EncodeRawBytes(packedBuffer.Bytes())
		} else {
			// non-packed repeated field
			for _, v := range sl {
				if err := cb.encodeFieldElement(fd, v); err != nil {
					return err
				}
			}
			return nil
		}
	} else {
		return cb.encodeFieldElement(fd, val)
	}
}
+
+func isPacked(fd *desc.FieldDescriptor) bool {
+ opts := fd.AsFieldDescriptorProto().GetOptions()
+ // if set, use that value
+ if opts != nil && opts.Packed != nil {
+ return opts.GetPacked()
+ }
+ // if unset: proto2 defaults to false, proto3 to true
+ return fd.GetFile().IsProto3()
+}
+
// sortable implements sort.Interface over map keys so they can be
// serialized in a deterministic order. Elements are expected to be
// integers (int32, int64, uint32, uint64), bools, or strings; all
// elements in one slice must share a kind.
type sortable []interface{}

// Len reports the number of keys.
func (s sortable) Len() int { return len(s) }

// Less orders two keys of the same kind; for bools, false sorts before
// true. It panics if the elements are not a supported key kind.
func (s sortable) Less(i, j int) bool {
	a, b := s[i], s[j]
	switch reflect.TypeOf(a).Kind() {
	case reflect.Bool:
		return !a.(bool) && b.(bool)
	case reflect.Int32:
		return a.(int32) < b.(int32)
	case reflect.Int64:
		return a.(int64) < b.(int64)
	case reflect.Uint32:
		return a.(uint32) < b.(uint32)
	case reflect.Uint64:
		return a.(uint64) < b.(uint64)
	case reflect.String:
		return a.(string) < b.(string)
	}
	panic(fmt.Sprintf("cannot compare keys of type %v", reflect.TypeOf(a)))
}

// Swap exchanges the keys at positions i and j.
func (s sortable) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
// encodeFieldElement writes one element of the field: the tag and wire
// type, followed by the value itself. For group-encoded fields it also
// writes the trailing end-group tag, since encodeFieldValue leaves that
// to its caller.
func (b *Buffer) encodeFieldElement(fd *desc.FieldDescriptor, val interface{}) error {
	wt, err := getWireType(fd.GetType())
	if err != nil {
		return err
	}
	if err := b.EncodeTagAndWireType(fd.GetNumber(), wt); err != nil {
		return err
	}
	if err := b.encodeFieldValue(fd, val); err != nil {
		return err
	}
	if wt == proto.WireStartGroup {
		// groups are bracketed by start/end tags rather than a length prefix
		return b.EncodeTagAndWireType(fd.GetNumber(), proto.WireEndGroup)
	}
	return nil
}
+
// encodeFieldValue writes just the payload for a single value of the given
// field (no tag), dispatching on the declared field type to pick the wire
// encoding. val must already have the Go type that generated structs use
// for the field's declared type (e.g. int32 for enums).
func (b *Buffer) encodeFieldValue(fd *desc.FieldDescriptor, val interface{}) error {
	switch fd.GetType() {
	case descriptorpb.FieldDescriptorProto_TYPE_BOOL:
		v := val.(bool)
		if v {
			return b.EncodeVarint(1)
		}
		return b.EncodeVarint(0)

	case descriptorpb.FieldDescriptorProto_TYPE_ENUM,
		descriptorpb.FieldDescriptorProto_TYPE_INT32:
		v := val.(int32)
		return b.EncodeVarint(uint64(v))

	case descriptorpb.FieldDescriptorProto_TYPE_SFIXED32:
		v := val.(int32)
		return b.EncodeFixed32(uint64(v))

	case descriptorpb.FieldDescriptorProto_TYPE_SINT32:
		v := val.(int32)
		return b.EncodeVarint(EncodeZigZag32(v))

	case descriptorpb.FieldDescriptorProto_TYPE_UINT32:
		v := val.(uint32)
		return b.EncodeVarint(uint64(v))

	case descriptorpb.FieldDescriptorProto_TYPE_FIXED32:
		v := val.(uint32)
		return b.EncodeFixed32(uint64(v))

	case descriptorpb.FieldDescriptorProto_TYPE_INT64:
		v := val.(int64)
		return b.EncodeVarint(uint64(v))

	case descriptorpb.FieldDescriptorProto_TYPE_SFIXED64:
		v := val.(int64)
		return b.EncodeFixed64(uint64(v))

	case descriptorpb.FieldDescriptorProto_TYPE_SINT64:
		v := val.(int64)
		return b.EncodeVarint(EncodeZigZag64(v))

	case descriptorpb.FieldDescriptorProto_TYPE_UINT64:
		v := val.(uint64)
		return b.EncodeVarint(v)

	case descriptorpb.FieldDescriptorProto_TYPE_FIXED64:
		v := val.(uint64)
		return b.EncodeFixed64(v)

	case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE:
		v := val.(float64)
		return b.EncodeFixed64(math.Float64bits(v))

	case descriptorpb.FieldDescriptorProto_TYPE_FLOAT:
		v := val.(float32)
		return b.EncodeFixed32(uint64(math.Float32bits(v)))

	case descriptorpb.FieldDescriptorProto_TYPE_BYTES:
		v := val.([]byte)
		return b.EncodeRawBytes(v)

	case descriptorpb.FieldDescriptorProto_TYPE_STRING:
		v := val.(string)
		return b.EncodeRawBytes(([]byte)(v))

	case descriptorpb.FieldDescriptorProto_TYPE_MESSAGE:
		return b.EncodeDelimitedMessage(val.(proto.Message))

	case descriptorpb.FieldDescriptorProto_TYPE_GROUP:
		// just append the nested message to this buffer
		return b.EncodeMessage(val.(proto.Message))
		// whosoever writeth start-group tag (e.g. caller) is responsible for writing end-group tag

	default:
		return fmt.Errorf("unrecognized field type: %v", fd.GetType())
	}
}
+
// getWireType returns the protobuf wire type used to encode the given
// declared field type. It returns ErrBadWireType for unrecognized
// declared types.
func getWireType(t descriptorpb.FieldDescriptorProto_Type) (int8, error) {
	switch t {
	case descriptorpb.FieldDescriptorProto_TYPE_ENUM,
		descriptorpb.FieldDescriptorProto_TYPE_BOOL,
		descriptorpb.FieldDescriptorProto_TYPE_INT32,
		descriptorpb.FieldDescriptorProto_TYPE_SINT32,
		descriptorpb.FieldDescriptorProto_TYPE_UINT32,
		descriptorpb.FieldDescriptorProto_TYPE_INT64,
		descriptorpb.FieldDescriptorProto_TYPE_SINT64,
		descriptorpb.FieldDescriptorProto_TYPE_UINT64:
		return proto.WireVarint, nil

	case descriptorpb.FieldDescriptorProto_TYPE_FIXED32,
		descriptorpb.FieldDescriptorProto_TYPE_SFIXED32,
		descriptorpb.FieldDescriptorProto_TYPE_FLOAT:
		return proto.WireFixed32, nil

	case descriptorpb.FieldDescriptorProto_TYPE_FIXED64,
		descriptorpb.FieldDescriptorProto_TYPE_SFIXED64,
		descriptorpb.FieldDescriptorProto_TYPE_DOUBLE:
		return proto.WireFixed64, nil

	case descriptorpb.FieldDescriptorProto_TYPE_BYTES,
		descriptorpb.FieldDescriptorProto_TYPE_STRING,
		descriptorpb.FieldDescriptorProto_TYPE_MESSAGE:
		return proto.WireBytes, nil

	case descriptorpb.FieldDescriptorProto_TYPE_GROUP:
		return proto.WireStartGroup, nil

	default:
		return 0, ErrBadWireType
	}
}
diff --git a/vendor/github.com/jhump/protoreflect/desc/cache.go b/vendor/github.com/jhump/protoreflect/desc/cache.go
new file mode 100644
index 0000000..418632b
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/cache.go
@@ -0,0 +1,48 @@
+package desc
+
+import (
+ "sync"
+
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
// descriptorCache maps protoreflect descriptors to this package's wrapper
// Descriptor values, so that each underlying descriptor is wrapped at
// most once.
type descriptorCache interface {
	get(protoreflect.Descriptor) Descriptor
	put(protoreflect.Descriptor, Descriptor)
}
+
// lockingCache is a descriptorCache that is safe for concurrent use: it
// guards an underlying mapCache with a reader/writer mutex.
type lockingCache struct {
	cacheMu sync.RWMutex
	cache   mapCache
}

// get returns the cached wrapper for d (or nil), taking a read lock.
func (c *lockingCache) get(d protoreflect.Descriptor) Descriptor {
	c.cacheMu.RLock()
	defer c.cacheMu.RUnlock()
	return c.cache.get(d)
}

// put stores val as the wrapper for key, taking the write lock.
func (c *lockingCache) put(key protoreflect.Descriptor, val Descriptor) {
	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()
	c.cache.put(key, val)
}

// withLock runs fn while holding the write lock.
func (c *lockingCache) withLock(fn func(descriptorCache)) {
	c.cacheMu.Lock()
	defer c.cacheMu.Unlock()
	// Pass the underlying mapCache. We don't want fn to use
	// c.get or c.put since we already have the lock. So those
	// methods would try to re-acquire and then deadlock!
	fn(c.cache)
}
+
// mapCache is a simple descriptorCache backed by a map. It is not safe
// for concurrent use; see lockingCache for a synchronized wrapper.
type mapCache map[protoreflect.Descriptor]Descriptor

// get returns the cached wrapper for d, or nil if none has been stored.
func (c mapCache) get(d protoreflect.Descriptor) Descriptor {
	return c[d]
}

// put stores val as the wrapper for key.
func (c mapCache) put(key protoreflect.Descriptor, val Descriptor) {
	c[key] = val
}
diff --git a/vendor/github.com/jhump/protoreflect/desc/convert.go b/vendor/github.com/jhump/protoreflect/desc/convert.go
new file mode 100644
index 0000000..01a6e9e
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/convert.go
@@ -0,0 +1,294 @@
+package desc
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "google.golang.org/protobuf/reflect/protodesc"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/types/descriptorpb"
+
+ "github.com/jhump/protoreflect/desc/internal"
+ intn "github.com/jhump/protoreflect/internal"
+)
+
// CreateFileDescriptor instantiates a new file descriptor for the given descriptor proto.
// The file's direct dependencies must be provided. If the given dependencies do not include
// all of the file's dependencies or if the contents of the descriptors are internally
// inconsistent (e.g. contain unresolvable symbols) then an error is returned.
func CreateFileDescriptor(fd *descriptorpb.FileDescriptorProto, deps ...*FileDescriptor) (*FileDescriptor, error) {
	// no custom import resolver: import paths are used as-is
	return createFileDescriptor(fd, deps, nil)
}
+
// descResolver resolves file paths and symbol names against a fixed slice
// of already-built *FileDescriptor values, optionally rewriting import
// paths via an ImportResolver. It is used when linking a descriptor proto
// against its dependencies.
type descResolver struct {
	files          []*FileDescriptor
	importResolver *ImportResolver
	// fromPath is the path of the importing file, used to resolve
	// relative/aliased imports
	fromPath string
}

// FindFileByPath returns the dependency whose name matches path. The path
// is first rewritten via the import resolver; if no file matches the
// rewritten path, the original path is tried as a fallback.
func (r *descResolver) FindFileByPath(path string) (protoreflect.FileDescriptor, error) {
	resolvedPath := r.importResolver.ResolveImport(r.fromPath, path)
	d := r.findFileByPath(resolvedPath)
	if d != nil {
		return d, nil
	}
	if resolvedPath != path {
		d := r.findFileByPath(path)
		if d != nil {
			return d, nil
		}
	}
	return nil, protoregistry.NotFound
}

// findFileByPath scans the dependency slice for a file with the given
// name, returning nil when absent.
func (r *descResolver) findFileByPath(path string) protoreflect.FileDescriptor {
	for _, fd := range r.files {
		if fd.GetName() == path {
			return fd.UnwrapFile()
		}
	}
	return nil
}

// FindDescriptorByName searches every dependency for the given
// fully-qualified symbol and returns its underlying protoreflect
// descriptor.
func (r *descResolver) FindDescriptorByName(n protoreflect.FullName) (protoreflect.Descriptor, error) {
	for _, fd := range r.files {
		d := fd.FindSymbol(string(n))
		if d != nil {
			return d.(DescriptorWrapper).Unwrap(), nil
		}
	}
	return nil, protoregistry.NotFound
}
+
+func createFileDescriptor(fd *descriptorpb.FileDescriptorProto, deps []*FileDescriptor, r *ImportResolver) (*FileDescriptor, error) {
+ dr := &descResolver{files: deps, importResolver: r, fromPath: fd.GetName()}
+ d, err := protodesc.NewFile(fd, dr)
+ if err != nil {
+ return nil, err
+ }
+
+ // make sure cache has dependencies populated
+ cache := mapCache{}
+ for _, dep := range deps {
+ fd, err := dr.FindFileByPath(dep.GetName())
+ if err != nil {
+ return nil, err
+ }
+ cache.put(fd, dep)
+ }
+
+ return convertFile(d, fd, cache)
+}
+
+func convertFile(d protoreflect.FileDescriptor, fd *descriptorpb.FileDescriptorProto, cache descriptorCache) (*FileDescriptor, error) {
+ ret := &FileDescriptor{
+ wrapped: d,
+ proto: fd,
+ symbols: map[string]Descriptor{},
+ fieldIndex: map[string]map[int32]*FieldDescriptor{},
+ }
+ cache.put(d, ret)
+
+ // populate references to file descriptor dependencies
+ ret.deps = make([]*FileDescriptor, len(fd.GetDependency()))
+ for i := 0; i < d.Imports().Len(); i++ {
+ f := d.Imports().Get(i).FileDescriptor
+ if c, err := wrapFile(f, cache); err != nil {
+ return nil, err
+ } else {
+ ret.deps[i] = c
+ }
+ }
+ ret.publicDeps = make([]*FileDescriptor, len(fd.GetPublicDependency()))
+ for i, pd := range fd.GetPublicDependency() {
+ ret.publicDeps[i] = ret.deps[pd]
+ }
+ ret.weakDeps = make([]*FileDescriptor, len(fd.GetWeakDependency()))
+ for i, wd := range fd.GetWeakDependency() {
+ ret.weakDeps[i] = ret.deps[wd]
+ }
+
+ // populate all tables of child descriptors
+ path := make([]int32, 1, 8)
+ path[0] = internal.File_messagesTag
+ for i := 0; i < d.Messages().Len(); i++ {
+ src := d.Messages().Get(i)
+ srcProto := fd.GetMessageType()[src.Index()]
+ md := createMessageDescriptor(ret, ret, src, srcProto, ret.symbols, cache, append(path, int32(i)))
+ ret.symbols[string(src.FullName())] = md
+ ret.messages = append(ret.messages, md)
+ }
+ path[0] = internal.File_enumsTag
+ for i := 0; i < d.Enums().Len(); i++ {
+ src := d.Enums().Get(i)
+ srcProto := fd.GetEnumType()[src.Index()]
+ ed := createEnumDescriptor(ret, ret, src, srcProto, ret.symbols, cache, append(path, int32(i)))
+ ret.symbols[string(src.FullName())] = ed
+ ret.enums = append(ret.enums, ed)
+ }
+ path[0] = internal.File_extensionsTag
+ for i := 0; i < d.Extensions().Len(); i++ {
+ src := d.Extensions().Get(i)
+ srcProto := fd.GetExtension()[src.Index()]
+ exd := createFieldDescriptor(ret, ret, src, srcProto, cache, append(path, int32(i)))
+ ret.symbols[string(src.FullName())] = exd
+ ret.extensions = append(ret.extensions, exd)
+ }
+ path[0] = internal.File_servicesTag
+ for i := 0; i < d.Services().Len(); i++ {
+ src := d.Services().Get(i)
+ srcProto := fd.GetService()[src.Index()]
+ sd := createServiceDescriptor(ret, src, srcProto, ret.symbols, append(path, int32(i)))
+ ret.symbols[string(src.FullName())] = sd
+ ret.services = append(ret.services, sd)
+ }
+
+ ret.sourceInfo = internal.CreateSourceInfoMap(fd)
+ ret.sourceInfoRecomputeFunc = ret.recomputeSourceInfo
+
+ // now we can resolve all type references and source code info
+ for _, md := range ret.messages {
+ if err := md.resolve(cache); err != nil {
+ return nil, err
+ }
+ }
+ path[0] = internal.File_extensionsTag
+ for _, exd := range ret.extensions {
+ if err := exd.resolve(cache); err != nil {
+ return nil, err
+ }
+ }
+ path[0] = internal.File_servicesTag
+ for _, sd := range ret.services {
+ if err := sd.resolve(cache); err != nil {
+ return nil, err
+ }
+ }
+
+ return ret, nil
+}
+
// CreateFileDescriptors constructs a set of descriptors, one for each of the
// given descriptor protos. The given set of descriptor protos must include all
// transitive dependencies for every file.
func CreateFileDescriptors(fds []*descriptorpb.FileDescriptorProto) (map[string]*FileDescriptor, error) {
	// no custom import resolver: import paths are used as-is
	return createFileDescriptors(fds, nil)
}
+
+func createFileDescriptors(fds []*descriptorpb.FileDescriptorProto, r *ImportResolver) (map[string]*FileDescriptor, error) {
+ if len(fds) == 0 {
+ return nil, nil
+ }
+ files := map[string]*descriptorpb.FileDescriptorProto{}
+ resolved := map[string]*FileDescriptor{}
+ var name string
+ for _, fd := range fds {
+ name = fd.GetName()
+ files[name] = fd
+ }
+ for _, fd := range fds {
+ _, err := createFromSet(fd.GetName(), r, nil, files, resolved)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return resolved, nil
+}
+
// ToFileDescriptorSet creates a FileDescriptorSet proto that contains all of the given
// file descriptors and their transitive dependencies. The files are topologically sorted
// so that a file will always appear after its dependencies.
func ToFileDescriptorSet(fds ...*FileDescriptor) *descriptorpb.FileDescriptorSet {
	var fdps []*descriptorpb.FileDescriptorProto
	// the seen set de-duplicates files reachable via multiple import paths
	addAllFiles(fds, &fdps, map[string]struct{}{})
	return &descriptorpb.FileDescriptorSet{File: fdps}
}
+
// addAllFiles appends the descriptor protos for the given files and their
// transitive dependencies to results in topological order (dependencies
// first), skipping any file name already present in seen.
func addAllFiles(src []*FileDescriptor, results *[]*descriptorpb.FileDescriptorProto, seen map[string]struct{}) {
	for _, fd := range src {
		if _, ok := seen[fd.GetName()]; ok {
			continue
		}
		seen[fd.GetName()] = struct{}{}
		// recurse first so dependencies precede this file in results
		addAllFiles(fd.GetDependencies(), results, seen)
		*results = append(*results, fd.AsFileDescriptorProto())
	}
}
+
// CreateFileDescriptorFromSet creates a descriptor from the given file descriptor set. The
// set's *last* file will be the returned descriptor. The set's remaining files must comprise
// the full set of transitive dependencies of that last file. This is the same format and
// order used by protoc when emitting a FileDescriptorSet file with an invocation like so:
//
//	protoc --descriptor_set_out=./test.protoset --include_imports -I. test.proto
func CreateFileDescriptorFromSet(fds *descriptorpb.FileDescriptorSet) (*FileDescriptor, error) {
	// no custom import resolver: import paths are used as-is
	return createFileDescriptorFromSet(fds, nil)
}
+
// createFileDescriptorFromSet builds descriptors for every file in the set
// and returns the one for the set's last file.
func createFileDescriptorFromSet(fds *descriptorpb.FileDescriptorSet, r *ImportResolver) (*FileDescriptor, error) {
	result, err := createFileDescriptorsFromSet(fds, r)
	if err != nil {
		return nil, err
	}
	// safe to index: createFileDescriptorsFromSet errors on an empty set
	files := fds.GetFile()
	lastFilename := files[len(files)-1].GetName()
	return result[lastFilename], nil
}
+
// CreateFileDescriptorsFromSet creates file descriptors from the given file descriptor set.
// The returned map includes all files in the set, keyed by name. The set must include the
// full set of transitive dependencies for all files therein or else a link error will occur
// and be returned instead of the slice of descriptors. This is the same format emitted by
// protoc when writing a FileDescriptorSet file with an invocation like so:
//
//	protoc --descriptor_set_out=./test.protoset --include_imports -I. test.proto
func CreateFileDescriptorsFromSet(fds *descriptorpb.FileDescriptorSet) (map[string]*FileDescriptor, error) {
	// no custom import resolver: import paths are used as-is
	return createFileDescriptorsFromSet(fds, nil)
}
+
// createFileDescriptorsFromSet validates that the set is non-empty and then
// creates and links descriptors for every file in it.
func createFileDescriptorsFromSet(fds *descriptorpb.FileDescriptorSet, r *ImportResolver) (map[string]*FileDescriptor, error) {
	files := fds.GetFile()
	if len(files) == 0 {
		return nil, errors.New("file descriptor set is empty")
	}
	return createFileDescriptors(files, r)
}
+
// createFromSet creates a descriptor for the given filename. It recursively
// creates descriptors for the given file's dependencies.
func createFromSet(filename string, r *ImportResolver, seen []string, files map[string]*descriptorpb.FileDescriptorProto, resolved map[string]*FileDescriptor) (*FileDescriptor, error) {
	// detect import cycles before doing any work
	for _, s := range seen {
		if filename == s {
			return nil, fmt.Errorf("cycle in imports: %s", strings.Join(append(seen, filename), " -> "))
		}
	}
	seen = append(seen, filename)

	// reuse descriptors already built in this invocation
	if d, ok := resolved[filename]; ok {
		return d, nil
	}
	fdp := files[filename]
	if fdp == nil {
		return nil, intn.ErrNoSuchFile(filename)
	}
	// build all dependencies first so we can link against them
	deps := make([]*FileDescriptor, len(fdp.GetDependency()))
	for i, depName := range fdp.GetDependency() {
		resolvedDep := r.ResolveImport(filename, depName)
		dep, err := createFromSet(resolvedDep, r, seen, files, resolved)
		if _, ok := err.(intn.ErrNoSuchFile); ok && resolvedDep != depName {
			// resolved path not in the set: fall back to the declared import path
			dep, err = createFromSet(depName, r, seen, files, resolved)
		}
		if err != nil {
			return nil, err
		}
		deps[i] = dep
	}
	d, err := createFileDescriptor(fdp, deps, r)
	if err != nil {
		return nil, err
	}
	resolved[filename] = d
	return d, nil
}
diff --git a/vendor/github.com/jhump/protoreflect/desc/descriptor.go b/vendor/github.com/jhump/protoreflect/desc/descriptor.go
new file mode 100644
index 0000000..68eb252
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/descriptor.go
@@ -0,0 +1,1847 @@
+package desc
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "strconv"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/types/descriptorpb"
+
+ "github.com/jhump/protoreflect/desc/internal"
+)
+
+// Descriptor is the common interface implemented by all descriptor objects.
+type Descriptor interface {
+	// GetName returns the name of the object described by the descriptor. This will
+	// be a base name that does not include enclosing message names or the package name.
+	// For file descriptors, this indicates the path and name to the described file.
+	GetName() string
+	// GetFullyQualifiedName returns the fully-qualified name of the object described by
+	// the descriptor. This will include the package name and any enclosing message names.
+	// For file descriptors, this returns the path and name to the described file (same as
+	// GetName).
+	GetFullyQualifiedName() string
+	// GetParent returns the enclosing element in a proto source file. If the described
+	// object is a top-level object, this returns the file descriptor. Otherwise, it returns
+	// the element in which the described object was declared. File descriptors have no
+	// parent and return nil.
+	GetParent() Descriptor
+	// GetFile returns the file descriptor in which this element was declared. File
+	// descriptors return themselves.
+	GetFile() *FileDescriptor
+	// GetOptions returns the options proto containing options for the described element.
+	GetOptions() proto.Message
+	// GetSourceInfo returns any source code information that was present in the file
+	// descriptor. Source code info is optional. If no source code info is available for
+	// the element (including if there is none at all in the file descriptor) then this
+	// returns nil.
+	GetSourceInfo() *descriptorpb.SourceCodeInfo_Location
+	// AsProto returns the underlying descriptor proto for this descriptor.
+	AsProto() proto.Message
+}
+
+type sourceInfoRecomputeFunc = internal.SourceInfoComputeFunc
+
+// FileDescriptor describes a proto source file.
+type FileDescriptor struct {
+ wrapped protoreflect.FileDescriptor
+ proto *descriptorpb.FileDescriptorProto
+ symbols map[string]Descriptor
+ deps []*FileDescriptor
+ publicDeps []*FileDescriptor
+ weakDeps []*FileDescriptor
+ messages []*MessageDescriptor
+ enums []*EnumDescriptor
+ extensions []*FieldDescriptor
+ services []*ServiceDescriptor
+ fieldIndex map[string]map[int32]*FieldDescriptor
+ sourceInfo internal.SourceInfoMap
+ sourceInfoRecomputeFunc
+}
+
+// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more
+// interested in UnwrapFile, which has a more specific return type. This generic
+// version is present to satisfy the DescriptorWrapper interface.
+func (fd *FileDescriptor) Unwrap() protoreflect.Descriptor {
+ return fd.wrapped
+}
+
+// UnwrapFile returns the underlying protoreflect.FileDescriptor.
+func (fd *FileDescriptor) UnwrapFile() protoreflect.FileDescriptor {
+ return fd.wrapped
+}
+
+func (fd *FileDescriptor) recomputeSourceInfo() {
+ internal.PopulateSourceInfoMap(fd.proto, fd.sourceInfo)
+}
+
+// registerField indexes the given field under its owner message's
+// fully-qualified name and its tag number, enabling constant-time lookups by
+// FindExtension and FindFieldByNumber.
+func (fd *FileDescriptor) registerField(field *FieldDescriptor) {
+	fields := fd.fieldIndex[field.owner.GetFullyQualifiedName()]
+	if fields == nil {
+		fields = map[int32]*FieldDescriptor{}
+		fd.fieldIndex[field.owner.GetFullyQualifiedName()] = fields
+	}
+	fields[field.GetNumber()] = field
+}
+
+// GetName returns the name of the file, as it was given to the protoc invocation
+// to compile it, possibly including path (relative to a directory in the proto
+// import path).
+func (fd *FileDescriptor) GetName() string {
+ return fd.wrapped.Path()
+}
+
+// GetFullyQualifiedName returns the name of the file, same as GetName. It is
+// present to satisfy the Descriptor interface.
+func (fd *FileDescriptor) GetFullyQualifiedName() string {
+ return fd.wrapped.Path()
+}
+
+// GetPackage returns the name of the package declared in the file.
+func (fd *FileDescriptor) GetPackage() string {
+ return string(fd.wrapped.Package())
+}
+
+// GetParent always returns nil: files are the root of descriptor hierarchies.
+// It is present to satisfy the Descriptor interface.
+func (fd *FileDescriptor) GetParent() Descriptor {
+	return nil
+}
+
+// GetFile returns the receiver, which is a file descriptor. This is present
+// to satisfy the Descriptor interface.
+func (fd *FileDescriptor) GetFile() *FileDescriptor {
+ return fd
+}
+
+// GetOptions returns the file's options. Most usages will be more interested
+// in GetFileOptions, which has a concrete return type. This generic version
+// is present to satisfy the Descriptor interface.
+func (fd *FileDescriptor) GetOptions() proto.Message {
+ return fd.proto.GetOptions()
+}
+
+// GetFileOptions returns the file's options.
+func (fd *FileDescriptor) GetFileOptions() *descriptorpb.FileOptions {
+ return fd.proto.GetOptions()
+}
+
+// GetSourceInfo returns nil for files. It is present to satisfy the Descriptor
+// interface.
+func (fd *FileDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location {
+ return nil
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsFileDescriptorProto, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (fd *FileDescriptor) AsProto() proto.Message {
+ return fd.proto
+}
+
+// AsFileDescriptorProto returns the underlying descriptor proto.
+func (fd *FileDescriptor) AsFileDescriptorProto() *descriptorpb.FileDescriptorProto {
+ return fd.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (fd *FileDescriptor) String() string {
+ return fd.proto.String()
+}
+
+// IsProto3 returns true if the file declares a syntax of "proto3".
+//
+// When this returns false, the file is either syntax "proto2" (if
+// Edition() returns zero) or the file uses editions.
+func (fd *FileDescriptor) IsProto3() bool {
+ return fd.wrapped.Syntax() == protoreflect.Proto3
+}
+
+// Edition returns the edition of the file. If the file does not
+// use editions syntax, zero is returned.
+func (fd *FileDescriptor) Edition() descriptorpb.Edition {
+ if fd.wrapped.Syntax() == protoreflect.Editions {
+ return fd.proto.GetEdition()
+ }
+ return 0
+}
+
+// GetDependencies returns all of this file's dependencies. These correspond to
+// import statements in the file.
+func (fd *FileDescriptor) GetDependencies() []*FileDescriptor {
+ return fd.deps
+}
+
+// GetPublicDependencies returns all of this file's public dependencies. These
+// correspond to public import statements in the file.
+func (fd *FileDescriptor) GetPublicDependencies() []*FileDescriptor {
+ return fd.publicDeps
+}
+
+// GetWeakDependencies returns all of this file's weak dependencies. These
+// correspond to weak import statements in the file.
+func (fd *FileDescriptor) GetWeakDependencies() []*FileDescriptor {
+ return fd.weakDeps
+}
+
+// GetMessageTypes returns all top-level messages declared in this file.
+func (fd *FileDescriptor) GetMessageTypes() []*MessageDescriptor {
+ return fd.messages
+}
+
+// GetEnumTypes returns all top-level enums declared in this file.
+func (fd *FileDescriptor) GetEnumTypes() []*EnumDescriptor {
+ return fd.enums
+}
+
+// GetExtensions returns all top-level extensions declared in this file.
+func (fd *FileDescriptor) GetExtensions() []*FieldDescriptor {
+ return fd.extensions
+}
+
+// GetServices returns all services declared in this file.
+func (fd *FileDescriptor) GetServices() []*ServiceDescriptor {
+ return fd.services
+}
+
+// FindSymbol returns the descriptor contained within this file for the
+// element with the given fully-qualified symbol name. If no such element
+// exists then this method returns nil. Symbols reachable only through public
+// imports of this file are also found.
+func (fd *FileDescriptor) FindSymbol(symbol string) Descriptor {
+	if len(symbol) == 0 {
+		return nil
+	}
+	if symbol[0] == '.' {
+		// tolerate fully-qualified names written with a leading dot, as they
+		// appear in descriptor protos' type references
+		symbol = symbol[1:]
+	}
+	if ret := fd.symbols[symbol]; ret != nil {
+		return ret
+	}
+
+	// allow accessing symbols through public imports, too
+	for _, dep := range fd.GetPublicDependencies() {
+		if ret := dep.FindSymbol(symbol); ret != nil {
+			return ret
+		}
+	}
+
+	// not found
+	return nil
+}
+
+// FindMessage finds the message with the given fully-qualified name. If no
+// such element exists in this file then nil is returned.
+func (fd *FileDescriptor) FindMessage(msgName string) *MessageDescriptor {
+ if md, ok := fd.symbols[msgName].(*MessageDescriptor); ok {
+ return md
+ } else {
+ return nil
+ }
+}
+
+// FindEnum finds the enum with the given fully-qualified name. If no such
+// element exists in this file then nil is returned.
+func (fd *FileDescriptor) FindEnum(enumName string) *EnumDescriptor {
+ if ed, ok := fd.symbols[enumName].(*EnumDescriptor); ok {
+ return ed
+ } else {
+ return nil
+ }
+}
+
+// FindService finds the service with the given fully-qualified name. If no
+// such element exists in this file then nil is returned.
+func (fd *FileDescriptor) FindService(serviceName string) *ServiceDescriptor {
+ if sd, ok := fd.symbols[serviceName].(*ServiceDescriptor); ok {
+ return sd
+ } else {
+ return nil
+ }
+}
+
+// FindExtension finds the extension field for the given extended type name and
+// tag number. If no such element exists in this file then nil is returned.
+func (fd *FileDescriptor) FindExtension(extendeeName string, tagNumber int32) *FieldDescriptor {
+ if exd, ok := fd.fieldIndex[extendeeName][tagNumber]; ok && exd.IsExtension() {
+ return exd
+ } else {
+ return nil
+ }
+}
+
+// FindExtensionByName finds the extension field with the given fully-qualified
+// name. If no such element exists in this file then nil is returned.
+func (fd *FileDescriptor) FindExtensionByName(extName string) *FieldDescriptor {
+ if exd, ok := fd.symbols[extName].(*FieldDescriptor); ok && exd.IsExtension() {
+ return exd
+ } else {
+ return nil
+ }
+}
+
+// MessageDescriptor describes a protocol buffer message.
+type MessageDescriptor struct {
+ wrapped protoreflect.MessageDescriptor
+ proto *descriptorpb.DescriptorProto
+ parent Descriptor
+ file *FileDescriptor
+ fields []*FieldDescriptor
+ nested []*MessageDescriptor
+ enums []*EnumDescriptor
+ extensions []*FieldDescriptor
+ oneOfs []*OneOfDescriptor
+ extRanges extRanges
+ sourceInfoPath []int32
+ jsonNames jsonNameMap
+}
+
+// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more
+// interested in UnwrapMessage, which has a more specific return type. This generic
+// version is present to satisfy the DescriptorWrapper interface.
+func (md *MessageDescriptor) Unwrap() protoreflect.Descriptor {
+ return md.wrapped
+}
+
+// UnwrapMessage returns the underlying protoreflect.MessageDescriptor.
+func (md *MessageDescriptor) UnwrapMessage() protoreflect.MessageDescriptor {
+ return md.wrapped
+}
+
+// createMessageDescriptor builds a MessageDescriptor that wraps md and
+// recursively builds descriptors for all of its nested elements (messages,
+// enums, fields, extensions, and oneofs), registering each in symbols and in
+// the cache. path is the source-code-info path to this message in the file.
+func createMessageDescriptor(fd *FileDescriptor, parent Descriptor, md protoreflect.MessageDescriptor, mdp *descriptorpb.DescriptorProto, symbols map[string]Descriptor, cache descriptorCache, path []int32) *MessageDescriptor {
+	ret := &MessageDescriptor{
+		wrapped:        md,
+		proto:          mdp,
+		parent:         parent,
+		file:           fd,
+		sourceInfoPath: append([]int32(nil), path...), // defensive copy
+	}
+	cache.put(md, ret)
+	// In the loops below, the last element of path holds the tag of the kind
+	// of nested element being visited (and is overwritten between loops), and
+	// the child's index is appended when descending into it.
+	path = append(path, internal.Message_nestedMessagesTag)
+	for i := 0; i < md.Messages().Len(); i++ {
+		src := md.Messages().Get(i)
+		srcProto := mdp.GetNestedType()[src.Index()]
+		nmd := createMessageDescriptor(fd, ret, src, srcProto, symbols, cache, append(path, int32(i)))
+		symbols[string(src.FullName())] = nmd
+		ret.nested = append(ret.nested, nmd)
+	}
+	path[len(path)-1] = internal.Message_enumsTag
+	for i := 0; i < md.Enums().Len(); i++ {
+		src := md.Enums().Get(i)
+		srcProto := mdp.GetEnumType()[src.Index()]
+		ed := createEnumDescriptor(fd, ret, src, srcProto, symbols, cache, append(path, int32(i)))
+		symbols[string(src.FullName())] = ed
+		ret.enums = append(ret.enums, ed)
+	}
+	path[len(path)-1] = internal.Message_fieldsTag
+	for i := 0; i < md.Fields().Len(); i++ {
+		src := md.Fields().Get(i)
+		srcProto := mdp.GetField()[src.Index()]
+		fld := createFieldDescriptor(fd, ret, src, srcProto, cache, append(path, int32(i)))
+		symbols[string(src.FullName())] = fld
+		ret.fields = append(ret.fields, fld)
+	}
+	path[len(path)-1] = internal.Message_extensionsTag
+	for i := 0; i < md.Extensions().Len(); i++ {
+		src := md.Extensions().Get(i)
+		srcProto := mdp.GetExtension()[src.Index()]
+		exd := createFieldDescriptor(fd, ret, src, srcProto, cache, append(path, int32(i)))
+		symbols[string(src.FullName())] = exd
+		ret.extensions = append(ret.extensions, exd)
+	}
+	path[len(path)-1] = internal.Message_oneOfsTag
+	for i := 0; i < md.Oneofs().Len(); i++ {
+		src := md.Oneofs().Get(i)
+		srcProto := mdp.GetOneofDecl()[src.Index()]
+		od := createOneOfDescriptor(fd, ret, i, src, srcProto, append(path, int32(i)))
+		symbols[string(src.FullName())] = od
+		ret.oneOfs = append(ret.oneOfs, od)
+	}
+	for _, r := range mdp.GetExtensionRange() {
+		// proto.ExtensionRange is inclusive (and that's how extension ranges are defined in code).
+		// but protoc converts range to exclusive end in descriptor, so we must convert back
+		end := r.GetEnd() - 1
+		ret.extRanges = append(ret.extRanges, proto.ExtensionRange{
+			Start: r.GetStart(),
+			End:   end})
+	}
+	// keep ranges sorted by start so extRanges.IsExtension can binary-search
+	sort.Sort(ret.extRanges)
+
+	return ret
+}
+
+func (md *MessageDescriptor) resolve(cache descriptorCache) error {
+ for _, nmd := range md.nested {
+ if err := nmd.resolve(cache); err != nil {
+ return err
+ }
+ }
+ for _, fld := range md.fields {
+ if err := fld.resolve(cache); err != nil {
+ return err
+ }
+ }
+ for _, exd := range md.extensions {
+ if err := exd.resolve(cache); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// GetName returns the simple (unqualified) name of the message.
+func (md *MessageDescriptor) GetName() string {
+ return string(md.wrapped.Name())
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the message. This
+// includes the package name (if there is one) as well as the names of any
+// enclosing messages.
+func (md *MessageDescriptor) GetFullyQualifiedName() string {
+ return string(md.wrapped.FullName())
+}
+
+// GetParent returns the message's enclosing descriptor. For top-level messages,
+// this will be a file descriptor. Otherwise it will be the descriptor for the
+// enclosing message.
+func (md *MessageDescriptor) GetParent() Descriptor {
+ return md.parent
+}
+
+// GetFile returns the descriptor for the file in which this message is defined.
+func (md *MessageDescriptor) GetFile() *FileDescriptor {
+ return md.file
+}
+
+// GetOptions returns the message's options. Most usages will be more interested
+// in GetMessageOptions, which has a concrete return type. This generic version
+// is present to satisfy the Descriptor interface.
+func (md *MessageDescriptor) GetOptions() proto.Message {
+ return md.proto.GetOptions()
+}
+
+// GetMessageOptions returns the message's options.
+func (md *MessageDescriptor) GetMessageOptions() *descriptorpb.MessageOptions {
+ return md.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the message, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// message was defined and also contains comments associated with the message
+// definition.
+func (md *MessageDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location {
+ return md.file.sourceInfo.Get(md.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsDescriptorProto, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (md *MessageDescriptor) AsProto() proto.Message {
+ return md.proto
+}
+
+// AsDescriptorProto returns the underlying descriptor proto.
+func (md *MessageDescriptor) AsDescriptorProto() *descriptorpb.DescriptorProto {
+ return md.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (md *MessageDescriptor) String() string {
+ return md.proto.String()
+}
+
+// IsMapEntry returns true if this is a synthetic message type that represents an entry
+// in a map field.
+func (md *MessageDescriptor) IsMapEntry() bool {
+ return md.wrapped.IsMapEntry()
+}
+
+// GetFields returns all of the fields for this message.
+func (md *MessageDescriptor) GetFields() []*FieldDescriptor {
+ return md.fields
+}
+
+// GetNestedMessageTypes returns all of the message types declared inside this message.
+func (md *MessageDescriptor) GetNestedMessageTypes() []*MessageDescriptor {
+ return md.nested
+}
+
+// GetNestedEnumTypes returns all of the enums declared inside this message.
+func (md *MessageDescriptor) GetNestedEnumTypes() []*EnumDescriptor {
+ return md.enums
+}
+
+// GetNestedExtensions returns all of the extensions declared inside this message.
+func (md *MessageDescriptor) GetNestedExtensions() []*FieldDescriptor {
+ return md.extensions
+}
+
+// GetOneOfs returns all of the one-of field sets declared inside this message.
+func (md *MessageDescriptor) GetOneOfs() []*OneOfDescriptor {
+ return md.oneOfs
+}
+
+// IsProto3 returns true if the file in which this message is defined declares a syntax of "proto3".
+func (md *MessageDescriptor) IsProto3() bool {
+ return md.file.IsProto3()
+}
+
+// GetExtensionRanges returns the ranges of extension field numbers for this message.
+func (md *MessageDescriptor) GetExtensionRanges() []proto.ExtensionRange {
+ return md.extRanges
+}
+
+// IsExtendable returns true if this message has any extension ranges.
+func (md *MessageDescriptor) IsExtendable() bool {
+ return len(md.extRanges) > 0
+}
+
+// IsExtension returns true if the given tag number is within any of this message's
+// extension ranges.
+func (md *MessageDescriptor) IsExtension(tagNumber int32) bool {
+ return md.extRanges.IsExtension(tagNumber)
+}
+
+type extRanges []proto.ExtensionRange
+
+func (er extRanges) String() string {
+ var buf bytes.Buffer
+ first := true
+ for _, r := range er {
+ if first {
+ first = false
+ } else {
+ buf.WriteString(",")
+ }
+ fmt.Fprintf(&buf, "%d..%d", r.Start, r.End)
+ }
+ return buf.String()
+}
+
+// IsExtension reports whether tagNumber falls within any of the ranges.
+// The receiver must be sorted by range start (createMessageDescriptor sorts
+// it after construction) for this binary search to be valid.
+func (er extRanges) IsExtension(tagNumber int32) bool {
+	i := sort.Search(len(er), func(i int) bool { return er[i].End >= tagNumber })
+	return i < len(er) && tagNumber >= er[i].Start
+}
+
+func (er extRanges) Len() int {
+ return len(er)
+}
+
+func (er extRanges) Less(i, j int) bool {
+ return er[i].Start < er[j].Start
+}
+
+func (er extRanges) Swap(i, j int) {
+ er[i], er[j] = er[j], er[i]
+}
+
+// FindFieldByName finds the field with the given name. If no such field exists
+// then nil is returned. Only regular fields are returned, not extensions.
+func (md *MessageDescriptor) FindFieldByName(fieldName string) *FieldDescriptor {
+ fqn := md.GetFullyQualifiedName() + "." + fieldName
+ if fd, ok := md.file.symbols[fqn].(*FieldDescriptor); ok && !fd.IsExtension() {
+ return fd
+ } else {
+ return nil
+ }
+}
+
+// FindFieldByNumber finds the field with the given tag number. If no such field
+// exists then nil is returned. Only regular fields are returned, not extensions.
+func (md *MessageDescriptor) FindFieldByNumber(tagNumber int32) *FieldDescriptor {
+ if fd, ok := md.file.fieldIndex[md.GetFullyQualifiedName()][tagNumber]; ok && !fd.IsExtension() {
+ return fd
+ } else {
+ return nil
+ }
+}
+
+// FieldDescriptor describes a field of a protocol buffer message.
+type FieldDescriptor struct {
+ wrapped protoreflect.FieldDescriptor
+ proto *descriptorpb.FieldDescriptorProto
+ parent Descriptor
+ owner *MessageDescriptor
+ file *FileDescriptor
+ oneOf *OneOfDescriptor
+ msgType *MessageDescriptor
+ enumType *EnumDescriptor
+ sourceInfoPath []int32
+ def memoizedDefault
+}
+
+// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more
+// interested in UnwrapField, which has a more specific return type. This generic
+// version is present to satisfy the DescriptorWrapper interface.
+func (fd *FieldDescriptor) Unwrap() protoreflect.Descriptor {
+ return fd.wrapped
+}
+
+// UnwrapField returns the underlying protoreflect.FieldDescriptor.
+func (fd *FieldDescriptor) UnwrapField() protoreflect.FieldDescriptor {
+ return fd.wrapped
+}
+
+// createFieldDescriptor builds a FieldDescriptor wrapping fld and registers
+// it in the cache. Cross-references are only partially populated here; see
+// the note at the bottom.
+func createFieldDescriptor(fd *FileDescriptor, parent Descriptor, fld protoreflect.FieldDescriptor, fldp *descriptorpb.FieldDescriptorProto, cache descriptorCache, path []int32) *FieldDescriptor {
+	ret := &FieldDescriptor{
+		wrapped:        fld,
+		proto:          fldp,
+		parent:         parent,
+		file:           fd,
+		sourceInfoPath: append([]int32(nil), path...), // defensive copy
+	}
+	cache.put(fld, ret)
+	if !fld.IsExtension() {
+		// a non-extension field is always declared directly in its owner message
+		ret.owner = parent.(*MessageDescriptor)
+	}
+	// owner for extensions, field type (be it message or enum), and one-ofs get resolved later
+	return ret
+}
+
+// descriptorType returns a human-readable phrase (including an article)
+// describing the kind of the given descriptor, for use in error messages.
+func descriptorType(d Descriptor) string {
+	switch d := d.(type) {
+	case *FileDescriptor:
+		return "a file"
+	case *MessageDescriptor:
+		return "a message"
+	case *FieldDescriptor:
+		if d.IsExtension() {
+			return "an extension"
+		}
+		return "a field"
+	case *OneOfDescriptor:
+		return "a oneof"
+	case *EnumDescriptor:
+		return "an enum"
+	case *EnumValueDescriptor:
+		return "an enum value"
+	case *ServiceDescriptor:
+		return "a service"
+	case *MethodDescriptor:
+		return "a method"
+	default:
+		// unknown implementation; fall back to the dynamic Go type
+		return fmt.Sprintf("a %T", d)
+	}
+}
+
+// resolve links this field to the descriptors it references: its enum or
+// message type (for enum/message/group fields) and, for extensions, the
+// message it extends. It then registers the field in the file's field index.
+func (fd *FieldDescriptor) resolve(cache descriptorCache) error {
+	if fd.proto.OneofIndex != nil && fd.oneOf == nil {
+		// fd.oneOf is expected to have been linked elsewhere before resolve
+		// runs; a nil link here means the declared index could not be matched
+		return fmt.Errorf("could not link field %s to one-of index %d", fd.GetFullyQualifiedName(), *fd.proto.OneofIndex)
+	}
+	if fd.proto.GetType() == descriptorpb.FieldDescriptorProto_TYPE_ENUM {
+		desc, err := resolve(fd.file, fd.wrapped.Enum(), cache)
+		if err != nil {
+			return err
+		}
+		enumType, ok := desc.(*EnumDescriptor)
+		if !ok {
+			return fmt.Errorf("field %v indicates a type of enum, but references %q which is %s", fd.GetFullyQualifiedName(), fd.proto.GetTypeName(), descriptorType(desc))
+		}
+		fd.enumType = enumType
+	}
+	if fd.proto.GetType() == descriptorpb.FieldDescriptorProto_TYPE_MESSAGE || fd.proto.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP {
+		desc, err := resolve(fd.file, fd.wrapped.Message(), cache)
+		if err != nil {
+			return err
+		}
+		msgType, ok := desc.(*MessageDescriptor)
+		if !ok {
+			return fmt.Errorf("field %v indicates a type of message, but references %q which is %s", fd.GetFullyQualifiedName(), fd.proto.GetTypeName(), descriptorType(desc))
+		}
+		fd.msgType = msgType
+	}
+	if fd.IsExtension() {
+		// for extensions, the owner is the extended message, not the parent
+		// in which the extension was declared
+		desc, err := resolve(fd.file, fd.wrapped.ContainingMessage(), cache)
+		if err != nil {
+			return err
+		}
+		msgType, ok := desc.(*MessageDescriptor)
+		if !ok {
+			return fmt.Errorf("field %v extends %q which should be a message but is %s", fd.GetFullyQualifiedName(), fd.proto.GetExtendee(), descriptorType(desc))
+		}
+		fd.owner = msgType
+	}
+	fd.file.registerField(fd)
+	return nil
+}
+
+// determineDefault computes the Go value used as this field's default: nil
+// for map, repeated, and message fields; a declared (proto2) default when
+// present and parseable; otherwise the zero value for the field's type. For
+// non-proto3 enums without a declared default, the first declared enum value
+// is used.
+func (fd *FieldDescriptor) determineDefault() interface{} {
+	if fd.IsMap() {
+		return map[interface{}]interface{}(nil)
+	} else if fd.IsRepeated() {
+		return []interface{}(nil)
+	} else if fd.msgType != nil {
+		return nil
+	}
+
+	proto3 := fd.file.IsProto3()
+	if !proto3 {
+		// only proto2 allows explicitly declared default values
+		def := fd.AsFieldDescriptorProto().GetDefaultValue()
+		if def != "" {
+			ret := parseDefaultValue(fd, def)
+			if ret != nil {
+				return ret
+			}
+			// if we can't parse default value, fall-through to return normal default...
+		}
+	}
+
+	switch fd.GetType() {
+	case descriptorpb.FieldDescriptorProto_TYPE_FIXED32,
+		descriptorpb.FieldDescriptorProto_TYPE_UINT32:
+		return uint32(0)
+	case descriptorpb.FieldDescriptorProto_TYPE_SFIXED32,
+		descriptorpb.FieldDescriptorProto_TYPE_INT32,
+		descriptorpb.FieldDescriptorProto_TYPE_SINT32:
+		return int32(0)
+	case descriptorpb.FieldDescriptorProto_TYPE_FIXED64,
+		descriptorpb.FieldDescriptorProto_TYPE_UINT64:
+		return uint64(0)
+	case descriptorpb.FieldDescriptorProto_TYPE_SFIXED64,
+		descriptorpb.FieldDescriptorProto_TYPE_INT64,
+		descriptorpb.FieldDescriptorProto_TYPE_SINT64:
+		return int64(0)
+	case descriptorpb.FieldDescriptorProto_TYPE_FLOAT:
+		return float32(0.0)
+	case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE:
+		return float64(0.0)
+	case descriptorpb.FieldDescriptorProto_TYPE_BOOL:
+		return false
+	case descriptorpb.FieldDescriptorProto_TYPE_BYTES:
+		return []byte(nil)
+	case descriptorpb.FieldDescriptorProto_TYPE_STRING:
+		return ""
+	case descriptorpb.FieldDescriptorProto_TYPE_ENUM:
+		if proto3 {
+			return int32(0)
+		}
+		enumVals := fd.GetEnumType().GetValues()
+		if len(enumVals) > 0 {
+			return enumVals[0].GetNumber()
+		} else {
+			return int32(0) // enum declares no values; fall back to zero
+		}
+	default:
+		panic(fmt.Sprintf("Unknown field type: %v", fd.GetType()))
+	}
+}
+
+// parseDefaultValue parses the string form of a declared default value (as it
+// appears in a FieldDescriptorProto) into a Go value of the type matching the
+// field. For enum and bool fields an unparseable value yields nil (so the
+// caller falls back to the normal default); for numeric fields it yields the
+// type's zero value; unknown field types yield nil.
+func parseDefaultValue(fd *FieldDescriptor, val string) interface{} {
+	switch fd.GetType() {
+	case descriptorpb.FieldDescriptorProto_TYPE_ENUM:
+		// the declared default is the enum value's name, not its number
+		vd := fd.GetEnumType().FindValueByName(val)
+		if vd != nil {
+			return vd.GetNumber()
+		}
+		return nil
+	case descriptorpb.FieldDescriptorProto_TYPE_BOOL:
+		if val == "true" {
+			return true
+		} else if val == "false" {
+			return false
+		}
+		return nil
+	case descriptorpb.FieldDescriptorProto_TYPE_BYTES:
+		// protoc C-escapes bytes defaults; undo that escaping
+		return []byte(unescape(val))
+	case descriptorpb.FieldDescriptorProto_TYPE_STRING:
+		return val
+	case descriptorpb.FieldDescriptorProto_TYPE_FLOAT:
+		if f, err := strconv.ParseFloat(val, 32); err == nil {
+			return float32(f)
+		} else {
+			return float32(0)
+		}
+	case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE:
+		if f, err := strconv.ParseFloat(val, 64); err == nil {
+			return f
+		} else {
+			return float64(0)
+		}
+	case descriptorpb.FieldDescriptorProto_TYPE_INT32,
+		descriptorpb.FieldDescriptorProto_TYPE_SINT32,
+		descriptorpb.FieldDescriptorProto_TYPE_SFIXED32:
+		if i, err := strconv.ParseInt(val, 10, 32); err == nil {
+			return int32(i)
+		} else {
+			return int32(0)
+		}
+	case descriptorpb.FieldDescriptorProto_TYPE_UINT32,
+		descriptorpb.FieldDescriptorProto_TYPE_FIXED32:
+		if i, err := strconv.ParseUint(val, 10, 32); err == nil {
+			return uint32(i)
+		} else {
+			return uint32(0)
+		}
+	case descriptorpb.FieldDescriptorProto_TYPE_INT64,
+		descriptorpb.FieldDescriptorProto_TYPE_SINT64,
+		descriptorpb.FieldDescriptorProto_TYPE_SFIXED64:
+		if i, err := strconv.ParseInt(val, 10, 64); err == nil {
+			return i
+		} else {
+			return int64(0)
+		}
+	case descriptorpb.FieldDescriptorProto_TYPE_UINT64,
+		descriptorpb.FieldDescriptorProto_TYPE_FIXED64:
+		if i, err := strconv.ParseUint(val, 10, 64); err == nil {
+			return i
+		} else {
+			return uint64(0)
+		}
+	default:
+		return nil
+	}
+}
+
+// unescape reverses C-style escaping of a string, as produced by protoc when
+// it encodes default values for 'bytes' fields. Malformed escape sequences
+// are copied through verbatim rather than rejected.
+func unescape(s string) string {
+	// protoc encodes default values for 'bytes' fields using C escaping,
+	// so this function reverses that escaping
+	out := make([]byte, 0, len(s))
+	var buf [4]byte
+	for len(s) > 0 {
+		if s[0] != '\\' || len(s) < 2 {
+			// not escape sequence, or too short to be well-formed escape
+			out = append(out, s[0])
+			s = s[1:]
+		} else if s[1] == 'x' || s[1] == 'X' {
+			// \xHH: hex escape with up to two hex digits
+			n := matchPrefix(s[2:], 2, isHex)
+			if n == 0 {
+				// bad escape
+				out = append(out, s[:2]...)
+				s = s[2:]
+			} else {
+				c, err := strconv.ParseUint(s[2:2+n], 16, 8)
+				if err != nil {
+					// shouldn't really happen...
+					out = append(out, s[:2+n]...)
+				} else {
+					out = append(out, byte(c))
+				}
+				s = s[2+n:]
+			}
+		} else if s[1] >= '0' && s[1] <= '7' {
+			// \OOO: octal escape with up to three octal digits
+			n := 1 + matchPrefix(s[2:], 2, isOctal)
+			c, err := strconv.ParseUint(s[1:1+n], 8, 8)
+			if err != nil || c > 0xff {
+				out = append(out, s[:1+n]...)
+			} else {
+				out = append(out, byte(c))
+			}
+			s = s[1+n:]
+		} else if s[1] == 'u' {
+			// \uXXXX: 16-bit unicode code point, UTF-8 encoded into the output
+			if len(s) < 6 {
+				// bad escape
+				out = append(out, s...)
+				s = s[len(s):]
+			} else {
+				c, err := strconv.ParseUint(s[2:6], 16, 16)
+				if err != nil {
+					// bad escape
+					out = append(out, s[:6]...)
+				} else {
+					w := utf8.EncodeRune(buf[:], rune(c))
+					out = append(out, buf[:w]...)
+				}
+				s = s[6:]
+			}
+		} else if s[1] == 'U' {
+			// \UXXXXXXXX: 32-bit unicode code point, UTF-8 encoded into the output
+			if len(s) < 10 {
+				// bad escape
+				out = append(out, s...)
+				s = s[len(s):]
+			} else {
+				c, err := strconv.ParseUint(s[2:10], 16, 32)
+				if err != nil || c > 0x10ffff {
+					// bad escape
+					out = append(out, s[:10]...)
+				} else {
+					w := utf8.EncodeRune(buf[:], rune(c))
+					out = append(out, buf[:w]...)
+				}
+				s = s[10:]
+			}
+		} else {
+			// single-character escapes
+			switch s[1] {
+			case 'a':
+				out = append(out, '\a')
+			case 'b':
+				out = append(out, '\b')
+			case 'f':
+				out = append(out, '\f')
+			case 'n':
+				out = append(out, '\n')
+			case 'r':
+				out = append(out, '\r')
+			case 't':
+				out = append(out, '\t')
+			case 'v':
+				out = append(out, '\v')
+			case '\\':
+				out = append(out, '\\')
+			case '\'':
+				out = append(out, '\'')
+			case '"':
+				out = append(out, '"')
+			case '?':
+				out = append(out, '?')
+			default:
+				// invalid escape, just copy it as-is
+				out = append(out, s[:2]...)
+			}
+			s = s[2:]
+		}
+	}
+	return string(out)
+}
+
+func isOctal(b byte) bool { return b >= '0' && b <= '7' }
+func isHex(b byte) bool {
+ return (b >= '0' && b <= '9') || (b >= 'a' && b <= 'f') || (b >= 'A' && b <= 'F')
+}
+// matchPrefix returns the length of the longest prefix of s, at most limit
+// bytes, whose bytes all satisfy fn.
+func matchPrefix(s string, limit int, fn func(byte) bool) int {
+	l := len(s)
+	if l > limit {
+		l = limit
+	}
+	i := 0
+	for ; i < l; i++ {
+		if !fn(s[i]) {
+			return i
+		}
+	}
+	return i
+}
+
+// GetName returns the name of the field.
+func (fd *FieldDescriptor) GetName() string {
+ return string(fd.wrapped.Name())
+}
+
+// GetNumber returns the tag number of this field.
+func (fd *FieldDescriptor) GetNumber() int32 {
+ return int32(fd.wrapped.Number())
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the field. Unlike
+// GetName, this includes fully qualified name of the enclosing message for
+// regular fields.
+//
+// For extension fields, this includes the package (if there is one) as well as
+// any enclosing messages. The package and/or enclosing messages are for where
+// the extension is defined, not the message it extends.
+//
+// If this field is part of a one-of, the fully qualified name does *not*
+// include the name of the one-of, only of the enclosing message.
+func (fd *FieldDescriptor) GetFullyQualifiedName() string {
+ return string(fd.wrapped.FullName())
+}
+
+// GetParent returns the field's enclosing descriptor. For normal
+// (non-extension) fields, this is the enclosing message. For extensions, this
+// is the descriptor in which the extension is defined, not the message that is
+// extended. The parent for an extension may be a file descriptor or a message,
+// depending on where the extension is defined.
+func (fd *FieldDescriptor) GetParent() Descriptor {
+	return fd.parent
+}
+
+// GetFile returns the descriptor for the file in which this field is defined.
+func (fd *FieldDescriptor) GetFile() *FileDescriptor {
+ return fd.file
+}
+
+// GetOptions returns the field's options. Most usages will be more interested
+// in GetFieldOptions, which has a concrete return type. This generic version
+// is present to satisfy the Descriptor interface.
+func (fd *FieldDescriptor) GetOptions() proto.Message {
+ return fd.proto.GetOptions()
+}
+
+// GetFieldOptions returns the field's options.
+func (fd *FieldDescriptor) GetFieldOptions() *descriptorpb.FieldOptions {
+ return fd.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the field, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// field was defined and also contains comments associated with the field
+// definition.
+func (fd *FieldDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location {
+ return fd.file.sourceInfo.Get(fd.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsFieldDescriptorProto, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (fd *FieldDescriptor) AsProto() proto.Message {
+ return fd.proto
+}
+
+// AsFieldDescriptorProto returns the underlying descriptor proto.
+func (fd *FieldDescriptor) AsFieldDescriptorProto() *descriptorpb.FieldDescriptorProto {
+ return fd.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (fd *FieldDescriptor) String() string {
+ return fd.proto.String()
+}
+
+// GetJSONName returns the name of the field as referenced in the message's JSON
+// format.
+func (fd *FieldDescriptor) GetJSONName() string {
+ if jsonName := fd.proto.JsonName; jsonName != nil {
+ // if json name is present, use its value
+ return *jsonName
+ }
+ // otherwise, compute the proper JSON name from the field name
+ return jsonCamelCase(fd.proto.GetName())
+}
+
// jsonCamelCase converts a snake_case field name to the camelCase form used
// for JSON, dropping underscores and upper-casing the rune that follows each
// run of underscores. This mirrors the implementation in the protoc/C++
// runtime and in the Java runtime:
// https://github.com/protocolbuffers/protobuf/blob/a104dffcb6b1958a424f5fa6f9e6bdc0ab9b6f9e/src/google/protobuf/descriptor.cc#L276
// https://github.com/protocolbuffers/protobuf/blob/a1c886834425abb64a966231dd2c9dd84fb289b3/java/core/src/main/java/com/google/protobuf/Descriptors.java#L1286
func jsonCamelCase(s string) string {
	var out bytes.Buffer
	upperNext := false
	for _, c := range s {
		switch {
		case c == '_':
			// Skip the underscore; capitalize whatever comes next.
			upperNext = true
		case upperNext:
			out.WriteRune(unicode.ToUpper(c))
			upperNext = false
		default:
			out.WriteRune(c)
		}
	}
	return out.String()
}
+
+// GetFullyQualifiedJSONName returns the JSON format name (same as GetJSONName),
+// but includes the fully qualified name of the enclosing message.
+//
+// If the field is an extension, it will return the package name (if there is
+// one) as well as the names of any enclosing messages. The package and/or
+// enclosing messages are for where the extension is defined, not the message it
+// extends.
+func (fd *FieldDescriptor) GetFullyQualifiedJSONName() string {
+ parent := fd.GetParent()
+ switch parent := parent.(type) {
+ case *FileDescriptor:
+ pkg := parent.GetPackage()
+ if pkg == "" {
+ return fd.GetJSONName()
+ }
+ return fmt.Sprintf("%s.%s", pkg, fd.GetJSONName())
+ default:
+ return fmt.Sprintf("%s.%s", parent.GetFullyQualifiedName(), fd.GetJSONName())
+ }
+}
+
+// GetOwner returns the message type that this field belongs to. If this is a normal
+// field then this is the same as GetParent. But for extensions, this will be the
+// extendee message whereas GetParent refers to where the extension was declared.
+func (fd *FieldDescriptor) GetOwner() *MessageDescriptor {
+ return fd.owner
+}
+
+// IsExtension returns true if this is an extension field.
+func (fd *FieldDescriptor) IsExtension() bool {
+ return fd.wrapped.IsExtension()
+}
+
+// GetOneOf returns the one-of field set to which this field belongs. If this field
+// is not part of a one-of then this method returns nil.
+func (fd *FieldDescriptor) GetOneOf() *OneOfDescriptor {
+ return fd.oneOf
+}
+
+// GetType returns the type of this field. If the type indicates an enum, the
+// enum type can be queried via GetEnumType. If the type indicates a message, the
+// message type can be queried via GetMessageType.
+func (fd *FieldDescriptor) GetType() descriptorpb.FieldDescriptorProto_Type {
+ return fd.proto.GetType()
+}
+
// GetLabel returns the label for this field. The label can be required
// (proto2-only), optional (the default for proto3), or repeated.
func (fd *FieldDescriptor) GetLabel() descriptorpb.FieldDescriptorProto_Label {
	return fd.proto.GetLabel()
}
+
+// IsRequired returns true if this field has the "required" label.
+func (fd *FieldDescriptor) IsRequired() bool {
+ return fd.wrapped.Cardinality() == protoreflect.Required
+}
+
+// IsRepeated returns true if this field has the "repeated" label.
+func (fd *FieldDescriptor) IsRepeated() bool {
+ return fd.wrapped.Cardinality() == protoreflect.Repeated
+}
+
+// IsProto3Optional returns true if this field has an explicit "optional" label
+// and is in a "proto3" syntax file. Such fields, if they are normal fields (not
+// extensions), will be nested in synthetic oneofs that contain only the single
+// field.
+func (fd *FieldDescriptor) IsProto3Optional() bool {
+ return fd.proto.GetProto3Optional()
+}
+
+// HasPresence returns true if this field can distinguish when a value is
+// present or not. Scalar fields in "proto3" syntax files, for example, return
+// false since absent values are indistinguishable from zero values.
+func (fd *FieldDescriptor) HasPresence() bool {
+ return fd.wrapped.HasPresence()
+}
+
+// IsMap returns true if this is a map field. If so, it will have the "repeated"
+// label its type will be a message that represents a map entry. The map entry
+// message will have exactly two fields: tag #1 is the key and tag #2 is the value.
+func (fd *FieldDescriptor) IsMap() bool {
+ return fd.wrapped.IsMap()
+}
+
+// GetMapKeyType returns the type of the key field if this is a map field. If it is
+// not a map field, nil is returned.
+func (fd *FieldDescriptor) GetMapKeyType() *FieldDescriptor {
+ if fd.IsMap() {
+ return fd.msgType.FindFieldByNumber(int32(1))
+ }
+ return nil
+}
+
+// GetMapValueType returns the type of the value field if this is a map field. If it
+// is not a map field, nil is returned.
+func (fd *FieldDescriptor) GetMapValueType() *FieldDescriptor {
+ if fd.IsMap() {
+ return fd.msgType.FindFieldByNumber(int32(2))
+ }
+ return nil
+}
+
+// GetMessageType returns the type of this field if it is a message type. If
+// this field is not a message type, it returns nil.
+func (fd *FieldDescriptor) GetMessageType() *MessageDescriptor {
+ return fd.msgType
+}
+
+// GetEnumType returns the type of this field if it is an enum type. If this
+// field is not an enum type, it returns nil.
+func (fd *FieldDescriptor) GetEnumType() *EnumDescriptor {
+ return fd.enumType
+}
+
// GetDefaultValue returns the default value for this field.
//
// If this field represents a message type, this method always returns nil (even though
// for proto2 files, the default value should be a default instance of the message type).
// If the field represents an enum type, this method returns an int32 corresponding to the
// enum value. If this field is a map, it returns a nil map[interface{}]interface{}. If
// this field is repeated (and not a map), it returns a nil []interface{}.
//
// Otherwise, it returns the declared default value for the field or a zero value, if no
// default is declared or if the file is proto3. The type of said return value corresponds
// to the type of the field:
//
//	+-------------------------+-----------+
//	| Declared Type           | Go Type   |
//	+-------------------------+-----------+
//	| int32, sint32, sfixed32 | int32     |
//	| int64, sint64, sfixed64 | int64     |
//	| uint32, fixed32         | uint32    |
//	| uint64, fixed64         | uint64    |
//	| float                   | float32   |
//	| double                  | float64   |
//	| bool                    | bool      |
//	| string                  | string    |
//	| bytes                   | []byte    |
//	+-------------------------+-----------+
func (fd *FieldDescriptor) GetDefaultValue() interface{} {
	return fd.getDefaultValue()
}
+
+// EnumDescriptor describes an enum declared in a proto file.
+type EnumDescriptor struct {
+ wrapped protoreflect.EnumDescriptor
+ proto *descriptorpb.EnumDescriptorProto
+ parent Descriptor
+ file *FileDescriptor
+ values []*EnumValueDescriptor
+ valuesByNum sortedValues
+ sourceInfoPath []int32
+}
+
+// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more
+// interested in UnwrapEnum, which has a more specific return type. This generic
+// version is present to satisfy the DescriptorWrapper interface.
+func (ed *EnumDescriptor) Unwrap() protoreflect.Descriptor {
+ return ed.wrapped
+}
+
+// UnwrapEnum returns the underlying protoreflect.EnumDescriptor.
+func (ed *EnumDescriptor) UnwrapEnum() protoreflect.EnumDescriptor {
+ return ed.wrapped
+}
+
// createEnumDescriptor wraps the given protoreflect.EnumDescriptor and its
// corresponding descriptor proto as an *EnumDescriptor, registering the enum's
// values in the given symbols map. The path is the source-info path of the
// enum within the file. The cache parameter is not used here; presumably it
// exists for signature symmetry with sibling constructors — TODO confirm.
func createEnumDescriptor(fd *FileDescriptor, parent Descriptor, ed protoreflect.EnumDescriptor, edp *descriptorpb.EnumDescriptorProto, symbols map[string]Descriptor, cache descriptorCache, path []int32) *EnumDescriptor {
	ret := &EnumDescriptor{
		wrapped: ed,
		proto: edp,
		parent: parent,
		file: fd,
		sourceInfoPath: append([]int32(nil), path...), // defensive copy
	}
	path = append(path, internal.Enum_valuesTag)
	for i := 0; i < ed.Values().Len(); i++ {
		src := ed.Values().Get(i)
		srcProto := edp.GetValue()[src.Index()]
		evd := createEnumValueDescriptor(fd, ret, src, srcProto, append(path, int32(i)))
		// Register under the proper full name (scoped in the enum's parent).
		symbols[string(src.FullName())] = evd
		// NB: for backwards compatibility, also register the enum value as if
		// scoped within the enum (counter-intuitively, enum value full names are
		// scoped in the enum's parent element). EnumValueDescriptor.GetFullyQualifiedName
		// returns that alternate full name.
		symbols[evd.GetFullyQualifiedName()] = evd
		ret.values = append(ret.values, evd)
	}
	if len(ret.values) > 0 {
		// Keep a copy sorted by numeric value to support binary search in
		// FindValueByNumber.
		ret.valuesByNum = make(sortedValues, len(ret.values))
		copy(ret.valuesByNum, ret.values)
		sort.Stable(ret.valuesByNum)
	}
	return ret
}
+
+type sortedValues []*EnumValueDescriptor
+
+func (sv sortedValues) Len() int {
+ return len(sv)
+}
+
+func (sv sortedValues) Less(i, j int) bool {
+ return sv[i].GetNumber() < sv[j].GetNumber()
+}
+
+func (sv sortedValues) Swap(i, j int) {
+ sv[i], sv[j] = sv[j], sv[i]
+
+}
+
+// GetName returns the simple (unqualified) name of the enum type.
+func (ed *EnumDescriptor) GetName() string {
+ return string(ed.wrapped.Name())
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the enum type.
+// This includes the package name (if there is one) as well as the names of any
+// enclosing messages.
+func (ed *EnumDescriptor) GetFullyQualifiedName() string {
+ return string(ed.wrapped.FullName())
+}
+
+// GetParent returns the enum type's enclosing descriptor. For top-level enums,
+// this will be a file descriptor. Otherwise it will be the descriptor for the
+// enclosing message.
+func (ed *EnumDescriptor) GetParent() Descriptor {
+ return ed.parent
+}
+
+// GetFile returns the descriptor for the file in which this enum is defined.
+func (ed *EnumDescriptor) GetFile() *FileDescriptor {
+ return ed.file
+}
+
+// GetOptions returns the enum type's options. Most usages will be more
+// interested in GetEnumOptions, which has a concrete return type. This generic
+// version is present to satisfy the Descriptor interface.
+func (ed *EnumDescriptor) GetOptions() proto.Message {
+ return ed.proto.GetOptions()
+}
+
+// GetEnumOptions returns the enum type's options.
+func (ed *EnumDescriptor) GetEnumOptions() *descriptorpb.EnumOptions {
+ return ed.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the enum type, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// enum type was defined and also contains comments associated with the enum
+// definition.
+func (ed *EnumDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location {
+ return ed.file.sourceInfo.Get(ed.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsEnumDescriptorProto, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (ed *EnumDescriptor) AsProto() proto.Message {
+ return ed.proto
+}
+
+// AsEnumDescriptorProto returns the underlying descriptor proto.
+func (ed *EnumDescriptor) AsEnumDescriptorProto() *descriptorpb.EnumDescriptorProto {
+ return ed.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (ed *EnumDescriptor) String() string {
+ return ed.proto.String()
+}
+
+// GetValues returns all of the allowed values defined for this enum.
+func (ed *EnumDescriptor) GetValues() []*EnumValueDescriptor {
+ return ed.values
+}
+
+// FindValueByName finds the enum value with the given name. If no such value exists
+// then nil is returned.
+func (ed *EnumDescriptor) FindValueByName(name string) *EnumValueDescriptor {
+ fqn := fmt.Sprintf("%s.%s", ed.GetFullyQualifiedName(), name)
+ if vd, ok := ed.file.symbols[fqn].(*EnumValueDescriptor); ok {
+ return vd
+ } else {
+ return nil
+ }
+}
+
+// FindValueByNumber finds the value with the given numeric value. If no such value
+// exists then nil is returned. If aliases are allowed and multiple values have the
+// given number, the first declared value is returned.
+func (ed *EnumDescriptor) FindValueByNumber(num int32) *EnumValueDescriptor {
+ index := sort.Search(len(ed.valuesByNum), func(i int) bool { return ed.valuesByNum[i].GetNumber() >= num })
+ if index < len(ed.valuesByNum) {
+ vd := ed.valuesByNum[index]
+ if vd.GetNumber() == num {
+ return vd
+ }
+ }
+ return nil
+}
+
+// EnumValueDescriptor describes an allowed value of an enum declared in a proto file.
+type EnumValueDescriptor struct {
+ wrapped protoreflect.EnumValueDescriptor
+ proto *descriptorpb.EnumValueDescriptorProto
+ parent *EnumDescriptor
+ file *FileDescriptor
+ sourceInfoPath []int32
+}
+
+// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more
+// interested in UnwrapEnumValue, which has a more specific return type. This generic
+// version is present to satisfy the DescriptorWrapper interface.
+func (vd *EnumValueDescriptor) Unwrap() protoreflect.Descriptor {
+ return vd.wrapped
+}
+
+// UnwrapEnumValue returns the underlying protoreflect.EnumValueDescriptor.
+func (vd *EnumValueDescriptor) UnwrapEnumValue() protoreflect.EnumValueDescriptor {
+ return vd.wrapped
+}
+
+func createEnumValueDescriptor(fd *FileDescriptor, parent *EnumDescriptor, evd protoreflect.EnumValueDescriptor, evdp *descriptorpb.EnumValueDescriptorProto, path []int32) *EnumValueDescriptor {
+ return &EnumValueDescriptor{
+ wrapped: evd,
+ proto: evdp,
+ parent: parent,
+ file: fd,
+ sourceInfoPath: append([]int32(nil), path...), // defensive copy
+ }
+}
+
+func (vd *EnumValueDescriptor) resolve(path []int32) {
+ vd.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+}
+
+// GetName returns the name of the enum value.
+func (vd *EnumValueDescriptor) GetName() string {
+ return string(vd.wrapped.Name())
+}
+
+// GetNumber returns the numeric value associated with this enum value.
+func (vd *EnumValueDescriptor) GetNumber() int32 {
+ return int32(vd.wrapped.Number())
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the enum value.
+// Unlike GetName, this includes fully qualified name of the enclosing enum.
+func (vd *EnumValueDescriptor) GetFullyQualifiedName() string {
+ // NB: Technically, we do not return the correct value. Enum values are
+ // scoped within the enclosing element, not within the enum itself (which
+ // is very non-intuitive, but it follows C++ scoping rules). The value
+ // returned from vd.wrapped.FullName() is correct. However, we return
+ // something different, just for backwards compatibility, as this package
+ // has always instead returned the name scoped inside the enum.
+ return fmt.Sprintf("%s.%s", vd.parent.GetFullyQualifiedName(), vd.wrapped.Name())
+}
+
+// GetParent returns the descriptor for the enum in which this enum value is
+// defined. Most usages will prefer to use GetEnum, which has a concrete return
+// type. This more generic method is present to satisfy the Descriptor interface.
+func (vd *EnumValueDescriptor) GetParent() Descriptor {
+ return vd.parent
+}
+
+// GetEnum returns the enum in which this enum value is defined.
+func (vd *EnumValueDescriptor) GetEnum() *EnumDescriptor {
+ return vd.parent
+}
+
+// GetFile returns the descriptor for the file in which this enum value is
+// defined.
+func (vd *EnumValueDescriptor) GetFile() *FileDescriptor {
+ return vd.file
+}
+
+// GetOptions returns the enum value's options. Most usages will be more
+// interested in GetEnumValueOptions, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (vd *EnumValueDescriptor) GetOptions() proto.Message {
+ return vd.proto.GetOptions()
+}
+
+// GetEnumValueOptions returns the enum value's options.
+func (vd *EnumValueDescriptor) GetEnumValueOptions() *descriptorpb.EnumValueOptions {
+ return vd.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the enum value, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// enum value was defined and also contains comments associated with the enum
+// value definition.
+func (vd *EnumValueDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location {
+ return vd.file.sourceInfo.Get(vd.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsEnumValueDescriptorProto, which has a concrete return type.
+// This generic version is present to satisfy the Descriptor interface.
+func (vd *EnumValueDescriptor) AsProto() proto.Message {
+ return vd.proto
+}
+
+// AsEnumValueDescriptorProto returns the underlying descriptor proto.
+func (vd *EnumValueDescriptor) AsEnumValueDescriptorProto() *descriptorpb.EnumValueDescriptorProto {
+ return vd.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (vd *EnumValueDescriptor) String() string {
+ return vd.proto.String()
+}
+
+// ServiceDescriptor describes an RPC service declared in a proto file.
+type ServiceDescriptor struct {
+ wrapped protoreflect.ServiceDescriptor
+ proto *descriptorpb.ServiceDescriptorProto
+ file *FileDescriptor
+ methods []*MethodDescriptor
+ sourceInfoPath []int32
+}
+
+// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more
+// interested in UnwrapService, which has a more specific return type. This generic
+// version is present to satisfy the DescriptorWrapper interface.
+func (sd *ServiceDescriptor) Unwrap() protoreflect.Descriptor {
+ return sd.wrapped
+}
+
+// UnwrapService returns the underlying protoreflect.ServiceDescriptor.
+func (sd *ServiceDescriptor) UnwrapService() protoreflect.ServiceDescriptor {
+ return sd.wrapped
+}
+
+func createServiceDescriptor(fd *FileDescriptor, sd protoreflect.ServiceDescriptor, sdp *descriptorpb.ServiceDescriptorProto, symbols map[string]Descriptor, path []int32) *ServiceDescriptor {
+ ret := &ServiceDescriptor{
+ wrapped: sd,
+ proto: sdp,
+ file: fd,
+ sourceInfoPath: append([]int32(nil), path...), // defensive copy
+ }
+ path = append(path, internal.Service_methodsTag)
+ for i := 0; i < sd.Methods().Len(); i++ {
+ src := sd.Methods().Get(i)
+ srcProto := sdp.GetMethod()[src.Index()]
+ md := createMethodDescriptor(fd, ret, src, srcProto, append(path, int32(i)))
+ symbols[string(src.FullName())] = md
+ ret.methods = append(ret.methods, md)
+ }
+ return ret
+}
+
+func (sd *ServiceDescriptor) resolve(cache descriptorCache) error {
+ for _, md := range sd.methods {
+ if err := md.resolve(cache); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// GetName returns the simple (unqualified) name of the service.
+func (sd *ServiceDescriptor) GetName() string {
+ return string(sd.wrapped.Name())
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the service. This
+// includes the package name (if there is one).
+func (sd *ServiceDescriptor) GetFullyQualifiedName() string {
+ return string(sd.wrapped.FullName())
+}
+
+// GetParent returns the descriptor for the file in which this service is
+// defined. Most usages will prefer to use GetFile, which has a concrete return
+// type. This more generic method is present to satisfy the Descriptor interface.
+func (sd *ServiceDescriptor) GetParent() Descriptor {
+ return sd.file
+}
+
+// GetFile returns the descriptor for the file in which this service is defined.
+func (sd *ServiceDescriptor) GetFile() *FileDescriptor {
+ return sd.file
+}
+
+// GetOptions returns the service's options. Most usages will be more interested
+// in GetServiceOptions, which has a concrete return type. This generic version
+// is present to satisfy the Descriptor interface.
+func (sd *ServiceDescriptor) GetOptions() proto.Message {
+ return sd.proto.GetOptions()
+}
+
+// GetServiceOptions returns the service's options.
+func (sd *ServiceDescriptor) GetServiceOptions() *descriptorpb.ServiceOptions {
+ return sd.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the service, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// service was defined and also contains comments associated with the service
+// definition.
+func (sd *ServiceDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location {
+ return sd.file.sourceInfo.Get(sd.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsServiceDescriptorProto, which has a concrete return type.
+// This generic version is present to satisfy the Descriptor interface.
+func (sd *ServiceDescriptor) AsProto() proto.Message {
+ return sd.proto
+}
+
+// AsServiceDescriptorProto returns the underlying descriptor proto.
+func (sd *ServiceDescriptor) AsServiceDescriptorProto() *descriptorpb.ServiceDescriptorProto {
+ return sd.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (sd *ServiceDescriptor) String() string {
+ return sd.proto.String()
+}
+
+// GetMethods returns all of the RPC methods for this service.
+func (sd *ServiceDescriptor) GetMethods() []*MethodDescriptor {
+ return sd.methods
+}
+
+// FindMethodByName finds the method with the given name. If no such method exists
+// then nil is returned.
+func (sd *ServiceDescriptor) FindMethodByName(name string) *MethodDescriptor {
+ fqn := fmt.Sprintf("%s.%s", sd.GetFullyQualifiedName(), name)
+ if md, ok := sd.file.symbols[fqn].(*MethodDescriptor); ok {
+ return md
+ } else {
+ return nil
+ }
+}
+
+// MethodDescriptor describes an RPC method declared in a proto file.
+type MethodDescriptor struct {
+ wrapped protoreflect.MethodDescriptor
+ proto *descriptorpb.MethodDescriptorProto
+ parent *ServiceDescriptor
+ file *FileDescriptor
+ inType *MessageDescriptor
+ outType *MessageDescriptor
+ sourceInfoPath []int32
+}
+
+// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more
+// interested in UnwrapMethod, which has a more specific return type. This generic
+// version is present to satisfy the DescriptorWrapper interface.
+func (md *MethodDescriptor) Unwrap() protoreflect.Descriptor {
+ return md.wrapped
+}
+
+// UnwrapMethod returns the underlying protoreflect.MethodDescriptor.
+func (md *MethodDescriptor) UnwrapMethod() protoreflect.MethodDescriptor {
+ return md.wrapped
+}
+
+func createMethodDescriptor(fd *FileDescriptor, parent *ServiceDescriptor, md protoreflect.MethodDescriptor, mdp *descriptorpb.MethodDescriptorProto, path []int32) *MethodDescriptor {
+ // request and response types get resolved later
+ return &MethodDescriptor{
+ wrapped: md,
+ proto: mdp,
+ parent: parent,
+ file: fd,
+ sourceInfoPath: append([]int32(nil), path...), // defensive copy
+ }
+}
+
+func (md *MethodDescriptor) resolve(cache descriptorCache) error {
+ if desc, err := resolve(md.file, md.wrapped.Input(), cache); err != nil {
+ return err
+ } else {
+ msgType, ok := desc.(*MessageDescriptor)
+ if !ok {
+ return fmt.Errorf("method %v has request type %q which should be a message but is %s", md.GetFullyQualifiedName(), md.proto.GetInputType(), descriptorType(desc))
+ }
+ md.inType = msgType
+ }
+ if desc, err := resolve(md.file, md.wrapped.Output(), cache); err != nil {
+ return err
+ } else {
+ msgType, ok := desc.(*MessageDescriptor)
+ if !ok {
+ return fmt.Errorf("method %v has response type %q which should be a message but is %s", md.GetFullyQualifiedName(), md.proto.GetOutputType(), descriptorType(desc))
+ }
+ md.outType = msgType
+ }
+ return nil
+}
+
+// GetName returns the name of the method.
+func (md *MethodDescriptor) GetName() string {
+ return string(md.wrapped.Name())
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the method. Unlike
+// GetName, this includes fully qualified name of the enclosing service.
+func (md *MethodDescriptor) GetFullyQualifiedName() string {
+ return string(md.wrapped.FullName())
+}
+
+// GetParent returns the descriptor for the service in which this method is
+// defined. Most usages will prefer to use GetService, which has a concrete
+// return type. This more generic method is present to satisfy the Descriptor
+// interface.
+func (md *MethodDescriptor) GetParent() Descriptor {
+ return md.parent
+}
+
+// GetService returns the RPC service in which this method is declared.
+func (md *MethodDescriptor) GetService() *ServiceDescriptor {
+ return md.parent
+}
+
+// GetFile returns the descriptor for the file in which this method is defined.
+func (md *MethodDescriptor) GetFile() *FileDescriptor {
+ return md.file
+}
+
+// GetOptions returns the method's options. Most usages will be more interested
+// in GetMethodOptions, which has a concrete return type. This generic version
+// is present to satisfy the Descriptor interface.
+func (md *MethodDescriptor) GetOptions() proto.Message {
+ return md.proto.GetOptions()
+}
+
+// GetMethodOptions returns the method's options.
+func (md *MethodDescriptor) GetMethodOptions() *descriptorpb.MethodOptions {
+ return md.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the method, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// method was defined and also contains comments associated with the method
+// definition.
+func (md *MethodDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location {
+ return md.file.sourceInfo.Get(md.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsMethodDescriptorProto, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (md *MethodDescriptor) AsProto() proto.Message {
+ return md.proto
+}
+
+// AsMethodDescriptorProto returns the underlying descriptor proto.
+func (md *MethodDescriptor) AsMethodDescriptorProto() *descriptorpb.MethodDescriptorProto {
+ return md.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (md *MethodDescriptor) String() string {
+ return md.proto.String()
+}
+
+// IsServerStreaming returns true if this is a server-streaming method.
+func (md *MethodDescriptor) IsServerStreaming() bool {
+ return md.wrapped.IsStreamingServer()
+}
+
+// IsClientStreaming returns true if this is a client-streaming method.
+func (md *MethodDescriptor) IsClientStreaming() bool {
+ return md.wrapped.IsStreamingClient()
+}
+
+// GetInputType returns the input type, or request type, of the RPC method.
+func (md *MethodDescriptor) GetInputType() *MessageDescriptor {
+ return md.inType
+}
+
+// GetOutputType returns the output type, or response type, of the RPC method.
+func (md *MethodDescriptor) GetOutputType() *MessageDescriptor {
+ return md.outType
+}
+
+// OneOfDescriptor describes a one-of field set declared in a protocol buffer message.
+type OneOfDescriptor struct {
+ wrapped protoreflect.OneofDescriptor
+ proto *descriptorpb.OneofDescriptorProto
+ parent *MessageDescriptor
+ file *FileDescriptor
+ choices []*FieldDescriptor
+ sourceInfoPath []int32
+}
+
+// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more
+// interested in UnwrapOneOf, which has a more specific return type. This generic
+// version is present to satisfy the DescriptorWrapper interface.
+func (od *OneOfDescriptor) Unwrap() protoreflect.Descriptor {
+ return od.wrapped
+}
+
+// UnwrapOneOf returns the underlying protoreflect.OneofDescriptor.
+func (od *OneOfDescriptor) UnwrapOneOf() protoreflect.OneofDescriptor {
+ return od.wrapped
+}
+
+func createOneOfDescriptor(fd *FileDescriptor, parent *MessageDescriptor, index int, od protoreflect.OneofDescriptor, odp *descriptorpb.OneofDescriptorProto, path []int32) *OneOfDescriptor {
+ ret := &OneOfDescriptor{
+ wrapped: od,
+ proto: odp,
+ parent: parent,
+ file: fd,
+ sourceInfoPath: append([]int32(nil), path...), // defensive copy
+ }
+ for _, f := range parent.fields {
+ oi := f.proto.OneofIndex
+ if oi != nil && *oi == int32(index) {
+ f.oneOf = ret
+ ret.choices = append(ret.choices, f)
+ }
+ }
+ return ret
+}
+
+// GetName returns the name of the one-of.
+func (od *OneOfDescriptor) GetName() string {
+ return string(od.wrapped.Name())
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the one-of. Unlike
+// GetName, this includes fully qualified name of the enclosing message.
+func (od *OneOfDescriptor) GetFullyQualifiedName() string {
+ return string(od.wrapped.FullName())
+}
+
+// GetParent returns the descriptor for the message in which this one-of is
+// defined. Most usages will prefer to use GetOwner, which has a concrete
+// return type. This more generic method is present to satisfy the Descriptor
+// interface.
+func (od *OneOfDescriptor) GetParent() Descriptor {
+ return od.parent
+}
+
+// GetOwner returns the message to which this one-of field set belongs.
+func (od *OneOfDescriptor) GetOwner() *MessageDescriptor {
+ return od.parent
+}
+
// GetFile returns the descriptor for the file in which this one-of is defined.
func (od *OneOfDescriptor) GetFile() *FileDescriptor {
	return od.file
}
+
+// GetOptions returns the one-of's options. Most usages will be more interested
+// in GetOneOfOptions, which has a concrete return type. This generic version
+// is present to satisfy the Descriptor interface.
+func (od *OneOfDescriptor) GetOptions() proto.Message {
+ return od.proto.GetOptions()
+}
+
+// GetOneOfOptions returns the one-of's options.
+func (od *OneOfDescriptor) GetOneOfOptions() *descriptorpb.OneofOptions {
+ return od.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the one-of, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// one-of was defined and also contains comments associated with the one-of
+// definition.
+func (od *OneOfDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location {
+ return od.file.sourceInfo.Get(od.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsOneofDescriptorProto, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (od *OneOfDescriptor) AsProto() proto.Message {
+ return od.proto
+}
+
+// AsOneofDescriptorProto returns the underlying descriptor proto.
+func (od *OneOfDescriptor) AsOneofDescriptorProto() *descriptorpb.OneofDescriptorProto {
+ return od.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (od *OneOfDescriptor) String() string {
+ return od.proto.String()
+}
+
+// GetChoices returns the fields that are part of the one-of field set. At most one of
+// these fields may be set for a given message.
+func (od *OneOfDescriptor) GetChoices() []*FieldDescriptor {
+ return od.choices
+}
+
+func (od *OneOfDescriptor) IsSynthetic() bool {
+ return od.wrapped.IsSynthetic()
+}
+
+func resolve(fd *FileDescriptor, src protoreflect.Descriptor, cache descriptorCache) (Descriptor, error) {
+ d := cache.get(src)
+ if d != nil {
+ return d, nil
+ }
+
+ fqn := string(src.FullName())
+
+ d = fd.FindSymbol(fqn)
+ if d != nil {
+ return d, nil
+ }
+
+ for _, dep := range fd.deps {
+ d := dep.FindSymbol(fqn)
+ if d != nil {
+ return d, nil
+ }
+ }
+
+ return nil, fmt.Errorf("file %q included an unresolvable reference to %q", fd.proto.GetName(), fqn)
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/descriptor_no_unsafe.go b/vendor/github.com/jhump/protoreflect/desc/descriptor_no_unsafe.go
new file mode 100644
index 0000000..25d619a
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/descriptor_no_unsafe.go
@@ -0,0 +1,30 @@
+//go:build appengine || gopherjs || purego
+// +build appengine gopherjs purego
+
+// NB: other environments where unsafe is inappropriate should use "purego" build tag
+// https://github.com/golang/go/issues/23172
+
+package desc
+
+// jsonNameMap and memoizedDefault are zero-size placeholders: without
+// unsafe/atomics there is no cached index or memoized default, so these
+// types carry no state in this build configuration.
+type jsonNameMap struct{}
+type memoizedDefault struct{}
+
+// FindFieldByJSONName finds the field with the given JSON field name. If no such
+// field exists then nil is returned. Only regular fields are returned, not
+// extensions.
+func (md *MessageDescriptor) FindFieldByJSONName(jsonName string) *FieldDescriptor {
+ // NB: With allowed use of unsafe, we use it to atomically define an index
+ // via atomic.LoadPointer/atomic.StorePointer. Without it, we skip the index
+ // and must do a linear scan of fields each time.
+ // This is O(len(md.fields)) on every call in this build configuration.
+ for _, f := range md.fields {
+ jn := f.GetJSONName()
+ if jn == jsonName {
+ return f
+ }
+ }
+ return nil
+}
+
+// getDefaultValue returns the field's default value. In this build
+// configuration (no unsafe) there is no memoization, so the default is
+// recomputed on every call.
+func (fd *FieldDescriptor) getDefaultValue() interface{} {
+ return fd.determineDefault()
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/descriptor_unsafe.go b/vendor/github.com/jhump/protoreflect/desc/descriptor_unsafe.go
new file mode 100644
index 0000000..691f0d8
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/descriptor_unsafe.go
@@ -0,0 +1,59 @@
+//go:build !appengine && !gopherjs && !purego
+// +build !appengine,!gopherjs,!purego
+
+// NB: other environments where unsafe is inappropriate should use "purego" build tag
+// https://github.com/golang/go/issues/23172
+
+package desc
+
+import (
+ "sync/atomic"
+ "unsafe"
+)
+
+// jsonNameMap caches a message's JSON-name-to-field index; memoizedDefault
+// caches a field's computed default. Both are only ever read/written through
+// atomic pointer operations (see FindFieldByJSONName and getDefaultValue).
+type jsonNameMap map[string]*FieldDescriptor // loaded/stored atomically via atomic+unsafe
+type memoizedDefault *interface{} // loaded/stored atomically via atomic+unsafe
+
+// FindFieldByJSONName finds the field with the given JSON field name. If no such
+// field exists then nil is returned. Only regular fields are returned, not
+// extensions.
+func (md *MessageDescriptor) FindFieldByJSONName(jsonName string) *FieldDescriptor {
+ // NB: We don't want to eagerly index JSON names because many programs won't use it.
+ // So we want to do it lazily, but also make sure the result is thread-safe. So we
+ // atomically load/store the map as if it were a normal pointer. We don't use other
+ // mechanisms -- like sync.Mutex, sync.RWMutex, sync.Once, or atomic.Value -- to
+ // do this lazily because those types cannot be copied, and we'd rather not induce
+ // 'go vet' errors in programs that use descriptors and try to copy them.
+ // If multiple goroutines try to access the index at the same time, before it is
+ // built, they will all end up computing the index redundantly. Future reads of
+ // the index will use whatever was the "last one stored" by those racing goroutines.
+ // Since building the index is deterministic, this is fine: all indices computed
+ // will be the same.
+ // Reinterpret md.jsonNames (a map header) as an unsafe.Pointer so it can be
+ // loaded and stored atomically.
+ addrOfJsonNames := (*unsafe.Pointer)(unsafe.Pointer(&md.jsonNames))
+ jsonNames := atomic.LoadPointer(addrOfJsonNames)
+ var index map[string]*FieldDescriptor
+ if jsonNames == nil {
+ // slow path: compute the index
+ index = map[string]*FieldDescriptor{}
+ for _, f := range md.fields {
+ jn := f.GetJSONName()
+ index[jn] = f
+ }
+ // Publish the computed index; last writer wins, which is safe because
+ // all computed indices are identical.
+ atomic.StorePointer(addrOfJsonNames, *(*unsafe.Pointer)(unsafe.Pointer(&index)))
+ } else {
+ // Reinterpret the previously published pointer back into a map header.
+ *(*unsafe.Pointer)(unsafe.Pointer(&index)) = jsonNames
+ }
+ return index[jsonName]
+}
+
+// getDefaultValue returns the field's default value, memoizing the result in
+// fd.def via atomic pointer load/store. Racing goroutines may compute the
+// default redundantly, but determineDefault is deterministic, so whichever
+// result is stored last is equivalent.
+func (fd *FieldDescriptor) getDefaultValue() interface{} {
+ addrOfDef := (*unsafe.Pointer)(unsafe.Pointer(&fd.def))
+ def := atomic.LoadPointer(addrOfDef)
+ if def != nil {
+ // fast path: previously memoized value
+ return *(*interface{})(def)
+ }
+ // slow path: compute the default, potentially involves decoding value
+ d := fd.determineDefault()
+ atomic.StorePointer(addrOfDef, (unsafe.Pointer(&d)))
+ return d
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/doc.go b/vendor/github.com/jhump/protoreflect/desc/doc.go
new file mode 100644
index 0000000..07bcbf3
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/doc.go
@@ -0,0 +1,70 @@
+// Package desc contains "rich descriptors" for protocol buffers. The built-in
+// descriptor types are simple protobuf messages, each one representing a
+// different kind of element in the AST of a .proto source file.
+//
+// Because of this inherent "tree" quality, these built-in descriptors cannot
+// refer to their enclosing file descriptor. Nor can a field descriptor refer to
+// a message or enum descriptor that represents the field's type (for enum and
+// nested message fields). All such links must instead be stringly typed. This
+// limitation makes them much harder to use for doing interesting things with
+// reflection.
+//
+// Without this package, resolving references to types is particularly complex.
+// For example, resolving a field's type, the message type an extension extends,
+// or the request and response types of an RPC method all require searching
+// through symbols defined not only in the file in which these elements are
+// declared but also in its transitive closure of dependencies.
+//
+// "Rich descriptors" avoid the need to deal with the complexities described
+// above. A rich descriptor has all type references resolved and provides
+// methods to access other rich descriptors for all referenced elements. Each
+// rich descriptor has a usefully broad API, but does not try to mimic the full
+// interface of the underlying descriptor proto. Instead, every rich descriptor
+// provides access to that underlying proto, for extracting descriptor
+// properties that are not immediately accessible through rich descriptor's
+// methods.
+//
+// Also see the grpcreflect, dynamic, and grpcdynamic packages in this same
+// repo to see just how useful rich descriptors really are.
+//
+// # Loading Descriptors
+//
+// Rich descriptors can be accessed in similar ways as their "poor" cousins
+// (descriptor protos). Instead of using proto.FileDescriptor, use
+// desc.LoadFileDescriptor. Message descriptors and extension field descriptors
+// can also be easily accessed using desc.LoadMessageDescriptor and
+// desc.LoadFieldDescriptorForExtension, respectively.
+//
+// If you are using the protoc-gen-gosrcinfo plugin (also in this repo), then
+// the descriptors returned from these Load* functions will include source code
+// information, and thus include comments for elements.
+//
+// # Creating Descriptors
+//
+// It is also possible to create rich descriptors for proto messages that a given
+// Go program doesn't even know about. For example, they could be loaded from a
+// FileDescriptorSet file (which can be generated by protoc) or loaded from a
+// server. This enables interesting things like dynamic clients: where a Go
+// program can be an RPC client of a service it wasn't compiled to know about.
+//
+// You cannot create a message descriptor without also creating its enclosing
+// file, because the enclosing file is what contains other relevant information
+// like other symbols and dependencies/imports, which is how type references
+// are resolved (such as when a field in a message has a type that is another
+// message or enum).
+//
+// So the functions in this package for creating descriptors are all for
+// creating *file* descriptors. See the various Create* functions for more
+// information.
+//
+// Also see the desc/builder sub-package, for another API that makes it easier
+// to synthesize descriptors programmatically.
+//
+// Deprecated: This module was created for use with the older "v1" Protobuf API
+// in github.com/golang/protobuf. However, much of this module is no longer
+// necessary as the newer "v2" API in google.golang.org/protobuf provides similar
+// capabilities. Instead of using this github.com/jhump/protoreflect/desc package,
+// see [google.golang.org/protobuf/reflect/protoreflect].
+//
+// [google.golang.org/protobuf/reflect/protoreflect]: https://pkg.go.dev/google.golang.org/protobuf/reflect/protoreflect
+package desc
diff --git a/vendor/github.com/jhump/protoreflect/desc/imports.go b/vendor/github.com/jhump/protoreflect/desc/imports.go
new file mode 100644
index 0000000..dc6b735
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/imports.go
@@ -0,0 +1,324 @@
+package desc
+
+import (
+ "fmt"
+ "path/filepath"
+ "reflect"
+ "strings"
+ "sync"
+
+ "github.com/golang/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/types/descriptorpb"
+)
+
+var (
+ // globalImportPathConf maps an alternate import path to the registered
+ // proto file path; lazily allocated by RegisterImportPath and guarded by
+ // globalImportPathMu.
+ globalImportPathConf map[string]string
+ globalImportPathMu sync.RWMutex
+)
+
+// RegisterImportPath registers an alternate import path for a given registered
+// proto file path. For more details on why alternate import paths may need to
+// be configured, see ImportResolver.
+//
+// This method panics if provided invalid input. An empty importPath is invalid.
+// An un-registered registerPath is also invalid. For example, if an attempt is
+// made to register the import path "foo/bar.proto" as "bar.proto", but there is
+// no "bar.proto" registered in the Go protobuf runtime, this method will panic.
+// This method also panics if an attempt is made to register the same import
+// path more than once.
+//
+// This function works globally, applying to all descriptors loaded by this
+// package. If you instead want more granular support for handling alternate
+// import paths -- such as for a single invocation of a function in this
+// package or when the alternate path is only used from one file (so you don't
+// want the alternate path used when loading every other file), use an
+// ImportResolver instead.
+func RegisterImportPath(registerPath, importPath string) {
+ if len(importPath) == 0 {
+ panic("import path cannot be empty")
+ }
+ // registerPath must already be known to the Go protobuf runtime.
+ _, err := protoregistry.GlobalFiles.FindFileByPath(registerPath)
+ if err != nil {
+ panic(fmt.Sprintf("path %q is not a registered proto file", registerPath))
+ }
+ globalImportPathMu.Lock()
+ defer globalImportPathMu.Unlock()
+ if reg := globalImportPathConf[importPath]; reg != "" {
+ panic(fmt.Sprintf("import path %q already registered for %s", importPath, reg))
+ }
+ // lazily allocate the global map on first registration
+ if globalImportPathConf == nil {
+ globalImportPathConf = map[string]string{}
+ }
+ // NOTE(review): importPath is stored un-cleaned here, while ResolveImport
+ // cleans its argument before lookup; a path registered in non-clean form
+ // (e.g. "./foo.proto") may never match — confirm this is intended.
+ globalImportPathConf[importPath] = registerPath
+}
+
+// ResolveImport resolves the given import path. If it has been registered as an
+// alternate via RegisterImportPath, the registered path is returned. Otherwise,
+// the given import path is returned unchanged.
+func ResolveImport(importPath string) string {
+ // normalize before lookup so equivalent spellings of a path match
+ importPath = clean(importPath)
+ globalImportPathMu.RLock()
+ defer globalImportPathMu.RUnlock()
+ reg := globalImportPathConf[importPath]
+ if reg == "" {
+ // no alternate registered; return the (cleaned) input unchanged
+ return importPath
+ }
+ return reg
+}
+
+// ImportResolver lets you work-around linking issues that are caused by
+// mismatches between how a particular proto source file is registered in the Go
+// protobuf runtime and how that same file is imported by other files. The file
+// is registered using the same relative path given to protoc when the file is
+// compiled (i.e. when Go code is generated). So if any file tries to import
+// that source file, but using a different relative path, then a link error will
+// occur when this package tries to load a descriptor for the importing file.
+//
+// For example, let's say we have two proto source files: "foo/bar.proto" and
+// "fubar/baz.proto". The latter imports the former using a line like so:
+//
+// import "foo/bar.proto";
+//
+// However, when protoc is invoked, the command-line args looks like so:
+//
+// protoc -Ifoo/ --go_out=foo/ bar.proto
+// protoc -I./ -Ifubar/ --go_out=fubar/ baz.proto
+//
+// Because the path given to protoc is just "bar.proto" and "baz.proto", this is
+// how they are registered in the Go protobuf runtime. So, when loading the
+// descriptor for "fubar/baz.proto", we'll see an import path of "foo/bar.proto"
+// but will find no file registered with that path:
+//
+// fd, err := desc.LoadFileDescriptor("baz.proto")
+// // err will be non-nil, complaining that there is no such file
+// // found named "foo/bar.proto"
+//
+// This can be remedied by registering alternate import paths using an
+// ImportResolver. Continuing with the example above, the code below would fix
+// any link issue:
+//
+// var r desc.ImportResolver
+// r.RegisterImportPath("bar.proto", "foo/bar.proto")
+// fd, err := r.LoadFileDescriptor("baz.proto")
+// // err will be nil; descriptor successfully loaded!
+//
+// If there are files that are *always* imported using a different relative
+// path then how they are registered, consider using the global
+// RegisterImportPath function, so you don't have to use an ImportResolver for
+// every file that imports it.
+//
+// Note that the new protobuf runtime (v1.4+) verifies that import paths are
+// correct and that descriptors can be linked during package initialization. So
+// customizing import paths for descriptor resolution is no longer necessary.
+type ImportResolver struct {
+ // children is a trie keyed by path segment of the source context: each
+ // child handles registrations scoped to that sub-folder/file.
+ children map[string]*ImportResolver
+ // importPaths maps an import path to its registered replacement for the
+ // source context represented by this node.
+ importPaths map[string]string
+
+ // By default, an ImportResolver will fallback to consulting any paths
+ // registered via the top-level RegisterImportPath function. Setting this
+ // field to true will cause the ImportResolver to skip that fallback and
+ // only examine its own locally registered paths.
+ SkipFallbackRules bool
+}
+
+// ResolveImport resolves the given import path in the context of the given
+// source file. If a matching alternate has been registered with this resolver
+// via a call to RegisterImportPath or RegisterImportPathFrom, then the
+// registered path is returned. Otherwise, the given import path is returned
+// unchanged.
+func (r *ImportResolver) ResolveImport(source, importPath string) string {
+ // a nil receiver is allowed: it behaves as if no local paths are registered
+ if r != nil {
+ res := r.resolveImport(clean(source), clean(importPath))
+ if res != "" {
+ return res
+ }
+ if r.SkipFallbackRules {
+ // do not consult globally registered paths
+ return importPath
+ }
+ }
+ // fall back to the package-level registrations
+ return ResolveImport(importPath)
+}
+
+// resolveImport walks the per-path-segment trie rooted at r. source is the
+// already-cleaned path of the importing file; the deepest (most specific)
+// matching registration wins, falling back to this node's own importPaths.
+// It returns "" when no registration matches.
+func (r *ImportResolver) resolveImport(source, importPath string) string {
+ if source == "" {
+ // reached the node for the full source context
+ return r.importPaths[importPath]
+ }
+ // split source into its first segment (car) and the remainder (cdr)
+ var car, cdr string
+ idx := strings.IndexRune(source, '/')
+ if idx < 0 {
+ car, cdr = source, ""
+ } else {
+ car, cdr = source[:idx], source[idx+1:]
+ }
+ ch := r.children[car]
+ if ch != nil {
+ // prefer a more specific registration from the child subtree
+ if reg := ch.resolveImport(cdr, importPath); reg != "" {
+ return reg
+ }
+ }
+ return r.importPaths[importPath]
+}
+
+// RegisterImportPath registers an alternate import path for a given registered
+// proto file path with this resolver. Any appearance of the given import path
+// when linking files will instead try to link the given registered path. If the
+// registered path cannot be located, then linking will fallback to the actual
+// imported path.
+//
+// This method will panic if given an empty path or if the same import path is
+// registered more than once.
+//
+// To constrain the contexts where the given import path is to be re-written,
+// use RegisterImportPathFrom instead.
+func (r *ImportResolver) RegisterImportPath(registerPath, importPath string) {
+ // empty source context means the registration applies to all files
+ r.RegisterImportPathFrom(registerPath, importPath, "")
+}
+
+// RegisterImportPathFrom registers an alternate import path for a given
+// registered proto file path with this resolver, but only for imports in the
+// specified source context.
+//
+// The source context can be the name of a folder or a proto source file. Any
+// appearance of the given import path in that context will instead try to link
+// the given registered path. To be in context, the file that is being linked
+// (i.e. the one whose import statement is being resolved) must be the same
+// relative path of the source context or be a sub-path (i.e. a descendant of
+// the source folder).
+//
+// If the registered path cannot be located, then linking will fallback to the
+// actual imported path.
+//
+// This method will panic if given an empty path. The source context, on the
+// other hand, is allowed to be blank. A blank source matches all files. This
+// method also panics if the same import path is registered in the same source
+// context more than once.
+func (r *ImportResolver) RegisterImportPathFrom(registerPath, importPath, source string) {
+ // normalize and validate both paths before inserting into the trie
+ importPath = clean(importPath)
+ if len(importPath) == 0 {
+ panic("import path cannot be empty")
+ }
+ registerPath = clean(registerPath)
+ if len(registerPath) == 0 {
+ panic("registered path cannot be empty")
+ }
+ r.registerImportPathFrom(registerPath, importPath, clean(source))
+}
+
+// registerImportPathFrom inserts the registration into the segment trie,
+// creating child nodes as needed. An empty source registers at this node
+// (applying to all files in this context); it panics if the same import path
+// is already registered at the same node.
+func (r *ImportResolver) registerImportPathFrom(registerPath, importPath, source string) {
+ if source == "" {
+ if r.importPaths == nil {
+ r.importPaths = map[string]string{}
+ } else if reg := r.importPaths[importPath]; reg != "" {
+ panic(fmt.Sprintf("already registered import path %q as %q", importPath, registerPath))
+ }
+ r.importPaths[importPath] = registerPath
+ return
+ }
+ // split source into its first segment (car) and the remainder (cdr)
+ var car, cdr string
+ idx := strings.IndexRune(source, '/')
+ if idx < 0 {
+ car, cdr = source, ""
+ } else {
+ car, cdr = source[:idx], source[idx+1:]
+ }
+ ch := r.children[car]
+ if ch == nil {
+ // lazily create the child node for this segment
+ if r.children == nil {
+ r.children = map[string]*ImportResolver{}
+ }
+ ch = &ImportResolver{}
+ r.children[car] = ch
+ }
+ ch.registerImportPathFrom(registerPath, importPath, cdr)
+}
+
+// LoadFileDescriptor is the same as the package function of the same name, but
+// any alternate paths configured in this resolver are used when linking the
+// given descriptor proto.
+func (r *ImportResolver) LoadFileDescriptor(filePath string) (*FileDescriptor, error) {
+ // NOTE(review): r is not consulted here; this delegates straight to the
+ // package-level function, so locally registered paths have no effect on
+ // this call — confirm against upstream intent.
+ return LoadFileDescriptor(filePath)
+}
+
+// LoadMessageDescriptor is the same as the package function of the same name,
+// but any alternate paths configured in this resolver are used when linking
+// files for the returned descriptor.
+func (r *ImportResolver) LoadMessageDescriptor(msgName string) (*MessageDescriptor, error) {
+ // NOTE(review): r is unused here; delegates to the package-level function.
+ return LoadMessageDescriptor(msgName)
+}
+
+// LoadMessageDescriptorForMessage is the same as the package function of the
+// same name, but any alternate paths configured in this resolver are used when
+// linking files for the returned descriptor.
+func (r *ImportResolver) LoadMessageDescriptorForMessage(msg proto.Message) (*MessageDescriptor, error) {
+ // NOTE(review): r is unused here; delegates to the package-level function.
+ return LoadMessageDescriptorForMessage(msg)
+}
+
+// LoadMessageDescriptorForType is the same as the package function of the same
+// name, but any alternate paths configured in this resolver are used when
+// linking files for the returned descriptor.
+func (r *ImportResolver) LoadMessageDescriptorForType(msgType reflect.Type) (*MessageDescriptor, error) {
+ // NOTE(review): r is unused here; delegates to the package-level function.
+ return LoadMessageDescriptorForType(msgType)
+}
+
+// LoadEnumDescriptorForEnum is the same as the package function of the same
+// name, but any alternate paths configured in this resolver are used when
+// linking files for the returned descriptor.
+func (r *ImportResolver) LoadEnumDescriptorForEnum(enum protoEnum) (*EnumDescriptor, error) {
+ // NOTE(review): r is unused here; delegates to the package-level function.
+ return LoadEnumDescriptorForEnum(enum)
+}
+
+// LoadEnumDescriptorForType is the same as the package function of the same
+// name, but any alternate paths configured in this resolver are used when
+// linking files for the returned descriptor.
+func (r *ImportResolver) LoadEnumDescriptorForType(enumType reflect.Type) (*EnumDescriptor, error) {
+ // NOTE(review): r is unused here; delegates to the package-level function.
+ return LoadEnumDescriptorForType(enumType)
+}
+
+// LoadFieldDescriptorForExtension is the same as the package function of the
+// same name, but any alternate paths configured in this resolver are used when
+// linking files for the returned descriptor.
+func (r *ImportResolver) LoadFieldDescriptorForExtension(ext *proto.ExtensionDesc) (*FieldDescriptor, error) {
+ // NOTE(review): r is unused here; delegates to the package-level function.
+ return LoadFieldDescriptorForExtension(ext)
+}
+
+// CreateFileDescriptor is the same as the package function of the same name,
+// but any alternate paths configured in this resolver are used when linking the
+// given descriptor proto.
+func (r *ImportResolver) CreateFileDescriptor(fdp *descriptorpb.FileDescriptorProto, deps ...*FileDescriptor) (*FileDescriptor, error) {
+ // unlike the Load* methods above, r is passed through and consulted during linking
+ return createFileDescriptor(fdp, deps, r)
+}
+
+// CreateFileDescriptors is the same as the package function of the same name,
+// but any alternate paths configured in this resolver are used when linking the
+// given descriptor protos.
+func (r *ImportResolver) CreateFileDescriptors(fds []*descriptorpb.FileDescriptorProto) (map[string]*FileDescriptor, error) {
+ // r is passed through and consulted during linking
+ return createFileDescriptors(fds, r)
+}
+
+// CreateFileDescriptorFromSet is the same as the package function of the same
+// name, but any alternate paths configured in this resolver are used when
+// linking the descriptor protos in the given set.
+func (r *ImportResolver) CreateFileDescriptorFromSet(fds *descriptorpb.FileDescriptorSet) (*FileDescriptor, error) {
+ // r is passed through and consulted during linking
+ return createFileDescriptorFromSet(fds, r)
+}
+
+// CreateFileDescriptorsFromSet is the same as the package function of the same
+// name, but any alternate paths configured in this resolver are used when
+// linking the descriptor protos in the given set.
+func (r *ImportResolver) CreateFileDescriptorsFromSet(fds *descriptorpb.FileDescriptorSet) (map[string]*FileDescriptor, error) {
+ // r is passed through and consulted during linking
+ return createFileDescriptorsFromSet(fds, r)
+}
+
+const dotPrefix = "./"
+
+// clean normalizes a proto import path: the path is lexically cleaned and
+// converted to forward slashes; "" and "." both normalize to "".
+func clean(path string) string {
+ if path == "" {
+ return ""
+ }
+ path = filepath.ToSlash(filepath.Clean(path))
+ if path == "." {
+ return ""
+ }
+ // NOTE(review): filepath.Clean already removes a leading "./", so this
+ // trim appears to be defensive only — confirm.
+ return strings.TrimPrefix(path, dotPrefix)
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/internal/proto3_optional.go b/vendor/github.com/jhump/protoreflect/desc/internal/proto3_optional.go
new file mode 100644
index 0000000..aa8c3e9
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/internal/proto3_optional.go
@@ -0,0 +1,75 @@
+package internal
+
+import (
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ "google.golang.org/protobuf/types/descriptorpb"
+)
+
+// ProcessProto3OptionalFields adds synthetic oneofs to the given message descriptor
+// for each proto3 optional field. It also updates the fields to have the correct
+// oneof index reference. The given callback, if not nil, is called for each synthetic
+// oneof created.
+func ProcessProto3OptionalFields(msgd *descriptorpb.DescriptorProto, callback func(*descriptorpb.FieldDescriptorProto, *descriptorpb.OneofDescriptorProto)) {
+ var allNames map[string]struct{}
+ for _, fd := range msgd.Field {
+ if fd.GetProto3Optional() {
+ // lazy init the set of all names
+ if allNames == nil {
+ allNames = map[string]struct{}{}
+ for _, fd := range msgd.Field {
+ allNames[fd.GetName()] = struct{}{}
+ }
+ for _, od := range msgd.OneofDecl {
+ allNames[od.GetName()] = struct{}{}
+ }
+ // NB: protoc only considers names of other fields and oneofs
+ // when computing the synthetic oneof name. But that feels like
+ // a bug, since it means it could generate a name that conflicts
+ // with some other symbol defined in the message. If it's decided
+ // that's NOT a bug and is desirable, then we should remove the
+ // following four loops to mimic protoc's behavior.
+ for _, xd := range msgd.Extension {
+ allNames[xd.GetName()] = struct{}{}
+ }
+ for _, ed := range msgd.EnumType {
+ allNames[ed.GetName()] = struct{}{}
+ for _, evd := range ed.Value {
+ allNames[evd.GetName()] = struct{}{}
+ }
+ }
+ for _, fd := range msgd.NestedType {
+ allNames[fd.GetName()] = struct{}{}
+ }
+ for _, n := range msgd.ReservedName {
+ allNames[n] = struct{}{}
+ }
+ }
+
+ // Compute a name for the synthetic oneof. This uses the same
+ // algorithm as used in protoc:
+ // https://github.com/protocolbuffers/protobuf/blob/74ad62759e0a9b5a21094f3fb9bb4ebfaa0d1ab8/src/google/protobuf/compiler/parser.cc#L785-L803
+ ooName := fd.GetName()
+ if !strings.HasPrefix(ooName, "_") {
+ ooName = "_" + ooName
+ }
+ for {
+ _, ok := allNames[ooName]
+ if !ok {
+ // found a unique name
+ allNames[ooName] = struct{}{}
+ break
+ }
+ // still conflicting: prepend "X" and try again
+ ooName = "X" + ooName
+ }
+
+ // the synthetic oneof is appended below, so the field's oneof index
+ // is the current length of OneofDecl
+ fd.OneofIndex = proto.Int32(int32(len(msgd.OneofDecl)))
+ ood := &descriptorpb.OneofDescriptorProto{Name: proto.String(ooName)}
+ msgd.OneofDecl = append(msgd.OneofDecl, ood)
+ if callback != nil {
+ callback(fd, ood)
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/internal/registry.go b/vendor/github.com/jhump/protoreflect/desc/internal/registry.go
new file mode 100644
index 0000000..d7259e4
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/internal/registry.go
@@ -0,0 +1,67 @@
+package internal
+
+import (
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/types/dynamicpb"
+)
+
+// RegisterExtensionsFromImportedFile registers extensions in the given file as well
+// as those in its public imports. So if another file imports the given fd, this adds
+// all extensions made visible to that importing file.
+//
+// All extensions in the given file are made visible to the importing file, and so are
+// extensions in any public imports in the given file.
+func RegisterExtensionsFromImportedFile(reg *protoregistry.Types, fd protoreflect.FileDescriptor) {
+ // extensionsOnly=true, publicImportsOnly=true
+ registerTypesForFile(reg, fd, true, true)
+}
+
+// RegisterExtensionsVisibleToFile registers all extensions visible to the given file.
+// This includes all extensions defined in fd and as well as extensions defined in the
+// files that it imports (and any public imports thereof, etc).
+//
+// This is effectively the same as registering the extensions in fd and then calling
+// RegisterExtensionsFromImportedFile for each file imported by fd.
+func RegisterExtensionsVisibleToFile(reg *protoregistry.Types, fd protoreflect.FileDescriptor) {
+ // extensionsOnly=true, publicImportsOnly=false
+ registerTypesForFile(reg, fd, true, false)
+}
+
+// RegisterTypesVisibleToFile registers all types visible to the given file.
+// This is the same as RegisterExtensionsVisibleToFile but it also registers
+// message and enum types, not just extensions.
+func RegisterTypesVisibleToFile(reg *protoregistry.Types, fd protoreflect.FileDescriptor) {
+ // extensionsOnly=false, publicImportsOnly=false
+ registerTypesForFile(reg, fd, false, false)
+}
+
+// registerTypesForFile registers types defined in fd and then recurses into its
+// imports. When publicImportsOnly is true, only public imports are followed;
+// the recursive call always passes true, so from imported files only their
+// public imports are followed (matching proto import visibility).
+func registerTypesForFile(reg *protoregistry.Types, fd protoreflect.FileDescriptor, extensionsOnly, publicImportsOnly bool) {
+ registerTypes(reg, fd, extensionsOnly)
+ for i := 0; i < fd.Imports().Len(); i++ {
+ imp := fd.Imports().Get(i)
+ if imp.IsPublic || !publicImportsOnly {
+ registerTypesForFile(reg, imp, extensionsOnly, true)
+ }
+ }
+}
+
+// registerTypes registers the extension (and, unless extensionsOnly, message
+// and enum) types declared directly in elem, then recurses into nested
+// messages. Registration errors are deliberately ignored (e.g. a type that is
+// already registered).
+func registerTypes(reg *protoregistry.Types, elem fileOrMessage, extensionsOnly bool) {
+ for i := 0; i < elem.Extensions().Len(); i++ {
+ _ = reg.RegisterExtension(dynamicpb.NewExtensionType(elem.Extensions().Get(i)))
+ }
+ if !extensionsOnly {
+ for i := 0; i < elem.Messages().Len(); i++ {
+ _ = reg.RegisterMessage(dynamicpb.NewMessageType(elem.Messages().Get(i)))
+ }
+ for i := 0; i < elem.Enums().Len(); i++ {
+ _ = reg.RegisterEnum(dynamicpb.NewEnumType(elem.Enums().Get(i)))
+ }
+ }
+ // recurse into nested messages, which may themselves declare types
+ for i := 0; i < elem.Messages().Len(); i++ {
+ registerTypes(reg, elem.Messages().Get(i), extensionsOnly)
+ }
+}
+
+// fileOrMessage abstracts over the two descriptor kinds that can contain
+// extensions, messages, and enums: files and messages.
+type fileOrMessage interface {
+ Extensions() protoreflect.ExtensionDescriptors
+ Messages() protoreflect.MessageDescriptors
+ Enums() protoreflect.EnumDescriptors
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/internal/source_info.go b/vendor/github.com/jhump/protoreflect/desc/internal/source_info.go
new file mode 100644
index 0000000..6037128
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/internal/source_info.go
@@ -0,0 +1,107 @@
+package internal
+
+import (
+ "google.golang.org/protobuf/types/descriptorpb"
+)
+
+// SourceInfoMap is a map of paths in a descriptor to the corresponding source
+// code info.
+type SourceInfoMap map[string][]*descriptorpb.SourceCodeInfo_Location
+
+// Get returns the source code info for the given path. If there are
+// multiple locations for the same path, the first one is returned.
+func (m SourceInfoMap) Get(path []int32) *descriptorpb.SourceCodeInfo_Location {
+ v := m[asMapKey(path)]
+ if len(v) > 0 {
+ return v[0]
+ }
+ // no location recorded for this path
+ return nil
+}
+
+// GetAll returns all source code info for the given path.
+func (m SourceInfoMap) GetAll(path []int32) []*descriptorpb.SourceCodeInfo_Location {
+ // nil when no locations are recorded for this path
+ return m[asMapKey(path)]
+}
+
+// Add stores the given source code info for the given path.
+func (m SourceInfoMap) Add(path []int32, loc *descriptorpb.SourceCodeInfo_Location) {
+ // NOTE(review): asMapKey is computed twice here; could be hoisted into a local.
+ m[asMapKey(path)] = append(m[asMapKey(path)], loc)
+}
+
+// PutIfAbsent stores the given source code info for the given path only if the
+// given path does not exist in the map. This method returns true when the value
+// is stored, false if the path already exists.
+func (m SourceInfoMap) PutIfAbsent(path []int32, loc *descriptorpb.SourceCodeInfo_Location) bool {
+ k := asMapKey(path)
+ if _, ok := m[k]; ok {
+ // existing entry wins; nothing is stored
+ return false
+ }
+ m[k] = []*descriptorpb.SourceCodeInfo_Location{loc}
+ return true
+}
+
+// asMapKey converts a source-info path into a string usable as a map key, by
+// packing each int32 element as 4 little-endian bytes.
+func asMapKey(slice []int32) string {
+ // NB: arrays should be usable as map keys, but this does not
+ // work due to a bug: https://github.com/golang/go/issues/22605
+ //rv := reflect.ValueOf(slice)
+ //arrayType := reflect.ArrayOf(rv.Len(), rv.Type().Elem())
+ //array := reflect.New(arrayType).Elem()
+ //reflect.Copy(array, rv)
+ //return array.Interface()
+
+ b := make([]byte, len(slice)*4)
+ j := 0
+ for _, s := range slice {
+ // little-endian encoding of each path element
+ b[j] = byte(s)
+ b[j+1] = byte(s >> 8)
+ b[j+2] = byte(s >> 16)
+ b[j+3] = byte(s >> 24)
+ j += 4
+ }
+ return string(b)
+}
+
+// CreateSourceInfoMap constructs a new SourceInfoMap and populates it with the
+// source code info in the given file descriptor proto.
+func CreateSourceInfoMap(fd *descriptorpb.FileDescriptorProto) SourceInfoMap {
+ res := SourceInfoMap{}
+ PopulateSourceInfoMap(fd, res)
+ return res
+}
+
+// PopulateSourceInfoMap populates the given SourceInfoMap with information from
+// the given file descriptor.
+func PopulateSourceInfoMap(fd *descriptorpb.FileDescriptorProto, m SourceInfoMap) {
+ // index every location by its path; Add preserves duplicates in order
+ for _, l := range fd.GetSourceCodeInfo().GetLocation() {
+ m.Add(l.Path, l)
+ }
+}
+
+// NB: This wonkiness allows desc.Descriptor impl to implement an interface that
+// is only usable from this package, by embedding a SourceInfoComputeFunc that
+// implements the actual logic (which must live in desc package to avoid a
+// dependency cycle).
+
+// SourceInfoComputer is a single method which will be invoked to recompute
+// source info. This is needed for the protoparse package, which needs to link
+// descriptors without source info in order to interpret options, but then needs
+// to re-compute source info after that interpretation so that final linked
+// descriptors expose the right info.
+type SourceInfoComputer interface {
+ recomputeSourceInfo()
+}
+
+// SourceInfoComputeFunc is the type that a desc.Descriptor will embed. It will
+// be aliased in the desc package to an unexported name so it is not marked as
+// an exported field in reflection and not present in Go docs.
+type SourceInfoComputeFunc func()
+
+// recomputeSourceInfo implements SourceInfoComputer by invoking the wrapped function.
+func (f SourceInfoComputeFunc) recomputeSourceInfo() {
+ f()
+}
+
+// RecomputeSourceInfo is used to initiate recomputation of source info. This
+// is used by the protoparse package, after it interprets options.
+func RecomputeSourceInfo(c SourceInfoComputer) {
+ c.recomputeSourceInfo()
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/internal/util.go b/vendor/github.com/jhump/protoreflect/desc/internal/util.go
new file mode 100644
index 0000000..595c872
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/internal/util.go
@@ -0,0 +1,296 @@
+package internal
+
+import (
+ "math"
+ "unicode"
+ "unicode/utf8"
+)
+
+// The *Tag constants below mirror the field numbers assigned to the
+// corresponding fields in google/protobuf/descriptor.proto; they are used to
+// build source-code-info paths.
+const (
+	// MaxNormalTag is the maximum allowed tag number for a field in a normal message.
+	MaxNormalTag = 536870911 // 2^29 - 1
+
+	// MaxMessageSetTag is the maximum allowed tag number of a field in a message that
+	// uses the message set wire format.
+	MaxMessageSetTag = math.MaxInt32 - 1
+
+	// MaxTag is the maximum allowed tag number. (It is the same as MaxMessageSetTag
+	// since that is the absolute highest allowed.)
+	MaxTag = MaxMessageSetTag
+
+	// SpecialReservedStart is the first tag in a range that is reserved and not
+	// allowed for use in message definitions.
+	SpecialReservedStart = 19000
+	// SpecialReservedEnd is the last tag in a range that is reserved and not
+	// allowed for use in message definitions.
+	SpecialReservedEnd = 19999
+
+	// NB: It would be nice to use constants from generated code instead of
+	// hard-coding these here. But code-gen does not emit these as constants
+	// anywhere. The only places they appear in generated code are struct tags
+	// on fields of the generated descriptor protos.
+
+	// File_packageTag is the tag number of the package element in a file
+	// descriptor proto.
+	File_packageTag = 2
+	// File_dependencyTag is the tag number of the dependencies element in a
+	// file descriptor proto.
+	File_dependencyTag = 3
+	// File_messagesTag is the tag number of the messages element in a file
+	// descriptor proto.
+	File_messagesTag = 4
+	// File_enumsTag is the tag number of the enums element in a file descriptor
+	// proto.
+	File_enumsTag = 5
+	// File_servicesTag is the tag number of the services element in a file
+	// descriptor proto.
+	File_servicesTag = 6
+	// File_extensionsTag is the tag number of the extensions element in a file
+	// descriptor proto.
+	File_extensionsTag = 7
+	// File_optionsTag is the tag number of the options element in a file
+	// descriptor proto.
+	File_optionsTag = 8
+	// File_syntaxTag is the tag number of the syntax element in a file
+	// descriptor proto.
+	File_syntaxTag = 12
+	// File_editionTag is the tag number of the edition element in a file
+	// descriptor proto.
+	File_editionTag = 14
+	// Message_nameTag is the tag number of the name element in a message
+	// descriptor proto.
+	Message_nameTag = 1
+	// Message_fieldsTag is the tag number of the fields element in a message
+	// descriptor proto.
+	Message_fieldsTag = 2
+	// Message_nestedMessagesTag is the tag number of the nested messages
+	// element in a message descriptor proto.
+	Message_nestedMessagesTag = 3
+	// Message_enumsTag is the tag number of the enums element in a message
+	// descriptor proto.
+	Message_enumsTag = 4
+	// Message_extensionRangeTag is the tag number of the extension ranges
+	// element in a message descriptor proto.
+	Message_extensionRangeTag = 5
+	// Message_extensionsTag is the tag number of the extensions element in a
+	// message descriptor proto.
+	Message_extensionsTag = 6
+	// Message_optionsTag is the tag number of the options element in a message
+	// descriptor proto.
+	Message_optionsTag = 7
+	// Message_oneOfsTag is the tag number of the one-ofs element in a message
+	// descriptor proto.
+	Message_oneOfsTag = 8
+	// Message_reservedRangeTag is the tag number of the reserved ranges element
+	// in a message descriptor proto.
+	Message_reservedRangeTag = 9
+	// Message_reservedNameTag is the tag number of the reserved names element
+	// in a message descriptor proto.
+	Message_reservedNameTag = 10
+	// ExtensionRange_startTag is the tag number of the start index in an
+	// extension range proto.
+	ExtensionRange_startTag = 1
+	// ExtensionRange_endTag is the tag number of the end index in an
+	// extension range proto.
+	ExtensionRange_endTag = 2
+	// ExtensionRange_optionsTag is the tag number of the options element in an
+	// extension range proto.
+	ExtensionRange_optionsTag = 3
+	// ReservedRange_startTag is the tag number of the start index in a reserved
+	// range proto.
+	ReservedRange_startTag = 1
+	// ReservedRange_endTag is the tag number of the end index in a reserved
+	// range proto.
+	ReservedRange_endTag = 2
+	// Field_nameTag is the tag number of the name element in a field descriptor
+	// proto.
+	Field_nameTag = 1
+	// Field_extendeeTag is the tag number of the extendee element in a field
+	// descriptor proto.
+	Field_extendeeTag = 2
+	// Field_numberTag is the tag number of the number element in a field
+	// descriptor proto.
+	Field_numberTag = 3
+	// Field_labelTag is the tag number of the label element in a field
+	// descriptor proto.
+	Field_labelTag = 4
+	// Field_typeTag is the tag number of the type element in a field descriptor
+	// proto.
+	Field_typeTag = 5
+	// Field_typeNameTag is the tag number of the type name element in a field
+	// descriptor proto.
+	Field_typeNameTag = 6
+	// Field_defaultTag is the tag number of the default value element in a
+	// field descriptor proto.
+	Field_defaultTag = 7
+	// Field_optionsTag is the tag number of the options element in a field
+	// descriptor proto.
+	Field_optionsTag = 8
+	// Field_jsonNameTag is the tag number of the JSON name element in a field
+	// descriptor proto.
+	Field_jsonNameTag = 10
+	// Field_proto3OptionalTag is the tag number of the proto3_optional element
+	// in a descriptor proto.
+	Field_proto3OptionalTag = 17
+	// OneOf_nameTag is the tag number of the name element in a one-of
+	// descriptor proto.
+	OneOf_nameTag = 1
+	// OneOf_optionsTag is the tag number of the options element in a one-of
+	// descriptor proto.
+	OneOf_optionsTag = 2
+	// Enum_nameTag is the tag number of the name element in an enum descriptor
+	// proto.
+	Enum_nameTag = 1
+	// Enum_valuesTag is the tag number of the values element in an enum
+	// descriptor proto.
+	Enum_valuesTag = 2
+	// Enum_optionsTag is the tag number of the options element in an enum
+	// descriptor proto.
+	Enum_optionsTag = 3
+	// Enum_reservedRangeTag is the tag number of the reserved ranges element in
+	// an enum descriptor proto.
+	Enum_reservedRangeTag = 4
+	// Enum_reservedNameTag is the tag number of the reserved names element in
+	// an enum descriptor proto.
+	Enum_reservedNameTag = 5
+	// EnumVal_nameTag is the tag number of the name element in an enum value
+	// descriptor proto.
+	EnumVal_nameTag = 1
+	// EnumVal_numberTag is the tag number of the number element in an enum
+	// value descriptor proto.
+	EnumVal_numberTag = 2
+	// EnumVal_optionsTag is the tag number of the options element in an enum
+	// value descriptor proto.
+	EnumVal_optionsTag = 3
+	// Service_nameTag is the tag number of the name element in a service
+	// descriptor proto.
+	Service_nameTag = 1
+	// Service_methodsTag is the tag number of the methods element in a service
+	// descriptor proto.
+	Service_methodsTag = 2
+	// Service_optionsTag is the tag number of the options element in a service
+	// descriptor proto.
+	Service_optionsTag = 3
+	// Method_nameTag is the tag number of the name element in a method
+	// descriptor proto.
+	Method_nameTag = 1
+	// Method_inputTag is the tag number of the input type element in a method
+	// descriptor proto.
+	Method_inputTag = 2
+	// Method_outputTag is the tag number of the output type element in a method
+	// descriptor proto.
+	Method_outputTag = 3
+	// Method_optionsTag is the tag number of the options element in a method
+	// descriptor proto.
+	Method_optionsTag = 4
+	// Method_inputStreamTag is the tag number of the input stream flag in a
+	// method descriptor proto.
+	Method_inputStreamTag = 5
+	// Method_outputStreamTag is the tag number of the output stream flag in a
+	// method descriptor proto.
+	Method_outputStreamTag = 6
+
+	// UninterpretedOptionsTag is the tag number of the uninterpreted options
+	// element. All *Options messages use the same tag for the field that stores
+	// uninterpreted options.
+	UninterpretedOptionsTag = 999
+
+	// Uninterpreted_nameTag is the tag number of the name element in an
+	// uninterpreted options proto.
+	Uninterpreted_nameTag = 2
+	// Uninterpreted_identTag is the tag number of the identifier value in an
+	// uninterpreted options proto.
+	Uninterpreted_identTag = 3
+	// Uninterpreted_posIntTag is the tag number of the positive int value in an
+	// uninterpreted options proto.
+	Uninterpreted_posIntTag = 4
+	// Uninterpreted_negIntTag is the tag number of the negative int value in an
+	// uninterpreted options proto.
+	Uninterpreted_negIntTag = 5
+	// Uninterpreted_doubleTag is the tag number of the double value in an
+	// uninterpreted options proto.
+	Uninterpreted_doubleTag = 6
+	// Uninterpreted_stringTag is the tag number of the string value in an
+	// uninterpreted options proto.
+	Uninterpreted_stringTag = 7
+	// Uninterpreted_aggregateTag is the tag number of the aggregate value in an
+	// uninterpreted options proto.
+	Uninterpreted_aggregateTag = 8
+	// UninterpretedName_nameTag is the tag number of the name element in an
+	// uninterpreted option name proto.
+	UninterpretedName_nameTag = 1
+)
+
+// JsonName returns the default JSON name for a field with the given name.
+// This mirrors the algorithm in protoc:
+//
+//	https://github.com/protocolbuffers/protobuf/blob/v21.3/src/google/protobuf/descriptor.cc#L95
+//
+// Underscores are dropped and the rune following each underscore is
+// upper-cased (e.g. "foo_bar" -> "fooBar").
+func JsonName(name string) string {
+	var js []rune
+	nextUpper := false
+	for _, r := range name {
+		if r == '_' {
+			// Consume the underscore; capitalize whatever comes next.
+			nextUpper = true
+			continue
+		}
+		if nextUpper {
+			nextUpper = false
+			js = append(js, unicode.ToUpper(r))
+		} else {
+			js = append(js, r)
+		}
+	}
+	return string(js)
+}
+
+// InitCap returns the given field name, but with the first letter capitalized.
+// NOTE(review): assumes name is non-empty; an empty string would decode to
+// utf8.RuneError — callers pass real field names, so this does not arise.
+func InitCap(name string) string {
+	r, sz := utf8.DecodeRuneInString(name)
+	return string(unicode.ToUpper(r)) + name[sz:]
+}
+
+// CreatePrefixList returns a list of package prefixes to search when resolving
+// a symbol name. If the given package is blank, it returns only the empty
+// string. If the given package contains only one token, e.g. "foo", it returns
+// that token and the empty string, e.g. ["foo", ""]. Otherwise, it returns
+// successively shorter prefixes of the package and then the empty string. For
+// example, for a package named "foo.bar.baz" it will return the following list:
+//
+//	["foo.bar.baz", "foo.bar", "foo", ""]
+func CreatePrefixList(pkg string) []string {
+	if pkg == "" {
+		return []string{""}
+	}
+
+	numDots := 0
+	// one pass to pre-allocate the returned slice
+	for i := 0; i < len(pkg); i++ {
+		if pkg[i] == '.' {
+			numDots++
+		}
+	}
+	if numDots == 0 {
+		return []string{pkg, ""}
+	}
+
+	// numDots+2 slots: every dot-delimited prefix, the full package, and the
+	// trailing empty string (which remains the slice's zero value).
+	prefixes := make([]string, numDots+2)
+	// second pass to fill in returned slice; numDots counts down so longer
+	// prefixes land at smaller indexes.
+	for i := 0; i < len(pkg); i++ {
+		if pkg[i] == '.' {
+			prefixes[numDots] = pkg[:i]
+			numDots--
+		}
+	}
+	prefixes[0] = pkg
+
+	return prefixes
+}
+
+// GetMaxTag returns the max tag number allowed, based on whether a message uses
+// message set wire format or not.
+func GetMaxTag(isMessageSet bool) int32 {
+	if isMessageSet {
+		return MaxMessageSetTag
+	}
+	return MaxNormalTag
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/load.go b/vendor/github.com/jhump/protoreflect/desc/load.go
new file mode 100644
index 0000000..8fd09ac
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/load.go
@@ -0,0 +1,258 @@
+package desc
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sync"
+
+ "github.com/golang/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/types/descriptorpb"
+
+ "github.com/jhump/protoreflect/desc/sourceinfo"
+ "github.com/jhump/protoreflect/internal"
+)
+
+// The global cache is used to store descriptors that wrap items in
+// protoregistry.GlobalTypes and protoregistry.GlobalFiles. This prevents
+// repeating work to re-wrap underlying global descriptors.
+var (
+	// We put all wrapped file and message descriptors in this cache.
+	loadedDescriptors = lockingCache{cache: mapCache{}}
+
+	// Unfortunately, we need a different mechanism for enums for
+	// compatibility with old APIs, which required that they were
+	// registered in a different way :(
+	// loadedEnumsMu guards loadedEnums, which is keyed by the enum's
+	// non-pointer Go type.
+	loadedEnumsMu sync.RWMutex
+	loadedEnums   = map[reflect.Type]*EnumDescriptor{}
+)
+
+// LoadFileDescriptor creates a file descriptor using the bytes returned by
+// proto.FileDescriptor. Descriptors are cached so that they do not need to be
+// re-processed if the same file is fetched again later. If the file is not
+// registered (under its canonical path or a known legacy alias), it returns
+// internal.ErrNoSuchFile; other registry errors are returned unchanged.
+func LoadFileDescriptor(file string) (*FileDescriptor, error) {
+	d, err := sourceinfo.GlobalFiles.FindFileByPath(file)
+	if errors.Is(err, protoregistry.NotFound) {
+		// for backwards compatibility, see if this matches a known old
+		// alias for the file (older versions of libraries that registered
+		// the files using incorrect/non-canonical paths)
+		if alt := internal.StdFileAliases[file]; alt != "" {
+			d, err = sourceinfo.GlobalFiles.FindFileByPath(alt)
+		}
+	}
+	if err != nil {
+		// BUG FIX: the previous condition was inverted (!errors.Is), which
+		// mapped unrelated registry errors to ErrNoSuchFile while leaking the
+		// raw protoregistry.NotFound to callers. NotFound is the case that
+		// must become ErrNoSuchFile; everything else propagates as-is.
+		if errors.Is(err, protoregistry.NotFound) {
+			return nil, internal.ErrNoSuchFile(file)
+		}
+		return nil, err
+	}
+	if fd := loadedDescriptors.get(d); fd != nil {
+		return fd.(*FileDescriptor), nil
+	}
+
+	// Not cached yet: wrap under the cache lock so concurrent loads of the
+	// same file share one wrapped descriptor.
+	var fd *FileDescriptor
+	loadedDescriptors.withLock(func(cache descriptorCache) {
+		fd, err = wrapFile(d, cache)
+	})
+	return fd, err
+}
+
+// LoadMessageDescriptor loads descriptor using the encoded descriptor proto returned by
+// Message.Descriptor() for the given message type. If the given type is not recognized,
+// then a nil descriptor is returned.
+func LoadMessageDescriptor(message string) (*MessageDescriptor, error) {
+	mt, err := sourceinfo.GlobalTypes.FindMessageByName(protoreflect.FullName(message))
+	if err != nil {
+		// NotFound is intentionally not an error here: (nil, nil) signals
+		// "unknown message" to callers.
+		if errors.Is(err, protoregistry.NotFound) {
+			return nil, nil
+		}
+		return nil, err
+	}
+	return loadMessageDescriptor(mt.Descriptor())
+}
+
+// loadMessageDescriptor wraps the given protoreflect descriptor, consulting
+// and populating the global descriptor cache.
+func loadMessageDescriptor(md protoreflect.MessageDescriptor) (*MessageDescriptor, error) {
+	d := loadedDescriptors.get(md)
+	if d != nil {
+		return d.(*MessageDescriptor), nil
+	}
+
+	// Cache miss: wrap under the lock so the result is shared.
+	var err error
+	loadedDescriptors.withLock(func(cache descriptorCache) {
+		d, err = wrapMessage(md, cache)
+	})
+	if err != nil {
+		return nil, err
+	}
+	return d.(*MessageDescriptor), err
+}
+
+// LoadMessageDescriptorForType loads descriptor using the encoded descriptor proto returned
+// by message.Descriptor() for the given message type. If the given type is not recognized,
+// then a nil descriptor is returned.
+func LoadMessageDescriptorForType(messageType reflect.Type) (*MessageDescriptor, error) {
+	// Obtain a zero-valued proto.Message for the type, then defer to the
+	// message-based loader.
+	m, err := messageFromType(messageType)
+	if err != nil {
+		return nil, err
+	}
+	return LoadMessageDescriptorForMessage(m)
+}
+
+// LoadMessageDescriptorForMessage loads descriptor using the encoded descriptor proto
+// returned by message.Descriptor(). If the given type is not recognized, then a nil
+// descriptor is returned.
+func LoadMessageDescriptorForMessage(message proto.Message) (*MessageDescriptor, error) {
+	// efficiently handle dynamic messages: they can hand us their descriptor
+	// directly, no registry lookup needed.
+	type descriptorable interface {
+		GetMessageDescriptor() *MessageDescriptor
+	}
+	if d, ok := message.(descriptorable); ok {
+		return d.GetMessageDescriptor(), nil
+	}
+
+	// Use the message's own reflection support; fall back to the legacy
+	// golang/protobuf shim for messages that predate protoreflect.
+	var md protoreflect.MessageDescriptor
+	if m, ok := message.(protoreflect.ProtoMessage); ok {
+		md = m.ProtoReflect().Descriptor()
+	} else {
+		md = proto.MessageReflect(message).Descriptor()
+	}
+	return loadMessageDescriptor(sourceinfo.WrapMessage(md))
+}
+
+// messageFromType returns a zero value of the given type as a proto.Message.
+// Generated messages implement proto.Message with a pointer receiver, so a
+// non-pointer type is first converted to its pointer type.
+func messageFromType(mt reflect.Type) (proto.Message, error) {
+	if mt.Kind() != reflect.Ptr {
+		mt = reflect.PtrTo(mt)
+	}
+	m, ok := reflect.Zero(mt).Interface().(proto.Message)
+	if !ok {
+		return nil, fmt.Errorf("failed to create message from type: %v", mt)
+	}
+	return m, nil
+}
+
+// protoEnum is the interface implemented by all generated enums: it returns
+// the gzipped file descriptor bytes and the path of the enum within that file.
+type protoEnum interface {
+	EnumDescriptor() ([]byte, []int)
+}
+
+// NB: There is no LoadEnumDescriptor that takes a fully-qualified enum name because
+// it is not useful since protoc-gen-go does not expose the name anywhere in generated
+// code or register it in a way that it is accessible for reflection code. This also
+// means we have to cache enum descriptors differently -- we can only cache them as
+// they are requested, as opposed to caching all enum types whenever a file descriptor
+// is cached. This is because we need to know the generated type of the enums, and we
+// don't know that at the time of caching file descriptors.
+
+// LoadEnumDescriptorForType loads descriptor using the encoded descriptor proto returned
+// by enum.EnumDescriptor() for the given enum type.
+func LoadEnumDescriptorForType(enumType reflect.Type) (*EnumDescriptor, error) {
+	// we cache descriptors using non-pointer type
+	if enumType.Kind() == reflect.Ptr {
+		enumType = enumType.Elem()
+	}
+	e := getEnumFromCache(enumType)
+	if e != nil {
+		return e, nil
+	}
+	// Cache miss: materialize a zero value of the enum so we can ask it for
+	// its descriptor bytes.
+	enum, err := enumFromType(enumType)
+	if err != nil {
+		return nil, err
+	}
+
+	return loadEnumDescriptor(enumType, enum)
+}
+
+// getEnumFromCache returns the cached descriptor for the given (non-pointer)
+// enum type, or nil if it has not been loaded yet.
+func getEnumFromCache(t reflect.Type) *EnumDescriptor {
+	loadedEnumsMu.RLock()
+	defer loadedEnumsMu.RUnlock()
+	return loadedEnums[t]
+}
+
+// putEnumInCache records the descriptor for the given (non-pointer) enum type.
+func putEnumInCache(t reflect.Type, d *EnumDescriptor) {
+	loadedEnumsMu.Lock()
+	defer loadedEnumsMu.Unlock()
+	loadedEnums[t] = d
+}
+
+// LoadEnumDescriptorForEnum loads descriptor using the encoded descriptor proto
+// returned by enum.EnumDescriptor().
+func LoadEnumDescriptorForEnum(enum protoEnum) (*EnumDescriptor, error) {
+	et := reflect.TypeOf(enum)
+	// we cache descriptors using non-pointer type
+	if et.Kind() == reflect.Ptr {
+		et = et.Elem()
+		// Re-materialize the enum as a non-pointer zero value so the
+		// descriptor query below matches the cache key's type.
+		enum = reflect.Zero(et).Interface().(protoEnum)
+	}
+	e := getEnumFromCache(et)
+	if e != nil {
+		return e, nil
+	}
+
+	return loadEnumDescriptor(et, enum)
+}
+
+// enumFromType returns a zero value of the given type as a protoEnum. If the
+// type itself does not implement protoEnum, its pointer type is tried, since
+// some generated code declares EnumDescriptor with a pointer receiver.
+func enumFromType(et reflect.Type) (protoEnum, error) {
+	e, ok := reflect.Zero(et).Interface().(protoEnum)
+	if !ok {
+		// BUG FIX: the retry must use the pointer-to type. The previous code
+		// called et.Elem() on a non-pointer type, which panics for the
+		// typical int32-based enum kinds (reflect.Type.Elem is only defined
+		// for Array/Chan/Map/Pointer/Slice).
+		if et.Kind() != reflect.Ptr {
+			et = reflect.PtrTo(et)
+		}
+		e, ok = reflect.Zero(et).Interface().(protoEnum)
+	}
+	if !ok {
+		return nil, fmt.Errorf("failed to create enum from type: %v", et)
+	}
+	return e, nil
+}
+
+// getDescriptorForEnum decodes the file descriptor proto embedded in the
+// enum's generated code and returns it along with the enum's path within
+// that file.
+func getDescriptorForEnum(enum protoEnum) (*descriptorpb.FileDescriptorProto, []int, error) {
+	fdb, path := enum.EnumDescriptor()
+	// The type name is only used for error reporting by the decoder.
+	name := fmt.Sprintf("%T", enum)
+	fd, err := internal.DecodeFileDescriptor(name, fdb)
+	return fd, path, err
+}
+
+// loadEnumDescriptor loads the file containing the enum, locates the enum
+// descriptor by its path, and caches it under the enum's Go type.
+func loadEnumDescriptor(et reflect.Type, enum protoEnum) (*EnumDescriptor, error) {
+	fdp, path, err := getDescriptorForEnum(enum)
+	if err != nil {
+		return nil, err
+	}
+
+	// Load (and cache) the wrapped file descriptor, then walk to the enum.
+	fd, err := LoadFileDescriptor(fdp.GetName())
+	if err != nil {
+		return nil, err
+	}
+
+	ed := findEnum(fd, path)
+	putEnumInCache(et, ed)
+	return ed, nil
+}
+
+// findEnum resolves a descriptor path (as returned by EnumDescriptor) to the
+// enum it names: a single index means a top-level enum; otherwise the leading
+// indexes walk nested messages and the final index selects the nested enum.
+func findEnum(fd *FileDescriptor, path []int) *EnumDescriptor {
+	if len(path) == 1 {
+		return fd.GetEnumTypes()[path[0]]
+	}
+	md := fd.GetMessageTypes()[path[0]]
+	for _, i := range path[1 : len(path)-1] {
+		md = md.GetNestedMessageTypes()[i]
+	}
+	return md.GetNestedEnumTypes()[path[len(path)-1]]
+}
+
+// LoadFieldDescriptorForExtension loads the field descriptor that corresponds to the given
+// extension description.
+func LoadFieldDescriptorForExtension(ext *proto.ExtensionDesc) (*FieldDescriptor, error) {
+	file, err := LoadFileDescriptor(ext.Filename)
+	if err != nil {
+		return nil, err
+	}
+	field, ok := file.FindSymbol(ext.Name).(*FieldDescriptor)
+	// make sure descriptor agrees with attributes of the ExtensionDesc:
+	// it must be an extension of the expected message with the expected tag.
+	if !ok || !field.IsExtension() || field.GetOwner().GetFullyQualifiedName() != proto.MessageName(ext.ExtendedType) ||
+		field.GetNumber() != ext.Field {
+		return nil, fmt.Errorf("file descriptor contained unexpected object with name %s", ext.Name)
+	}
+	return field, nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast.go
new file mode 100644
index 0000000..2b6b124
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast.go
@@ -0,0 +1,716 @@
+package protoparse
+
+import (
+ "fmt"
+
+ "github.com/bufbuild/protocompile/ast"
+
+ ast2 "github.com/jhump/protoreflect/desc/protoparse/ast"
+)
+
+// convertAST converts a protocompile file AST into this package's legacy AST
+// representation, including trailing comments/whitespace attached to EOF.
+func convertAST(file *ast.FileNode) *ast2.FileNode {
+	elements := make([]ast2.FileElement, len(file.Decls))
+	for i := range file.Decls {
+		elements[i] = convertASTFileElement(file, file.Decls[i])
+	}
+	root := ast2.NewFileNode(convertASTSyntax(file, file.Syntax), elements)
+	eofInfo := file.NodeInfo(file.EOF)
+	root.FinalComments = convertASTComments(eofInfo.LeadingComments())
+	root.FinalWhitespace = eofInfo.LeadingWhitespace()
+	return root
+}
+
+// convertASTSyntax converts a syntax declaration node.
+func convertASTSyntax(f *ast.FileNode, s *ast.SyntaxNode) *ast2.SyntaxNode {
+	return ast2.NewSyntaxNode(
+		convertASTKeyword(f, s.Keyword),
+		convertASTRune(f, s.Equals),
+		convertASTString(f, s.Syntax),
+		convertASTRune(f, s.Semicolon),
+	)
+}
+
+// convertASTFileElement dispatches on the concrete type of a top-level file
+// element; the default panic indicates a protocompile AST type this converter
+// does not yet know about.
+func convertASTFileElement(f *ast.FileNode, el ast.FileElement) ast2.FileElement {
+	switch el := el.(type) {
+	case *ast.ImportNode:
+		return convertASTImport(f, el)
+	case *ast.PackageNode:
+		return convertASTPackage(f, el)
+	case *ast.OptionNode:
+		return convertASTOption(f, el)
+	case *ast.MessageNode:
+		return convertASTMessage(f, el)
+	case *ast.EnumNode:
+		return convertASTEnum(f, el)
+	case *ast.ExtendNode:
+		return convertASTExtend(f, el)
+	case *ast.ServiceNode:
+		return convertASTService(f, el)
+	case *ast.EmptyDeclNode:
+		return convertASTEmpty(f, el)
+	default:
+		panic(fmt.Sprintf("unrecognized type of ast.FileElement: %T", el))
+	}
+}
+
+// convertASTImport converts an import declaration; the optional public/weak
+// modifier keywords are converted only when present.
+func convertASTImport(f *ast.FileNode, imp *ast.ImportNode) *ast2.ImportNode {
+	var public, weak *ast2.KeywordNode
+	if imp.Public != nil {
+		public = convertASTKeyword(f, imp.Public)
+	}
+	if imp.Weak != nil {
+		weak = convertASTKeyword(f, imp.Weak)
+	}
+	return ast2.NewImportNode(
+		convertASTKeyword(f, imp.Keyword),
+		public, weak,
+		convertASTString(f, imp.Name),
+		convertASTRune(f, imp.Semicolon),
+	)
+}
+
+// convertASTPackage converts a package declaration node.
+func convertASTPackage(f *ast.FileNode, p *ast.PackageNode) *ast2.PackageNode {
+	return ast2.NewPackageNode(
+		convertASTKeyword(f, p.Keyword),
+		convertASTIdent(f, p.Name),
+		convertASTRune(f, p.Semicolon),
+	)
+}
+
+// convertASTOption converts an option. A nil Keyword marks a compact option
+// (inside [...] brackets), which has no "option" keyword or semicolon.
+func convertASTOption(f *ast.FileNode, o *ast.OptionNode) *ast2.OptionNode {
+	if o.Keyword == nil {
+		return ast2.NewCompactOptionNode(
+			convertASTOptionName(f, o.Name),
+			convertASTRune(f, o.Equals),
+			convertASTValue(f, o.Val),
+		)
+	}
+	return ast2.NewOptionNode(
+		convertASTKeyword(f, o.Keyword),
+		convertASTOptionName(f, o.Name),
+		convertASTRune(f, o.Equals),
+		convertASTValue(f, o.Val),
+		convertASTRune(f, o.Semicolon),
+	)
+}
+
+// convertASTOptionName converts a (possibly dotted) option name, preserving
+// both the name parts and the interleaved dot tokens.
+func convertASTOptionName(f *ast.FileNode, n *ast.OptionNameNode) *ast2.OptionNameNode {
+	parts := make([]*ast2.FieldReferenceNode, len(n.Parts))
+	for i := range n.Parts {
+		parts[i] = convertASTFieldReference(f, n.Parts[i])
+	}
+	dots := make([]*ast2.RuneNode, len(n.Dots))
+	for i := range n.Dots {
+		dots[i] = convertASTRune(f, n.Dots[i])
+	}
+	return ast2.NewOptionNameNode(parts, dots)
+}
+
+// convertASTFieldReference converts one component of an option name: an
+// extension reference "(ext)", an Any type reference "[prefix/Name]", or a
+// plain identifier.
+func convertASTFieldReference(f *ast.FileNode, n *ast.FieldReferenceNode) *ast2.FieldReferenceNode {
+	switch {
+	case n.IsExtension():
+		return ast2.NewExtensionFieldReferenceNode(
+			convertASTRune(f, n.Open),
+			convertASTIdent(f, n.Name),
+			convertASTRune(f, n.Close),
+		)
+	case n.IsAnyTypeReference():
+		return ast2.NewAnyTypeReferenceNode(
+			convertASTRune(f, n.Open),
+			convertASTIdent(f, n.URLPrefix),
+			convertASTRune(f, n.Slash),
+			convertASTIdent(f, n.Name),
+			convertASTRune(f, n.Close),
+		)
+	default:
+		return ast2.NewFieldReferenceNode(convertASTIdent(f, n.Name).(*ast2.IdentNode))
+	}
+}
+
+// convertASTMessage converts a message declaration and all of its body
+// elements.
+func convertASTMessage(f *ast.FileNode, m *ast.MessageNode) *ast2.MessageNode {
+	decls := make([]ast2.MessageElement, len(m.Decls))
+	for i := range m.Decls {
+		decls[i] = convertASTMessageElement(f, m.Decls[i])
+	}
+	return ast2.NewMessageNode(
+		convertASTKeyword(f, m.Keyword),
+		convertASTIdentToken(f, m.Name),
+		convertASTRune(f, m.OpenBrace),
+		decls,
+		convertASTRune(f, m.CloseBrace),
+	)
+}
+
+// convertASTMessageElement dispatches on the concrete type of a message body
+// element; the default panic indicates an unhandled protocompile AST type.
+func convertASTMessageElement(f *ast.FileNode, el ast.MessageElement) ast2.MessageElement {
+	switch el := el.(type) {
+	case *ast.OptionNode:
+		return convertASTOption(f, el)
+	case *ast.FieldNode:
+		return convertASTField(f, el)
+	case *ast.MapFieldNode:
+		return convertASTMapField(f, el)
+	case *ast.OneofNode:
+		return convertASTOneOf(f, el)
+	case *ast.GroupNode:
+		return convertASTGroup(f, el)
+	case *ast.MessageNode:
+		return convertASTMessage(f, el)
+	case *ast.EnumNode:
+		return convertASTEnum(f, el)
+	case *ast.ExtendNode:
+		return convertASTExtend(f, el)
+	case *ast.ExtensionRangeNode:
+		return convertASTExtensions(f, el)
+	case *ast.ReservedNode:
+		return convertASTReserved(f, el)
+	case *ast.EmptyDeclNode:
+		return convertASTEmpty(f, el)
+	default:
+		panic(fmt.Sprintf("unrecognized type of ast.MessageElement: %T", el))
+	}
+}
+
+// convertASTField converts a normal field declaration; the label keyword and
+// compact options are optional and converted only when present.
+func convertASTField(f *ast.FileNode, fld *ast.FieldNode) *ast2.FieldNode {
+	var lbl *ast2.KeywordNode
+	if fld.Label.KeywordNode != nil {
+		lbl = convertASTKeyword(f, fld.Label.KeywordNode)
+	}
+	var opts *ast2.CompactOptionsNode
+	if fld.Options != nil {
+		opts = convertASTCompactOptions(f, fld.Options)
+	}
+	return ast2.NewFieldNode(
+		lbl,
+		convertASTIdent(f, fld.FldType),
+		convertASTIdentToken(f, fld.Name),
+		convertASTRune(f, fld.Equals),
+		convertASTUintLiteral(f, fld.Tag),
+		opts,
+		convertASTRune(f, fld.Semicolon),
+	)
+}
+
+// convertASTMapField converts a map<K,V> field declaration.
+func convertASTMapField(f *ast.FileNode, fld *ast.MapFieldNode) *ast2.MapFieldNode {
+	var opts *ast2.CompactOptionsNode
+	if fld.Options != nil {
+		opts = convertASTCompactOptions(f, fld.Options)
+	}
+	return ast2.NewMapFieldNode(
+		convertASTMapFieldType(f, fld.MapType),
+		convertASTIdentToken(f, fld.Name),
+		convertASTRune(f, fld.Equals),
+		convertASTUintLiteral(f, fld.Tag),
+		opts,
+		convertASTRune(f, fld.Semicolon),
+	)
+}
+
+// convertASTMapFieldType converts the "map<K, V>" type portion of a map field.
+func convertASTMapFieldType(f *ast.FileNode, t *ast.MapTypeNode) *ast2.MapTypeNode {
+	return ast2.NewMapTypeNode(
+		convertASTKeyword(f, t.Keyword),
+		convertASTRune(f, t.OpenAngle),
+		convertASTIdentToken(f, t.KeyType),
+		convertASTRune(f, t.Comma),
+		convertASTIdent(f, t.ValueType),
+		convertASTRune(f, t.CloseAngle),
+	)
+}
+
+// convertASTGroup converts a (proto2) group declaration, including its body
+// elements; label and compact options are optional.
+func convertASTGroup(f *ast.FileNode, g *ast.GroupNode) *ast2.GroupNode {
+	var lbl *ast2.KeywordNode
+	if g.Label.KeywordNode != nil {
+		lbl = convertASTKeyword(f, g.Label.KeywordNode)
+	}
+	var opts *ast2.CompactOptionsNode
+	if g.Options != nil {
+		opts = convertASTCompactOptions(f, g.Options)
+	}
+	decls := make([]ast2.MessageElement, len(g.Decls))
+	for i := range g.Decls {
+		decls[i] = convertASTMessageElement(f, g.Decls[i])
+	}
+	return ast2.NewGroupNode(
+		lbl,
+		convertASTKeyword(f, g.Keyword),
+		convertASTIdentToken(f, g.Name),
+		convertASTRune(f, g.Equals),
+		convertASTUintLiteral(f, g.Tag),
+		opts,
+		convertASTRune(f, g.OpenBrace),
+		decls,
+		convertASTRune(f, g.CloseBrace),
+	)
+}
+
+// convertASTOneOf converts a oneof declaration and its body elements.
+func convertASTOneOf(f *ast.FileNode, oo *ast.OneofNode) *ast2.OneOfNode {
+	decls := make([]ast2.OneOfElement, len(oo.Decls))
+	for i := range oo.Decls {
+		decls[i] = convertASTOneOfElement(f, oo.Decls[i])
+	}
+	return ast2.NewOneOfNode(
+		convertASTKeyword(f, oo.Keyword),
+		convertASTIdentToken(f, oo.Name),
+		convertASTRune(f, oo.OpenBrace),
+		decls,
+		convertASTRune(f, oo.CloseBrace),
+	)
+}
+
+// convertASTOneOfElement dispatches on the concrete type of a oneof body
+// element.
+func convertASTOneOfElement(f *ast.FileNode, el ast.OneofElement) ast2.OneOfElement {
+	switch el := el.(type) {
+	case *ast.OptionNode:
+		return convertASTOption(f, el)
+	case *ast.FieldNode:
+		return convertASTField(f, el)
+	case *ast.GroupNode:
+		return convertASTGroup(f, el)
+	case *ast.EmptyDeclNode:
+		return convertASTEmpty(f, el)
+	default:
+		panic(fmt.Sprintf("unrecognized type of ast.OneOfElement: %T", el))
+	}
+}
+
+// convertASTExtensions converts an "extensions ..." declaration, including
+// its ranges, the commas between them, and optional compact options.
+func convertASTExtensions(f *ast.FileNode, e *ast.ExtensionRangeNode) *ast2.ExtensionRangeNode {
+	var opts *ast2.CompactOptionsNode
+	if e.Options != nil {
+		opts = convertASTCompactOptions(f, e.Options)
+	}
+	ranges := make([]*ast2.RangeNode, len(e.Ranges))
+	for i := range e.Ranges {
+		ranges[i] = convertASTRange(f, e.Ranges[i])
+	}
+	commas := make([]*ast2.RuneNode, len(e.Commas))
+	for i := range e.Commas {
+		commas[i] = convertASTRune(f, e.Commas[i])
+	}
+	return ast2.NewExtensionRangeNode(
+		convertASTKeyword(f, e.Keyword),
+		ranges, commas, opts,
+		convertASTRune(f, e.Semicolon),
+	)
+}
+
+// convertASTReserved converts a "reserved ..." declaration. A reserved
+// statement holds either ranges or names, never both, so the presence of
+// ranges selects which node constructor is used.
+func convertASTReserved(f *ast.FileNode, r *ast.ReservedNode) *ast2.ReservedNode {
+	ranges := make([]*ast2.RangeNode, len(r.Ranges))
+	for i := range r.Ranges {
+		ranges[i] = convertASTRange(f, r.Ranges[i])
+	}
+	commas := make([]*ast2.RuneNode, len(r.Commas))
+	for i := range r.Commas {
+		commas[i] = convertASTRune(f, r.Commas[i])
+	}
+	names := make([]ast2.StringValueNode, len(r.Names))
+	for i := range r.Names {
+		names[i] = convertASTString(f, r.Names[i])
+	}
+	if len(r.Ranges) > 0 {
+		return ast2.NewReservedRangesNode(
+			convertASTKeyword(f, r.Keyword),
+			ranges, commas,
+			convertASTRune(f, r.Semicolon),
+		)
+	}
+	return ast2.NewReservedNamesNode(
+		convertASTKeyword(f, r.Keyword),
+		names, commas,
+		convertASTRune(f, r.Semicolon),
+	)
+}
+
+// convertASTRange converts one tag range. "to", an explicit end value, and the
+// "max" keyword are all optional ("5", "5 to 10", "5 to max").
+func convertASTRange(f *ast.FileNode, r *ast.RangeNode) *ast2.RangeNode {
+	var to, max *ast2.KeywordNode
+	var end ast2.IntValueNode
+	if r.To != nil {
+		to = convertASTKeyword(f, r.To)
+	}
+	if r.Max != nil {
+		max = convertASTKeyword(f, r.Max)
+	}
+	if r.EndVal != nil {
+		end = convertASTInt(f, r.EndVal)
+	}
+	return ast2.NewRangeNode(
+		convertASTInt(f, r.StartVal),
+		to, end, max,
+	)
+}
+
+// convertASTEnum converts an enum declaration and its body elements.
+func convertASTEnum(f *ast.FileNode, e *ast.EnumNode) *ast2.EnumNode {
+	decls := make([]ast2.EnumElement, len(e.Decls))
+	for i := range e.Decls {
+		decls[i] = convertASTEnumElement(f, e.Decls[i])
+	}
+	return ast2.NewEnumNode(
+		convertASTKeyword(f, e.Keyword),
+		convertASTIdentToken(f, e.Name),
+		convertASTRune(f, e.OpenBrace),
+		decls,
+		convertASTRune(f, e.CloseBrace),
+	)
+}
+
+// convertASTEnumElement dispatches on the concrete type of an enum body
+// element.
+func convertASTEnumElement(f *ast.FileNode, el ast.EnumElement) ast2.EnumElement {
+	switch el := el.(type) {
+	case *ast.OptionNode:
+		return convertASTOption(f, el)
+	case *ast.EnumValueNode:
+		return convertASTEnumValue(f, el)
+	case *ast.ReservedNode:
+		return convertASTReserved(f, el)
+	case *ast.EmptyDeclNode:
+		return convertASTEmpty(f, el)
+	default:
+		panic(fmt.Sprintf("unrecognized type of ast.EnumElement: %T", el))
+	}
+}
+
+// convertASTEnumValue converts one enum value declaration; compact options are
+// optional.
+func convertASTEnumValue(f *ast.FileNode, e *ast.EnumValueNode) *ast2.EnumValueNode {
+	var opts *ast2.CompactOptionsNode
+	if e.Options != nil {
+		opts = convertASTCompactOptions(f, e.Options)
+	}
+	return ast2.NewEnumValueNode(
+		convertASTIdentToken(f, e.Name),
+		convertASTRune(f, e.Equals),
+		convertASTInt(f, e.Number),
+		opts,
+		convertASTRune(f, e.Semicolon),
+	)
+}
+
+// convertASTExtend converts an "extend" block and its body elements.
+func convertASTExtend(f *ast.FileNode, e *ast.ExtendNode) *ast2.ExtendNode {
+	decls := make([]ast2.ExtendElement, len(e.Decls))
+	for i := range e.Decls {
+		decls[i] = convertASTExtendElement(f, e.Decls[i])
+	}
+	return ast2.NewExtendNode(
+		convertASTKeyword(f, e.Keyword),
+		convertASTIdent(f, e.Extendee),
+		convertASTRune(f, e.OpenBrace),
+		decls,
+		convertASTRune(f, e.CloseBrace),
+	)
+}
+
+// convertASTExtendElement dispatches on the concrete type of an extend body
+// element.
+func convertASTExtendElement(f *ast.FileNode, el ast.ExtendElement) ast2.ExtendElement {
+	switch el := el.(type) {
+	case *ast.FieldNode:
+		return convertASTField(f, el)
+	case *ast.GroupNode:
+		return convertASTGroup(f, el)
+	case *ast.EmptyDeclNode:
+		return convertASTEmpty(f, el)
+	default:
+		panic(fmt.Sprintf("unrecognized type of ast.ExtendElement: %T", el))
+	}
+}
+
+// convertASTService converts a service declaration and its body elements.
+func convertASTService(f *ast.FileNode, s *ast.ServiceNode) *ast2.ServiceNode {
+	decls := make([]ast2.ServiceElement, len(s.Decls))
+	for i := range s.Decls {
+		decls[i] = convertASTServiceElement(f, s.Decls[i])
+	}
+	return ast2.NewServiceNode(
+		convertASTKeyword(f, s.Keyword),
+		convertASTIdentToken(f, s.Name),
+		convertASTRune(f, s.OpenBrace),
+		decls,
+		convertASTRune(f, s.CloseBrace),
+	)
+}
+
+// convertASTServiceElement dispatches on the concrete type of a service body
+// element.
+func convertASTServiceElement(f *ast.FileNode, el ast.ServiceElement) ast2.ServiceElement {
+	switch el := el.(type) {
+	case *ast.OptionNode:
+		return convertASTOption(f, el)
+	case *ast.RPCNode:
+		return convertASTMethod(f, el)
+	case *ast.EmptyDeclNode:
+		return convertASTEmpty(f, el)
+	default:
+		panic(fmt.Sprintf("unrecognized type of ast.ServiceElement: %T", el))
+	}
+}
+
+// convertASTMethod converts an RPC declaration. A method without a body is
+// indicated by a nil OpenBrace and is built with NewRPCNode (terminated by a
+// semicolon); otherwise the body's declarations are converted and
+// NewRPCNodeWithBody is used.
+func convertASTMethod(f *ast.FileNode, m *ast.RPCNode) *ast2.RPCNode {
+	if m.OpenBrace == nil {
+		return ast2.NewRPCNode(
+			convertASTKeyword(f, m.Keyword),
+			convertASTIdentToken(f, m.Name),
+			convertASTMethodType(f, m.Input),
+			convertASTKeyword(f, m.Returns),
+			convertASTMethodType(f, m.Output),
+			convertASTRune(f, m.Semicolon),
+		)
+	}
+	decls := make([]ast2.RPCElement, len(m.Decls))
+	for i := range m.Decls {
+		decls[i] = convertASTMethodElement(f, m.Decls[i])
+	}
+	return ast2.NewRPCNodeWithBody(
+		convertASTKeyword(f, m.Keyword),
+		convertASTIdentToken(f, m.Name),
+		convertASTMethodType(f, m.Input),
+		convertASTKeyword(f, m.Returns),
+		convertASTMethodType(f, m.Output),
+		convertASTRune(f, m.OpenBrace),
+		decls,
+		convertASTRune(f, m.CloseBrace),
+	)
+}
+
+// convertASTMethodElement converts a single element of an RPC body (an
+// option or an empty declaration); it panics on any other type.
+func convertASTMethodElement(f *ast.FileNode, el ast.RPCElement) ast2.RPCElement {
+	switch el := el.(type) {
+	case *ast.OptionNode:
+		return convertASTOption(f, el)
+	case *ast.EmptyDeclNode:
+		return convertASTEmpty(f, el)
+	default:
+		panic(fmt.Sprintf("unrecognized type of ast.RPCElement: %T", el))
+	}
+}
+
+// convertASTMethodType converts an RPC request/response type. The "stream"
+// keyword is optional and converted only when present.
+func convertASTMethodType(f *ast.FileNode, t *ast.RPCTypeNode) *ast2.RPCTypeNode {
+	var stream *ast2.KeywordNode
+	if t.Stream != nil {
+		stream = convertASTKeyword(f, t.Stream)
+	}
+	return ast2.NewRPCTypeNode(
+		convertASTRune(f, t.OpenParen),
+		stream,
+		convertASTIdent(f, t.MessageType),
+		convertASTRune(f, t.CloseParen),
+	)
+}
+
+// convertASTCompactOptions converts a bracketed option list, including the
+// comma tokens that separate the options.
+func convertASTCompactOptions(f *ast.FileNode, opts *ast.CompactOptionsNode) *ast2.CompactOptionsNode {
+	elems := make([]*ast2.OptionNode, len(opts.Options))
+	for i := range opts.Options {
+		elems[i] = convertASTOption(f, opts.Options[i])
+	}
+	commas := make([]*ast2.RuneNode, len(opts.Commas))
+	for i := range opts.Commas {
+		commas[i] = convertASTRune(f, opts.Commas[i])
+	}
+	return ast2.NewCompactOptionsNode(
+		convertASTRune(f, opts.OpenBracket),
+		elems, commas,
+		convertASTRune(f, opts.CloseBracket),
+	)
+}
+
+// convertASTEmpty converts an empty declaration (a stray ";").
+func convertASTEmpty(f *ast.FileNode, e *ast.EmptyDeclNode) *ast2.EmptyDeclNode {
+	return ast2.NewEmptyDeclNode(convertASTRune(f, e.Semicolon))
+}
+
+// convertASTValue converts any value node (identifiers and the various
+// literal forms), dispatching on the concrete type; it panics on any
+// other type.
+func convertASTValue(f *ast.FileNode, v ast.ValueNode) ast2.ValueNode {
+	switch v := v.(type) {
+	case *ast.IdentNode:
+		return convertASTIdentToken(f, v)
+	case *ast.CompoundIdentNode:
+		return convertASTCompoundIdent(f, v)
+	case *ast.StringLiteralNode:
+		return convertASTStringLiteral(f, v)
+	case *ast.CompoundStringLiteralNode:
+		return convertASTCompoundStringLiteral(f, v)
+	case *ast.UintLiteralNode:
+		return convertASTUintLiteral(f, v)
+	case *ast.NegativeIntLiteralNode:
+		return convertASTNegativeIntLiteral(f, v)
+	case *ast.FloatLiteralNode:
+		return convertASTFloatLiteral(f, v)
+	case *ast.SpecialFloatLiteralNode:
+		return convertASTSpecialFloatLiteral(f, v)
+	case *ast.SignedFloatLiteralNode:
+		return convertASTSignedFloatLiteral(f, v)
+	case *ast.ArrayLiteralNode:
+		return convertASTArrayLiteral(f, v)
+	case *ast.MessageLiteralNode:
+		return convertASTMessageLiteral(f, v)
+	default:
+		panic(fmt.Sprintf("unrecognized type of ast.ValueNode: %T", v))
+	}
+}
+
+// convertASTIdent converts an identifier, which is either a simple token or
+// a compound (dotted) identifier; it panics on any other type.
+func convertASTIdent(f *ast.FileNode, ident ast.IdentValueNode) ast2.IdentValueNode {
+	switch ident := ident.(type) {
+	case *ast.IdentNode:
+		return convertASTIdentToken(f, ident)
+	case *ast.CompoundIdentNode:
+		return convertASTCompoundIdent(f, ident)
+	default:
+		panic(fmt.Sprintf("unrecognized type of ast.IdentValueNode: %T", ident))
+	}
+}
+
+// convertASTIdentToken converts a single identifier token, carrying over its
+// lexical details via convertASTTokenInfo.
+func convertASTIdentToken(f *ast.FileNode, ident *ast.IdentNode) *ast2.IdentNode {
+	return ast2.NewIdentNode(ident.Val, convertASTTokenInfo(f, ident.Token()))
+}
+
+// convertASTCompoundIdent converts a dotted identifier: the optional leading
+// dot, the component tokens, and the dot tokens between them.
+func convertASTCompoundIdent(f *ast.FileNode, ident *ast.CompoundIdentNode) *ast2.CompoundIdentNode {
+	var leadingDot *ast2.RuneNode
+	if ident.LeadingDot != nil {
+		leadingDot = convertASTRune(f, ident.LeadingDot)
+	}
+	components := make([]*ast2.IdentNode, len(ident.Components))
+	for i := range ident.Components {
+		components[i] = convertASTIdentToken(f, ident.Components[i])
+	}
+	dots := make([]*ast2.RuneNode, len(ident.Dots))
+	for i := range ident.Dots {
+		dots[i] = convertASTRune(f, ident.Dots[i])
+	}
+	return ast2.NewCompoundIdentNode(leadingDot, components, dots)
+}
+
+// convertASTString converts a string value, which is either a single literal
+// or a compound of adjacent literals; it panics on any other type.
+func convertASTString(f *ast.FileNode, str ast.StringValueNode) ast2.StringValueNode {
+	switch str := str.(type) {
+	case *ast.StringLiteralNode:
+		return convertASTStringLiteral(f, str)
+	case *ast.CompoundStringLiteralNode:
+		return convertASTCompoundStringLiteral(f, str)
+	default:
+		panic(fmt.Sprintf("unrecognized type of ast.StringValueNode: %T", str))
+	}
+}
+
+// convertASTStringLiteral converts a single string literal token.
+func convertASTStringLiteral(f *ast.FileNode, str *ast.StringLiteralNode) *ast2.StringLiteralNode {
+	return ast2.NewStringLiteralNode(str.Val, convertASTTokenInfo(f, str.Token()))
+}
+
+// convertASTCompoundStringLiteral converts a run of adjacent string
+// literals. Every child is expected to be a *ast.StringLiteralNode; the
+// type assertion panics otherwise.
+func convertASTCompoundStringLiteral(f *ast.FileNode, str *ast.CompoundStringLiteralNode) *ast2.CompoundStringLiteralNode {
+	children := str.Children()
+	components := make([]*ast2.StringLiteralNode, len(children))
+	for i := range children {
+		components[i] = convertASTStringLiteral(f, children[i].(*ast.StringLiteralNode))
+	}
+	return ast2.NewCompoundLiteralStringNode(components...)
+}
+
+// convertASTInt converts an integer value, which is either an unsigned
+// literal or a negated one; it panics on any other type.
+func convertASTInt(f *ast.FileNode, n ast.IntValueNode) ast2.IntValueNode {
+	switch n := n.(type) {
+	case *ast.UintLiteralNode:
+		return convertASTUintLiteral(f, n)
+	case *ast.NegativeIntLiteralNode:
+		return convertASTNegativeIntLiteral(f, n)
+	default:
+		panic(fmt.Sprintf("unrecognized type of ast.IntValueNode: %T", n))
+	}
+}
+
+// convertASTUintLiteral converts an unsigned integer literal token.
+func convertASTUintLiteral(f *ast.FileNode, n *ast.UintLiteralNode) *ast2.UintLiteralNode {
+	return ast2.NewUintLiteralNode(n.Val, convertASTTokenInfo(f, n.Token()))
+}
+
+// convertASTNegativeIntLiteral converts a negative integer literal: the
+// minus-sign rune plus the unsigned magnitude.
+func convertASTNegativeIntLiteral(f *ast.FileNode, n *ast.NegativeIntLiteralNode) *ast2.NegativeIntLiteralNode {
+	return ast2.NewNegativeIntLiteralNode(convertASTRune(f, n.Minus), convertASTUintLiteral(f, n.Uint))
+}
+
+// convertASTFloat converts a floating-point value: a float literal, a
+// keyword-based special float literal, or a uint literal used in a float
+// context; it panics on any other type.
+func convertASTFloat(f *ast.FileNode, n ast.FloatValueNode) ast2.FloatValueNode {
+	switch n := n.(type) {
+	case *ast.FloatLiteralNode:
+		return convertASTFloatLiteral(f, n)
+	case *ast.SpecialFloatLiteralNode:
+		return convertASTSpecialFloatLiteral(f, n)
+	case *ast.UintLiteralNode:
+		return convertASTUintLiteral(f, n)
+	default:
+		panic(fmt.Sprintf("unrecognized type of ast.FloatValueNode: %T", n))
+	}
+}
+
+// convertASTFloatLiteral converts a floating-point literal token.
+func convertASTFloatLiteral(f *ast.FileNode, n *ast.FloatLiteralNode) *ast2.FloatLiteralNode {
+	return ast2.NewFloatLiteralNode(n.Val, convertASTTokenInfo(f, n.Token()))
+}
+
+// convertASTSpecialFloatLiteral converts a keyword-based special float
+// literal by converting its underlying keyword node.
+func convertASTSpecialFloatLiteral(f *ast.FileNode, n *ast.SpecialFloatLiteralNode) *ast2.SpecialFloatLiteralNode {
+	return ast2.NewSpecialFloatLiteralNode(convertASTKeyword(f, n.KeywordNode))
+}
+
+// convertASTSignedFloatLiteral converts a signed float literal: the sign
+// rune plus the (unsigned) float value.
+func convertASTSignedFloatLiteral(f *ast.FileNode, n *ast.SignedFloatLiteralNode) *ast2.SignedFloatLiteralNode {
+	return ast2.NewSignedFloatLiteralNode(convertASTRune(f, n.Sign), convertASTFloat(f, n.Float))
+}
+
+// convertASTArrayLiteral converts an array literal, including the comma
+// tokens that separate its elements.
+func convertASTArrayLiteral(f *ast.FileNode, ar *ast.ArrayLiteralNode) *ast2.ArrayLiteralNode {
+	vals := make([]ast2.ValueNode, len(ar.Elements))
+	for i := range ar.Elements {
+		vals[i] = convertASTValue(f, ar.Elements[i])
+	}
+	commas := make([]*ast2.RuneNode, len(ar.Commas))
+	for i := range ar.Commas {
+		commas[i] = convertASTRune(f, ar.Commas[i])
+	}
+	return ast2.NewArrayLiteralNode(
+		convertASTRune(f, ar.OpenBracket),
+		vals, commas,
+		convertASTRune(f, ar.CloseBracket),
+	)
+}
+
+// convertASTMessageLiteral converts a message literal. Entries in Seps may
+// be nil (a field without a trailing separator); those entries are left nil
+// in the converted slice rather than being converted.
+func convertASTMessageLiteral(f *ast.FileNode, m *ast.MessageLiteralNode) *ast2.MessageLiteralNode {
+	fields := make([]*ast2.MessageFieldNode, len(m.Elements))
+	for i := range m.Elements {
+		fields[i] = convertASTMessageLiteralField(f, m.Elements[i])
+	}
+	seps := make([]*ast2.RuneNode, len(m.Seps))
+	for i := range m.Seps {
+		if m.Seps[i] != nil {
+			seps[i] = convertASTRune(f, m.Seps[i])
+		}
+	}
+	return ast2.NewMessageLiteralNode(
+		convertASTRune(f, m.Open),
+		fields, seps,
+		convertASTRune(f, m.Close),
+	)
+}
+
+// convertASTMessageLiteralField converts one field of a message literal.
+// The name/value separator is optional and converted only when present.
+func convertASTMessageLiteralField(f *ast.FileNode, fld *ast.MessageFieldNode) *ast2.MessageFieldNode {
+	var sep *ast2.RuneNode
+	if fld.Sep != nil {
+		sep = convertASTRune(f, fld.Sep)
+	}
+	return ast2.NewMessageFieldNode(
+		convertASTFieldReference(f, fld.Name),
+		sep,
+		convertASTValue(f, fld.Val),
+	)
+}
+
+// convertASTKeyword converts a keyword token.
+func convertASTKeyword(f *ast.FileNode, k *ast.KeywordNode) *ast2.KeywordNode {
+	return ast2.NewKeywordNode(k.Val, convertASTTokenInfo(f, k.Token()))
+}
+
+// convertASTRune converts a single punctuation/rune token.
+func convertASTRune(f *ast.FileNode, r *ast.RuneNode) *ast2.RuneNode {
+	return ast2.NewRuneNode(r.Rune, convertASTTokenInfo(f, r.Token()))
+}
+
+// convertASTTokenInfo looks up the lexical details for tok in the file and
+// copies them into the ast2 form: position range, raw text, leading
+// whitespace, and the comments attached before and after the token.
+func convertASTTokenInfo(f *ast.FileNode, tok ast.Token) ast2.TokenInfo {
+	info := f.TokenInfo(tok)
+	return ast2.TokenInfo{
+		PosRange: ast2.PosRange{
+			Start: info.Start(),
+			End: info.End(),
+		},
+		RawText: info.RawText(),
+		LeadingWhitespace: info.LeadingWhitespace(),
+		LeadingComments: convertASTComments(info.LeadingComments()),
+		TrailingComments: convertASTComments(info.TrailingComments()),
+	}
+}
+
+// convertASTComments copies a sequence of comments, preserving each
+// comment's position range, leading whitespace, and raw text.
+func convertASTComments(comments ast.Comments) []ast2.Comment {
+	results := make([]ast2.Comment, comments.Len())
+	for i := 0; i < comments.Len(); i++ {
+		cmt := comments.Index(i)
+		results[i] = ast2.Comment{
+			PosRange: ast2.PosRange{
+				Start: cmt.Start(),
+				End: cmt.End(),
+			},
+			LeadingWhitespace: cmt.LeadingWhitespace(),
+			Text: cmt.RawText(),
+		}
+	}
+	return results
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/doc.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/doc.go
new file mode 100644
index 0000000..e890200
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/doc.go
@@ -0,0 +1,27 @@
+// Package ast defines types for modeling the AST (Abstract Syntax
+// Tree) for the protocol buffers source language.
+//
+// All nodes of the tree implement the Node interface. Leaf nodes in the
+// tree implement TerminalNode and all others implement CompositeNode.
+// The root of the tree for a proto source file is a *FileNode.
+//
+// Comments are not represented as nodes in the tree. Instead, they are
+// attached to all terminal nodes in the tree. So, when lexing, comments
+// are accumulated until the next non-comment token is found. The AST
+// model in this package thus provides access to all comments in the
+// file, regardless of location (unlike the SourceCodeInfo present in
+// descriptor protos, which are lossy). The comments associated with
+// a non-leaf/non-token node (i.e. a CompositeNode) come from the first
+// and last nodes in its sub-tree.
+//
+// Creation of AST nodes should use the factory functions in this
+// package instead of struct literals. Some factory functions accept
+// optional arguments, which means the arguments can be nil. If nil
+// values are provided for other (non-optional) arguments, the resulting
+// node may be invalid and cause panics later in the program.
+//
+// This package defines numerous interfaces. However, user code should
+// not attempt to implement any of them. Most consumers of an AST will
+// not work correctly if they encounter concrete implementations other
+// than the ones defined in this package.
+package ast
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/enum.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/enum.go
new file mode 100644
index 0000000..446a6a0
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/enum.go
@@ -0,0 +1,154 @@
+package ast
+
+import "fmt"
+
+// EnumNode represents an enum declaration. Example:
+//
+// enum Foo { BAR = 0; BAZ = 1 }
+type EnumNode struct {
+ compositeNode
+ Keyword *KeywordNode
+ Name *IdentNode
+ OpenBrace *RuneNode
+ Decls []EnumElement
+ CloseBrace *RuneNode
+}
+
+// fileElement and msgElement are marker methods that allow *EnumNode to be
+// used as a file-level element and as a message-level element, respectively.
+func (*EnumNode) fileElement() {}
+func (*EnumNode) msgElement() {}
+
+// NewEnumNode creates a new *EnumNode. All arguments must be non-nil. While
+// it is technically allowed for decls to be nil or empty, the resulting node
+// will not be a valid enum, which must have at least one value.
+// - keyword: The token corresponding to the "enum" keyword.
+// - name: The token corresponding to the enum's name.
+// - openBrace: The token corresponding to the "{" rune that starts the body.
+// - decls: All declarations inside the enum body.
+// - closeBrace: The token corresponding to the "}" rune that ends the body.
+func NewEnumNode(keyword *KeywordNode, name *IdentNode, openBrace *RuneNode, decls []EnumElement, closeBrace *RuneNode) *EnumNode {
+ if keyword == nil {
+ panic("keyword is nil")
+ }
+ if name == nil {
+ panic("name is nil")
+ }
+ if openBrace == nil {
+ panic("openBrace is nil")
+ }
+ if closeBrace == nil {
+ panic("closeBrace is nil")
+ }
+ children := make([]Node, 0, 4+len(decls))
+ children = append(children, keyword, name, openBrace)
+ for _, decl := range decls {
+ children = append(children, decl)
+ }
+ children = append(children, closeBrace)
+
+ for _, decl := range decls {
+ switch decl.(type) {
+ case *OptionNode, *EnumValueNode, *ReservedNode, *EmptyDeclNode:
+ default:
+ panic(fmt.Sprintf("invalid EnumElement type: %T", decl))
+ }
+ }
+
+ return &EnumNode{
+ compositeNode: compositeNode{
+ children: children,
+ },
+ Keyword: keyword,
+ Name: name,
+ OpenBrace: openBrace,
+ CloseBrace: closeBrace,
+ Decls: decls,
+ }
+}
+
+// EnumElement is an interface implemented by all AST nodes that can
+// appear in the body of an enum declaration.
+type EnumElement interface {
+ Node
+ enumElement()
+}
+
+var _ EnumElement = (*OptionNode)(nil)
+var _ EnumElement = (*EnumValueNode)(nil)
+var _ EnumElement = (*ReservedNode)(nil)
+var _ EnumElement = (*EmptyDeclNode)(nil)
+
+// EnumValueDeclNode is a placeholder interface for AST nodes that represent
+// enum values. This allows NoSourceNode to be used in place of *EnumValueNode
+// for some usages.
+type EnumValueDeclNode interface {
+ Node
+ GetName() Node
+ GetNumber() Node
+}
+
+var _ EnumValueDeclNode = (*EnumValueNode)(nil)
+var _ EnumValueDeclNode = NoSourceNode{}
+
+// EnumValueNode represents an enum value declaration. (The previous comment
+// misnamed this type "EnumNode".) Example:
+//
+//  UNSET = 0 [deprecated = true];
+type EnumValueNode struct {
+	compositeNode
+	Name      *IdentNode
+	Equals    *RuneNode
+	Number    IntValueNode
+	Options   *CompactOptionsNode
+	Semicolon *RuneNode
+}
+
+func (*EnumValueNode) enumElement() {}
+
+// NewEnumValueNode creates a new *EnumValueNode. All arguments must be non-nil
+// except opts which is only non-nil if the declaration included options.
+// - name: The token corresponding to the enum value's name.
+// - equals: The token corresponding to the '=' rune after the name.
+// - number: The token corresponding to the enum value's number.
+// - opts: Optional set of enum value options.
+// - semicolon: The token corresponding to the ";" rune that ends the declaration.
+func NewEnumValueNode(name *IdentNode, equals *RuneNode, number IntValueNode, opts *CompactOptionsNode, semicolon *RuneNode) *EnumValueNode {
+ if name == nil {
+ panic("name is nil")
+ }
+ if equals == nil {
+ panic("equals is nil")
+ }
+ if number == nil {
+ panic("number is nil")
+ }
+ if semicolon == nil {
+ panic("semicolon is nil")
+ }
+ numChildren := 4
+ if opts != nil {
+ numChildren++
+ }
+ children := make([]Node, 0, numChildren)
+ children = append(children, name, equals, number)
+ if opts != nil {
+ children = append(children, opts)
+ }
+ children = append(children, semicolon)
+ return &EnumValueNode{
+ compositeNode: compositeNode{
+ children: children,
+ },
+ Name: name,
+ Equals: equals,
+ Number: number,
+ Options: opts,
+ Semicolon: semicolon,
+ }
+}
+
+// GetName returns the node for the enum value's name. It implements
+// EnumValueDeclNode.
+func (e *EnumValueNode) GetName() Node {
+	return e.Name
+}
+
+// GetNumber returns the node for the enum value's numeric value. It
+// implements EnumValueDeclNode.
+func (e *EnumValueNode) GetNumber() Node {
+	return e.Number
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/field.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/field.go
new file mode 100644
index 0000000..7ec9391
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/field.go
@@ -0,0 +1,659 @@
+package ast
+
+import "fmt"
+
+// FieldDeclNode is a node in the AST that defines a field. This includes
+// normal message fields as well as extensions. There are multiple types
+// of AST nodes that declare fields:
+// - *FieldNode
+// - *GroupNode
+// - *MapFieldNode
+// - *SyntheticMapField
+//
+// This also allows NoSourceNode to be used in place of one of the above
+// for some usages.
+type FieldDeclNode interface {
+ Node
+ FieldLabel() Node
+ FieldName() Node
+ FieldType() Node
+ FieldTag() Node
+ FieldExtendee() Node
+ GetGroupKeyword() Node
+ GetOptions() *CompactOptionsNode
+}
+
+var _ FieldDeclNode = (*FieldNode)(nil)
+var _ FieldDeclNode = (*GroupNode)(nil)
+var _ FieldDeclNode = (*MapFieldNode)(nil)
+var _ FieldDeclNode = (*SyntheticMapField)(nil)
+var _ FieldDeclNode = NoSourceNode{}
+
+// FieldNode represents a normal field declaration (not groups or maps). It
+// can represent extension fields as well as non-extension fields (both inside
+// of messages and inside of one-ofs). Example:
+//
+// optional string foo = 1;
+type FieldNode struct {
+ compositeNode
+ Label FieldLabel
+ FldType IdentValueNode
+ Name *IdentNode
+ Equals *RuneNode
+ Tag *UintLiteralNode
+ Options *CompactOptionsNode
+ Semicolon *RuneNode
+
+ // This is an up-link to the containing *ExtendNode for fields
+ // that are defined inside of "extend" blocks.
+ Extendee *ExtendNode
+}
+
+func (*FieldNode) msgElement() {}
+func (*FieldNode) oneOfElement() {}
+func (*FieldNode) extendElement() {}
+
+// NewFieldNode creates a new *FieldNode. The label and options arguments may be
+// nil but the others must be non-nil.
+// - label: The token corresponding to the label keyword if present ("optional",
+// "required", or "repeated").
+// - fieldType: The token corresponding to the field's type.
+// - name: The token corresponding to the field's name.
+// - equals: The token corresponding to the '=' rune after the name.
+// - tag: The token corresponding to the field's tag number.
+// - opts: Optional set of field options.
+// - semicolon: The token corresponding to the ";" rune that ends the declaration.
+func NewFieldNode(label *KeywordNode, fieldType IdentValueNode, name *IdentNode, equals *RuneNode, tag *UintLiteralNode, opts *CompactOptionsNode, semicolon *RuneNode) *FieldNode {
+ if fieldType == nil {
+ panic("fieldType is nil")
+ }
+ if name == nil {
+ panic("name is nil")
+ }
+ if equals == nil {
+ panic("equals is nil")
+ }
+ if tag == nil {
+ panic("tag is nil")
+ }
+ if semicolon == nil {
+ panic("semicolon is nil")
+ }
+ numChildren := 5
+ if label != nil {
+ numChildren++
+ }
+ if opts != nil {
+ numChildren++
+ }
+ children := make([]Node, 0, numChildren)
+ if label != nil {
+ children = append(children, label)
+ }
+ children = append(children, fieldType, name, equals, tag)
+ if opts != nil {
+ children = append(children, opts)
+ }
+ children = append(children, semicolon)
+
+ return &FieldNode{
+ compositeNode: compositeNode{
+ children: children,
+ },
+ Label: newFieldLabel(label),
+ FldType: fieldType,
+ Name: name,
+ Equals: equals,
+ Tag: tag,
+ Options: opts,
+ Semicolon: semicolon,
+ }
+}
+
+func (n *FieldNode) FieldLabel() Node {
+ // proto3 fields and fields inside one-ofs will not have a label and we need
+ // this check in order to return a nil node -- otherwise we'd return a
+ // non-nil node that has a nil pointer value in it :/
+ if n.Label.KeywordNode == nil {
+ return nil
+ }
+ return n.Label.KeywordNode
+}
+
+func (n *FieldNode) FieldName() Node {
+ return n.Name
+}
+
+func (n *FieldNode) FieldType() Node {
+ return n.FldType
+}
+
+func (n *FieldNode) FieldTag() Node {
+ return n.Tag
+}
+
+func (n *FieldNode) FieldExtendee() Node {
+ if n.Extendee != nil {
+ return n.Extendee.Extendee
+ }
+ return nil
+}
+
+func (n *FieldNode) GetGroupKeyword() Node {
+ return nil
+}
+
+func (n *FieldNode) GetOptions() *CompactOptionsNode {
+ return n.Options
+}
+
+// FieldLabel represents the label of a field, which indicates its cardinality
+// (i.e. whether it is optional, required, or repeated).
+type FieldLabel struct {
+ *KeywordNode
+ Repeated bool
+ Required bool
+}
+
+func newFieldLabel(lbl *KeywordNode) FieldLabel {
+ repeated, required := false, false
+ if lbl != nil {
+ repeated = lbl.Val == "repeated"
+ required = lbl.Val == "required"
+ }
+ return FieldLabel{
+ KeywordNode: lbl,
+ Repeated: repeated,
+ Required: required,
+ }
+}
+
+// IsPresent returns true if a label keyword was present in the declaration
+// and false if it was absent.
+func (f *FieldLabel) IsPresent() bool {
+ return f.KeywordNode != nil
+}
+
+// GroupNode represents a group declaration, which doubles as a field and inline
+// message declaration. It can represent extension fields as well as
+// non-extension fields (both inside of messages and inside of one-ofs).
+// Example:
+//
+// optional group Key = 4 {
+// optional uint64 id = 1;
+// optional string name = 2;
+// }
+type GroupNode struct {
+ compositeNode
+ Label FieldLabel
+ Keyword *KeywordNode
+ Name *IdentNode
+ Equals *RuneNode
+ Tag *UintLiteralNode
+ Options *CompactOptionsNode
+ MessageBody
+
+ // This is an up-link to the containing *ExtendNode for groups
+ // that are defined inside of "extend" blocks.
+ Extendee *ExtendNode
+}
+
+func (*GroupNode) msgElement() {}
+func (*GroupNode) oneOfElement() {}
+func (*GroupNode) extendElement() {}
+
+// NewGroupNode creates a new *GroupNode. The label and options arguments may be
+// nil but the others must be non-nil.
+//   - label: The token corresponding to the label keyword if present ("optional",
+//     "required", or "repeated").
+//   - keyword: The token corresponding to the "group" keyword.
+//   - name: The token corresponding to the field's name.
+//   - equals: The token corresponding to the '=' rune after the name.
+//   - tag: The token corresponding to the field's tag number.
+//   - opts: Optional set of field options.
+//   - openBrace: The token corresponding to the "{" rune that starts the body.
+//   - decls: All declarations inside the group body.
+//   - closeBrace: The token corresponding to the "}" rune that ends the body.
+func NewGroupNode(label *KeywordNode, keyword *KeywordNode, name *IdentNode, equals *RuneNode, tag *UintLiteralNode, opts *CompactOptionsNode, openBrace *RuneNode, decls []MessageElement, closeBrace *RuneNode) *GroupNode {
+	if keyword == nil {
+		// NB: this previously panicked with "fieldType is nil", which
+		// misreported which argument was missing.
+		panic("keyword is nil")
+	}
+	if name == nil {
+		panic("name is nil")
+	}
+	if equals == nil {
+		panic("equals is nil")
+	}
+	if tag == nil {
+		panic("tag is nil")
+	}
+	if openBrace == nil {
+		panic("openBrace is nil")
+	}
+	if closeBrace == nil {
+		panic("closeBrace is nil")
+	}
+	// Six required children (keyword, name, equals, tag, openBrace,
+	// closeBrace) plus decls, plus the optional label and opts.
+	numChildren := 6 + len(decls)
+	if label != nil {
+		numChildren++
+	}
+	if opts != nil {
+		numChildren++
+	}
+	children := make([]Node, 0, numChildren)
+	if label != nil {
+		children = append(children, label)
+	}
+	children = append(children, keyword, name, equals, tag)
+	if opts != nil {
+		children = append(children, opts)
+	}
+	children = append(children, openBrace)
+	for _, decl := range decls {
+		children = append(children, decl)
+	}
+	children = append(children, closeBrace)
+
+	ret := &GroupNode{
+		compositeNode: compositeNode{
+			children: children,
+		},
+		Label:   newFieldLabel(label),
+		Keyword: keyword,
+		Name:    name,
+		Equals:  equals,
+		Tag:     tag,
+		Options: opts,
+	}
+	populateMessageBody(&ret.MessageBody, openBrace, decls, closeBrace)
+	return ret
+}
+
+func (n *GroupNode) FieldLabel() Node {
+ if n.Label.KeywordNode == nil {
+ // return nil interface to indicate absence, not a typed nil
+ return nil
+ }
+ return n.Label.KeywordNode
+}
+
+func (n *GroupNode) FieldName() Node {
+ return n.Name
+}
+
+func (n *GroupNode) FieldType() Node {
+ return n.Keyword
+}
+
+func (n *GroupNode) FieldTag() Node {
+ return n.Tag
+}
+
+func (n *GroupNode) FieldExtendee() Node {
+ if n.Extendee != nil {
+ return n.Extendee.Extendee
+ }
+ return nil
+}
+
+func (n *GroupNode) GetGroupKeyword() Node {
+ return n.Keyword
+}
+
+func (n *GroupNode) GetOptions() *CompactOptionsNode {
+ return n.Options
+}
+
+func (n *GroupNode) MessageName() Node {
+ return n.Name
+}
+
+// OneOfDeclNode is a node in the AST that defines a oneof. There are
+// multiple types of AST nodes that declare oneofs:
+// - *OneOfNode
+// - *SyntheticOneOf
+//
+// This also allows NoSourceNode to be used in place of one of the above
+// for some usages.
+type OneOfDeclNode interface {
+ Node
+ OneOfName() Node
+}
+
+// OneOfNode represents a one-of declaration. Example:
+//
+// oneof query {
+// string by_name = 2;
+// Type by_type = 3;
+// Address by_address = 4;
+// Labels by_label = 5;
+// }
+type OneOfNode struct {
+ compositeNode
+ Keyword *KeywordNode
+ Name *IdentNode
+ OpenBrace *RuneNode
+ Decls []OneOfElement
+ CloseBrace *RuneNode
+}
+
+func (*OneOfNode) msgElement() {}
+
+// NewOneOfNode creates a new *OneOfNode. All arguments must be non-nil. While
+// it is technically allowed for decls to be nil or empty, the resulting node
+// will not be a valid oneof, which must have at least one field.
+// - keyword: The token corresponding to the "oneof" keyword.
+// - name: The token corresponding to the oneof's name.
+// - openBrace: The token corresponding to the "{" rune that starts the body.
+// - decls: All declarations inside the oneof body.
+// - closeBrace: The token corresponding to the "}" rune that ends the body.
+func NewOneOfNode(keyword *KeywordNode, name *IdentNode, openBrace *RuneNode, decls []OneOfElement, closeBrace *RuneNode) *OneOfNode {
+ if keyword == nil {
+ panic("keyword is nil")
+ }
+ if name == nil {
+ panic("name is nil")
+ }
+ if openBrace == nil {
+ panic("openBrace is nil")
+ }
+ if closeBrace == nil {
+ panic("closeBrace is nil")
+ }
+ children := make([]Node, 0, 4+len(decls))
+ children = append(children, keyword, name, openBrace)
+ for _, decl := range decls {
+ children = append(children, decl)
+ }
+ children = append(children, closeBrace)
+
+ for _, decl := range decls {
+ switch decl := decl.(type) {
+ case *OptionNode, *FieldNode, *GroupNode, *EmptyDeclNode:
+ default:
+ panic(fmt.Sprintf("invalid OneOfElement type: %T", decl))
+ }
+ }
+
+ return &OneOfNode{
+ compositeNode: compositeNode{
+ children: children,
+ },
+ Keyword: keyword,
+ Name: name,
+ OpenBrace: openBrace,
+ Decls: decls,
+ CloseBrace: closeBrace,
+ }
+}
+
+func (n *OneOfNode) OneOfName() Node {
+ return n.Name
+}
+
+// SyntheticOneOf is not an actual node in the AST but a synthetic node
+// that implements OneOfDeclNode. These are used to represent the implicit
+// oneof declarations that enclose "proto3 optional" fields.
+type SyntheticOneOf struct {
+ Field *FieldNode
+}
+
+// NewSyntheticOneOf creates a new *SyntheticOneOf that corresponds to the
+// given proto3 optional field.
+func NewSyntheticOneOf(field *FieldNode) *SyntheticOneOf {
+ return &SyntheticOneOf{Field: field}
+}
+
+func (n *SyntheticOneOf) Start() *SourcePos {
+ return n.Field.Start()
+}
+
+func (n *SyntheticOneOf) End() *SourcePos {
+ return n.Field.End()
+}
+
+func (n *SyntheticOneOf) LeadingComments() []Comment {
+ return nil
+}
+
+func (n *SyntheticOneOf) TrailingComments() []Comment {
+ return nil
+}
+
+func (n *SyntheticOneOf) OneOfName() Node {
+ return n.Field.FieldName()
+}
+
+// OneOfElement is an interface implemented by all AST nodes that can
+// appear in the body of a oneof declaration.
+type OneOfElement interface {
+ Node
+ oneOfElement()
+}
+
+var _ OneOfElement = (*OptionNode)(nil)
+var _ OneOfElement = (*FieldNode)(nil)
+var _ OneOfElement = (*GroupNode)(nil)
+var _ OneOfElement = (*EmptyDeclNode)(nil)
+
+// MapTypeNode represents the type declaration for a map field. It defines
+// both the key and value types for the map. Example:
+//
+// map<string, Values>
+type MapTypeNode struct {
+ compositeNode
+ Keyword *KeywordNode
+ OpenAngle *RuneNode
+ KeyType *IdentNode
+ Comma *RuneNode
+ ValueType IdentValueNode
+ CloseAngle *RuneNode
+}
+
+// NewMapTypeNode creates a new *MapTypeNode. All arguments must be non-nil.
+// - keyword: The token corresponding to the "map" keyword.
+// - openAngle: The token corresponding to the "<" rune after the keyword.
+// - keyType: The token corresponding to the key type for the map.
+// - comma: The token corresponding to the "," rune between key and value types.
+// - valType: The token corresponding to the value type for the map.
+// - closeAngle: The token corresponding to the ">" rune that ends the declaration.
+func NewMapTypeNode(keyword *KeywordNode, openAngle *RuneNode, keyType *IdentNode, comma *RuneNode, valType IdentValueNode, closeAngle *RuneNode) *MapTypeNode {
+ if keyword == nil {
+ panic("keyword is nil")
+ }
+ if openAngle == nil {
+ panic("openAngle is nil")
+ }
+ if keyType == nil {
+ panic("keyType is nil")
+ }
+ if comma == nil {
+ panic("comma is nil")
+ }
+ if valType == nil {
+ panic("valType is nil")
+ }
+ if closeAngle == nil {
+ panic("closeAngle is nil")
+ }
+ children := []Node{keyword, openAngle, keyType, comma, valType, closeAngle}
+ return &MapTypeNode{
+ compositeNode: compositeNode{
+ children: children,
+ },
+ Keyword: keyword,
+ OpenAngle: openAngle,
+ KeyType: keyType,
+ Comma: comma,
+ ValueType: valType,
+ CloseAngle: closeAngle,
+ }
+}
+
+// MapFieldNode represents a map field declaration. Example:
+//
+// map<string,string> replacements = 3 [deprecated = true];
+type MapFieldNode struct {
+ compositeNode
+ MapType *MapTypeNode
+ Name *IdentNode
+ Equals *RuneNode
+ Tag *UintLiteralNode
+ Options *CompactOptionsNode
+ Semicolon *RuneNode
+}
+
+func (*MapFieldNode) msgElement() {}
+
+// NewMapFieldNode creates a new *MapFieldNode. All arguments must be non-nil
+// except opts, which may be nil.
+// - mapType: The token corresponding to the map type.
+// - name: The token corresponding to the field's name.
+// - equals: The token corresponding to the '=' rune after the name.
+// - tag: The token corresponding to the field's tag number.
+// - opts: Optional set of field options.
+// - semicolon: The token corresponding to the ";" rune that ends the declaration.
+func NewMapFieldNode(mapType *MapTypeNode, name *IdentNode, equals *RuneNode, tag *UintLiteralNode, opts *CompactOptionsNode, semicolon *RuneNode) *MapFieldNode {
+ if mapType == nil {
+ panic("mapType is nil")
+ }
+ if name == nil {
+ panic("name is nil")
+ }
+ if equals == nil {
+ panic("equals is nil")
+ }
+ if tag == nil {
+ panic("tag is nil")
+ }
+ if semicolon == nil {
+ panic("semicolon is nil")
+ }
+ numChildren := 5
+ if opts != nil {
+ numChildren++
+ }
+ children := make([]Node, 0, numChildren)
+ children = append(children, mapType, name, equals, tag)
+ if opts != nil {
+ children = append(children, opts)
+ }
+ children = append(children, semicolon)
+
+ return &MapFieldNode{
+ compositeNode: compositeNode{
+ children: children,
+ },
+ MapType: mapType,
+ Name: name,
+ Equals: equals,
+ Tag: tag,
+ Options: opts,
+ Semicolon: semicolon,
+ }
+}
+
+func (n *MapFieldNode) FieldLabel() Node {
+ return nil
+}
+
+func (n *MapFieldNode) FieldName() Node {
+ return n.Name
+}
+
+func (n *MapFieldNode) FieldType() Node {
+ return n.MapType
+}
+
+func (n *MapFieldNode) FieldTag() Node {
+ return n.Tag
+}
+
+func (n *MapFieldNode) FieldExtendee() Node {
+ return nil
+}
+
+func (n *MapFieldNode) GetGroupKeyword() Node {
+ return nil
+}
+
+func (n *MapFieldNode) GetOptions() *CompactOptionsNode {
+ return n.Options
+}
+
+func (n *MapFieldNode) MessageName() Node {
+ return n.Name
+}
+
+func (n *MapFieldNode) KeyField() *SyntheticMapField {
+ return NewSyntheticMapField(n.MapType.KeyType, 1)
+}
+
+func (n *MapFieldNode) ValueField() *SyntheticMapField {
+ return NewSyntheticMapField(n.MapType.ValueType, 2)
+}
+
+// SyntheticMapField is not an actual node in the AST but a synthetic node
+// that implements FieldDeclNode. These are used to represent the implicit
+// field declarations of the "key" and "value" fields in a map entry.
+type SyntheticMapField struct {
+ Ident IdentValueNode
+ Tag *UintLiteralNode
+}
+
+// NewSyntheticMapField creates a new *SyntheticMapField for the given
+// identifier (either a key or value type in a map declaration) and tag
+// number (1 for key, 2 for value).
+func NewSyntheticMapField(ident IdentValueNode, tagNum uint64) *SyntheticMapField {
+ tag := &UintLiteralNode{
+ terminalNode: terminalNode{
+ posRange: PosRange{Start: *ident.Start(), End: *ident.End()},
+ },
+ Val: tagNum,
+ }
+ return &SyntheticMapField{Ident: ident, Tag: tag}
+}
+
+func (n *SyntheticMapField) Start() *SourcePos {
+ return n.Ident.Start()
+}
+
+func (n *SyntheticMapField) End() *SourcePos {
+ return n.Ident.End()
+}
+
+func (n *SyntheticMapField) LeadingComments() []Comment {
+ return nil
+}
+
+func (n *SyntheticMapField) TrailingComments() []Comment {
+ return nil
+}
+
+func (n *SyntheticMapField) FieldLabel() Node {
+ return n.Ident
+}
+
+func (n *SyntheticMapField) FieldName() Node {
+ return n.Ident
+}
+
+func (n *SyntheticMapField) FieldType() Node {
+ return n.Ident
+}
+
+func (n *SyntheticMapField) FieldTag() Node {
+ return n.Tag
+}
+
+func (n *SyntheticMapField) FieldExtendee() Node {
+ return nil
+}
+
+func (n *SyntheticMapField) GetGroupKeyword() Node {
+ return nil
+}
+
+func (n *SyntheticMapField) GetOptions() *CompactOptionsNode {
+ return nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/file.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/file.go
new file mode 100644
index 0000000..332cb0c
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/file.go
@@ -0,0 +1,236 @@
+package ast
+
+import "fmt"
+
+// FileDeclNode is a placeholder interface for AST nodes that represent files.
+// This allows NoSourceNode to be used in place of *FileNode for some usages.
+type FileDeclNode interface {
+ Node
+ GetSyntax() Node
+}
+
+var _ FileDeclNode = (*FileNode)(nil)
+var _ FileDeclNode = NoSourceNode{}
+
+// FileNode is the root of the AST hierarchy. It represents an entire
+// protobuf source file.
+type FileNode struct {
+ compositeNode
+ Syntax *SyntaxNode // nil if file has no syntax declaration
+ Decls []FileElement
+
+ // TODO: add Edition *EditionNode
+
+ // Any comments that follow the last token in the file.
+ FinalComments []Comment
+ // Any whitespace at the end of the file (after the last token or
+ // last comment in the file).
+ FinalWhitespace string
+}
+
+// NewFileNode creates a new *FileNode. The syntax parameter is optional. If it
+// is absent, it means the file had no syntax declaration.
+//
+// This function panics if the concrete type of any element of decls is not
+// from this package.
+func NewFileNode(syntax *SyntaxNode, decls []FileElement) *FileNode {
+ numChildren := len(decls)
+ if syntax != nil {
+ numChildren++
+ }
+ children := make([]Node, 0, numChildren)
+ if syntax != nil {
+ children = append(children, syntax)
+ }
+ for _, decl := range decls {
+ children = append(children, decl)
+ }
+
+ for _, decl := range decls {
+ switch decl := decl.(type) {
+ case *PackageNode, *ImportNode, *OptionNode, *MessageNode,
+ *EnumNode, *ExtendNode, *ServiceNode, *EmptyDeclNode:
+ default:
+ panic(fmt.Sprintf("invalid FileElement type: %T", decl))
+ }
+ }
+
+ return &FileNode{
+ compositeNode: compositeNode{
+ children: children,
+ },
+ Syntax: syntax,
+ Decls: decls,
+ }
+}
+
+func NewEmptyFileNode(filename string) *FileNode {
+ return &FileNode{
+ compositeNode: compositeNode{
+ children: []Node{NewNoSourceNode(filename)},
+ },
+ }
+}
+
+func (f *FileNode) GetSyntax() Node {
+ return f.Syntax
+}
+
+// FileElement is an interface implemented by all AST nodes that are
+// allowed as top-level declarations in the file.
+type FileElement interface {
+ Node
+ fileElement()
+}
+
+var _ FileElement = (*ImportNode)(nil)
+var _ FileElement = (*PackageNode)(nil)
+var _ FileElement = (*OptionNode)(nil)
+var _ FileElement = (*MessageNode)(nil)
+var _ FileElement = (*EnumNode)(nil)
+var _ FileElement = (*ExtendNode)(nil)
+var _ FileElement = (*ServiceNode)(nil)
+var _ FileElement = (*EmptyDeclNode)(nil)
+
+// SyntaxNode represents a syntax declaration, which if present must be
+// the first non-comment content. Example:
+//
+// syntax = "proto2";
+//
+// Files that don't have a syntax node are assumed to use proto2 syntax.
+type SyntaxNode struct {
+ compositeNode
+ Keyword *KeywordNode
+ Equals *RuneNode
+ Syntax StringValueNode
+ Semicolon *RuneNode
+}
+
+// NewSyntaxNode creates a new *SyntaxNode. All four arguments must be non-nil:
+// - keyword: The token corresponding to the "syntax" keyword.
+// - equals: The token corresponding to the "=" rune.
+// - syntax: The actual syntax value, e.g. "proto2" or "proto3".
+// - semicolon: The token corresponding to the ";" rune that ends the declaration.
+func NewSyntaxNode(keyword *KeywordNode, equals *RuneNode, syntax StringValueNode, semicolon *RuneNode) *SyntaxNode {
+ if keyword == nil {
+ panic("keyword is nil")
+ }
+ if equals == nil {
+ panic("equals is nil")
+ }
+ if syntax == nil {
+ panic("syntax is nil")
+ }
+ if semicolon == nil {
+ panic("semicolon is nil")
+ }
+ children := []Node{keyword, equals, syntax, semicolon}
+ return &SyntaxNode{
+ compositeNode: compositeNode{
+ children: children,
+ },
+ Keyword: keyword,
+ Equals: equals,
+ Syntax: syntax,
+ Semicolon: semicolon,
+ }
+}
+
+// ImportNode represents an import statement. Example:
+//
+// import "google/protobuf/empty.proto";
+type ImportNode struct {
+ compositeNode
+ Keyword *KeywordNode
+ // Optional; if present indicates this is a public import
+ Public *KeywordNode
+ // Optional; if present indicates this is a weak import
+ Weak *KeywordNode
+ Name StringValueNode
+ Semicolon *RuneNode
+}
+
+// NewImportNode creates a new *ImportNode. The public and weak arguments are optional
+// and only one or the other (or neither) may be specified, not both. When public is
+// non-nil, it indicates the "public" keyword in the import statement and means this is
+// a public import. When weak is non-nil, it indicates the "weak" keyword in the import
+// statement and means this is a weak import. When both are nil, this is a normal import.
+// The other arguments must be non-nil:
+// - keyword: The token corresponding to the "import" keyword.
+// - public: The token corresponding to the optional "public" keyword.
+// - weak: The token corresponding to the optional "weak" keyword.
+// - name: The actual imported file name.
+// - semicolon: The token corresponding to the ";" rune that ends the declaration.
+func NewImportNode(keyword *KeywordNode, public *KeywordNode, weak *KeywordNode, name StringValueNode, semicolon *RuneNode) *ImportNode {
+ if keyword == nil {
+ panic("keyword is nil")
+ }
+ if name == nil {
+ panic("name is nil")
+ }
+ if semicolon == nil {
+ panic("semicolon is nil")
+ }
+ numChildren := 3
+ if public != nil || weak != nil {
+ numChildren++
+ }
+ children := make([]Node, 0, numChildren)
+ children = append(children, keyword)
+ if public != nil {
+ children = append(children, public)
+ } else if weak != nil {
+ children = append(children, weak)
+ }
+ children = append(children, name, semicolon)
+
+ return &ImportNode{
+ compositeNode: compositeNode{
+ children: children,
+ },
+ Keyword: keyword,
+ Public: public,
+ Weak: weak,
+ Name: name,
+ Semicolon: semicolon,
+ }
+}
+
+func (*ImportNode) fileElement() {}
+
+// PackageNode represents a package declaration. Example:
+//
+// package foobar.com;
+type PackageNode struct {
+ compositeNode
+ Keyword *KeywordNode
+ Name IdentValueNode
+ Semicolon *RuneNode
+}
+
+func (*PackageNode) fileElement() {}
+
+// NewPackageNode creates a new *PackageNode. All three arguments must be non-nil:
+// - keyword: The token corresponding to the "package" keyword.
+// - name: The package name declared for the file.
+// - semicolon: The token corresponding to the ";" rune that ends the declaration.
+func NewPackageNode(keyword *KeywordNode, name IdentValueNode, semicolon *RuneNode) *PackageNode {
+ if keyword == nil {
+ panic("keyword is nil")
+ }
+ if name == nil {
+ panic("name is nil")
+ }
+ if semicolon == nil {
+ panic("semicolon is nil")
+ }
+ children := []Node{keyword, name, semicolon}
+ return &PackageNode{
+ compositeNode: compositeNode{
+ children: children,
+ },
+ Keyword: keyword,
+ Name: name,
+ Semicolon: semicolon,
+ }
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/identifiers.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/identifiers.go
new file mode 100644
index 0000000..ed97e97
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/identifiers.go
@@ -0,0 +1,134 @@
+package ast
+
+import (
+ "fmt"
+ "strings"
+)
+
+// Identifier is a possibly-qualified name. This is used to distinguish
+// ValueNode values that are references/identifiers vs. those that are
+// string literals.
+type Identifier string
+
+// IdentValueNode is an AST node that represents an identifier.
+type IdentValueNode interface {
+ ValueNode
+ AsIdentifier() Identifier
+}
+
+var _ IdentValueNode = (*IdentNode)(nil)
+var _ IdentValueNode = (*CompoundIdentNode)(nil)
+
+// IdentNode represents a simple, unqualified identifier. These are used to name
+// elements declared in a protobuf file or to refer to elements. Example:
+//
+// foobar
+type IdentNode struct {
+ terminalNode
+ Val string
+}
+
+// NewIdentNode creates a new *IdentNode. The given val is the identifier text.
+func NewIdentNode(val string, info TokenInfo) *IdentNode {
+ return &IdentNode{
+ terminalNode: info.asTerminalNode(),
+ Val: val,
+ }
+}
+
+func (n *IdentNode) Value() interface{} {
+ return n.AsIdentifier()
+}
+
+func (n *IdentNode) AsIdentifier() Identifier {
+ return Identifier(n.Val)
+}
+
+// ToKeyword is used to convert identifiers to keywords. Since keywords are not
+// reserved in the protobuf language, they are initially lexed as identifiers
+// and then converted to keywords based on context.
+func (n *IdentNode) ToKeyword() *KeywordNode {
+ return (*KeywordNode)(n)
+}
+
+// CompoundIdentNode represents a qualified identifier. A qualified identifier
+// has at least one dot and possibly multiple identifier names (all separated by
+// dots). If the identifier has a leading dot, then it is a *fully* qualified
+// identifier. Example:
+//
+// .com.foobar.Baz
+type CompoundIdentNode struct {
+ compositeNode
+ // Optional leading dot, indicating that the identifier is fully qualified.
+ LeadingDot *RuneNode
+ Components []*IdentNode
+ // Dots[0] is the dot after Components[0]. The length of Dots is always
+ // one less than the length of Components.
+ Dots []*RuneNode
+ // The text value of the identifier, with all components and dots
+ // concatenated.
+ Val string
+}
+
+// NewCompoundIdentNode creates a *CompoundIdentNode. The leadingDot may be nil.
+// The dots arg must have a length that is one less than the length of
+// components. The components arg must not be empty.
+func NewCompoundIdentNode(leadingDot *RuneNode, components []*IdentNode, dots []*RuneNode) *CompoundIdentNode {
+ if len(components) == 0 {
+ panic("must have at least one component")
+ }
+ if len(dots) != len(components)-1 {
+ panic(fmt.Sprintf("%d components requires %d dots, not %d", len(components), len(components)-1, len(dots)))
+ }
+ numChildren := len(components)*2 - 1
+ if leadingDot != nil {
+ numChildren++
+ }
+ children := make([]Node, 0, numChildren)
+ var b strings.Builder
+ if leadingDot != nil {
+ children = append(children, leadingDot)
+ b.WriteRune(leadingDot.Rune)
+ }
+ for i, comp := range components {
+ if i > 0 {
+ dot := dots[i-1]
+ children = append(children, dot)
+ b.WriteRune(dot.Rune)
+ }
+ children = append(children, comp)
+ b.WriteString(comp.Val)
+ }
+ return &CompoundIdentNode{
+ compositeNode: compositeNode{
+ children: children,
+ },
+ LeadingDot: leadingDot,
+ Components: components,
+ Dots: dots,
+ Val: b.String(),
+ }
+}
+
+func (n *CompoundIdentNode) Value() interface{} {
+ return n.AsIdentifier()
+}
+
+func (n *CompoundIdentNode) AsIdentifier() Identifier {
+ return Identifier(n.Val)
+}
+
+// KeywordNode is an AST node that represents a keyword. Keywords are
+// like identifiers, but they have special meaning in particular contexts.
+// Example:
+//
+// message
+type KeywordNode IdentNode
+
+// NewKeywordNode creates a new *KeywordNode. The given val is the keyword.
+func NewKeywordNode(val string, info TokenInfo) *KeywordNode {
+ return &KeywordNode{
+ terminalNode: info.asTerminalNode(),
+ Val: val,
+ }
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/message.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/message.go
new file mode 100644
index 0000000..c98b0f8
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/message.go
@@ -0,0 +1,199 @@
+package ast
+
+import "fmt"
+
+// MessageDeclNode is a node in the AST that defines a message type. This
+// includes normal message fields as well as implicit messages:
+// - *MessageNode
+// - *GroupNode (the group is a field and inline message type)
+// - *MapFieldNode (map fields implicitly define a MapEntry message type)
+//
+// This also allows NoSourceNode to be used in place of one of the above
+// for some usages.
+type MessageDeclNode interface {
+ Node
+ MessageName() Node
+}
+
+var _ MessageDeclNode = (*MessageNode)(nil)
+var _ MessageDeclNode = (*GroupNode)(nil)
+var _ MessageDeclNode = (*MapFieldNode)(nil)
+var _ MessageDeclNode = NoSourceNode{}
+
+// MessageNode represents a message declaration. Example:
+//
+// message Foo {
+// string name = 1;
+// repeated string labels = 2;
+// bytes extra = 3;
+// }
+type MessageNode struct {
+ compositeNode
+ Keyword *KeywordNode
+ Name *IdentNode
+ MessageBody
+}
+
+func (*MessageNode) fileElement() {}
+func (*MessageNode) msgElement() {}
+
+// NewMessageNode creates a new *MessageNode. All arguments must be non-nil.
+// - keyword: The token corresponding to the "message" keyword.
+// - name: The token corresponding to the field's name.
+// - openBrace: The token corresponding to the "{" rune that starts the body.
+// - decls: All declarations inside the message body.
+// - closeBrace: The token corresponding to the "}" rune that ends the body.
+func NewMessageNode(keyword *KeywordNode, name *IdentNode, openBrace *RuneNode, decls []MessageElement, closeBrace *RuneNode) *MessageNode {
+ if keyword == nil {
+ panic("keyword is nil")
+ }
+ if name == nil {
+ panic("name is nil")
+ }
+ if openBrace == nil {
+ panic("openBrace is nil")
+ }
+ if closeBrace == nil {
+ panic("closeBrace is nil")
+ }
+ children := make([]Node, 0, 4+len(decls))
+ children = append(children, keyword, name, openBrace)
+ for _, decl := range decls {
+ children = append(children, decl)
+ }
+ children = append(children, closeBrace)
+
+ ret := &MessageNode{
+ compositeNode: compositeNode{
+ children: children,
+ },
+ Keyword: keyword,
+ Name: name,
+ }
+ populateMessageBody(&ret.MessageBody, openBrace, decls, closeBrace)
+ return ret
+}
+
+func (n *MessageNode) MessageName() Node {
+ return n.Name
+}
+
+// MessageBody represents the body of a message. It is used by both
+// MessageNodes and GroupNodes.
+type MessageBody struct {
+ OpenBrace *RuneNode
+ Decls []MessageElement
+ CloseBrace *RuneNode
+}
+
+func populateMessageBody(m *MessageBody, openBrace *RuneNode, decls []MessageElement, closeBrace *RuneNode) {
+ m.OpenBrace = openBrace
+ m.Decls = decls
+ for _, decl := range decls {
+ switch decl.(type) {
+ case *OptionNode, *FieldNode, *MapFieldNode, *GroupNode, *OneOfNode,
+ *MessageNode, *EnumNode, *ExtendNode, *ExtensionRangeNode,
+ *ReservedNode, *EmptyDeclNode:
+ default:
+ panic(fmt.Sprintf("invalid MessageElement type: %T", decl))
+ }
+ }
+ m.CloseBrace = closeBrace
+}
+
+// MessageElement is an interface implemented by all AST nodes that can
+// appear in a message body.
+type MessageElement interface {
+ Node
+ msgElement()
+}
+
+var _ MessageElement = (*OptionNode)(nil)
+var _ MessageElement = (*FieldNode)(nil)
+var _ MessageElement = (*MapFieldNode)(nil)
+var _ MessageElement = (*OneOfNode)(nil)
+var _ MessageElement = (*GroupNode)(nil)
+var _ MessageElement = (*MessageNode)(nil)
+var _ MessageElement = (*EnumNode)(nil)
+var _ MessageElement = (*ExtendNode)(nil)
+var _ MessageElement = (*ExtensionRangeNode)(nil)
+var _ MessageElement = (*ReservedNode)(nil)
+var _ MessageElement = (*EmptyDeclNode)(nil)
+
+// ExtendNode represents a declaration of extension fields. Example:
+//
+// extend google.protobuf.FieldOptions {
+// bool redacted = 33333;
+// }
+type ExtendNode struct {
+ compositeNode
+ Keyword *KeywordNode
+ Extendee IdentValueNode
+ OpenBrace *RuneNode
+ Decls []ExtendElement
+ CloseBrace *RuneNode
+}
+
+func (*ExtendNode) fileElement() {}
+func (*ExtendNode) msgElement() {}
+
+// NewExtendNode creates a new *ExtendNode. All arguments must be non-nil.
+// - keyword: The token corresponding to the "extend" keyword.
+// - extendee: The token corresponding to the name of the extended message.
+// - openBrace: The token corresponding to the "{" rune that starts the body.
+// - decls: All declarations inside the message body.
+// - closeBrace: The token corresponding to the "}" rune that ends the body.
+func NewExtendNode(keyword *KeywordNode, extendee IdentValueNode, openBrace *RuneNode, decls []ExtendElement, closeBrace *RuneNode) *ExtendNode {
+ if keyword == nil {
+ panic("keyword is nil")
+ }
+ if extendee == nil {
+ panic("extendee is nil")
+ }
+ if openBrace == nil {
+ panic("openBrace is nil")
+ }
+ if closeBrace == nil {
+ panic("closeBrace is nil")
+ }
+ children := make([]Node, 0, 4+len(decls))
+ children = append(children, keyword, extendee, openBrace)
+ for _, decl := range decls {
+ children = append(children, decl)
+ }
+ children = append(children, closeBrace)
+
+ ret := &ExtendNode{
+ compositeNode: compositeNode{
+ children: children,
+ },
+ Keyword: keyword,
+ Extendee: extendee,
+ OpenBrace: openBrace,
+ Decls: decls,
+ CloseBrace: closeBrace,
+ }
+ for _, decl := range decls {
+ switch decl := decl.(type) {
+ case *FieldNode:
+ decl.Extendee = ret
+ case *GroupNode:
+ decl.Extendee = ret
+ case *EmptyDeclNode:
+ default:
+ panic(fmt.Sprintf("invalid ExtendElement type: %T", decl))
+ }
+ }
+ return ret
+}
+
+// ExtendElement is an interface implemented by all AST nodes that can
+// appear in the body of an extends declaration.
+type ExtendElement interface {
+ Node
+ extendElement()
+}
+
+var _ ExtendElement = (*FieldNode)(nil)
+var _ ExtendElement = (*GroupNode)(nil)
+var _ ExtendElement = (*EmptyDeclNode)(nil)
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/no_source.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/no_source.go
new file mode 100644
index 0000000..44e02b1
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/no_source.go
@@ -0,0 +1,103 @@
+package ast
+
+// UnknownPos is a placeholder position when only the source file
+// name is known.
+func UnknownPos(filename string) *SourcePos {
+ return &SourcePos{Filename: filename}
+}
+
+// NoSourceNode is a placeholder AST node that implements numerous
+// interfaces in this package. It can be used to represent an AST
+// element for a file whose source is not available.
+type NoSourceNode struct {
+ pos *SourcePos
+}
+
+// NewNoSourceNode creates a new NoSourceNode for the given filename.
+func NewNoSourceNode(filename string) NoSourceNode {
+ return NoSourceNode{pos: UnknownPos(filename)}
+}
+
+func (n NoSourceNode) Start() *SourcePos {
+ return n.pos
+}
+
+func (n NoSourceNode) End() *SourcePos {
+ return n.pos
+}
+
+func (n NoSourceNode) LeadingComments() []Comment {
+ return nil
+}
+
+func (n NoSourceNode) TrailingComments() []Comment {
+ return nil
+}
+
+func (n NoSourceNode) GetSyntax() Node {
+ return n
+}
+
+func (n NoSourceNode) GetName() Node {
+ return n
+}
+
+func (n NoSourceNode) GetValue() ValueNode {
+ return n
+}
+
+func (n NoSourceNode) FieldLabel() Node {
+ return n
+}
+
+func (n NoSourceNode) FieldName() Node {
+ return n
+}
+
+func (n NoSourceNode) FieldType() Node {
+ return n
+}
+
+func (n NoSourceNode) FieldTag() Node {
+ return n
+}
+
+func (n NoSourceNode) FieldExtendee() Node {
+ return n
+}
+
+func (n NoSourceNode) GetGroupKeyword() Node {
+ return n
+}
+
+func (n NoSourceNode) GetOptions() *CompactOptionsNode {
+ return nil
+}
+
+func (n NoSourceNode) RangeStart() Node {
+ return n
+}
+
+func (n NoSourceNode) RangeEnd() Node {
+ return n
+}
+
+func (n NoSourceNode) GetNumber() Node {
+ return n
+}
+
+func (n NoSourceNode) MessageName() Node {
+ return n
+}
+
+func (n NoSourceNode) GetInputType() Node {
+ return n
+}
+
+func (n NoSourceNode) GetOutputType() Node {
+ return n
+}
+
+func (n NoSourceNode) Value() interface{} {
+ return nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/node.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/node.go
new file mode 100644
index 0000000..a2a8a3b
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/node.go
@@ -0,0 +1,200 @@
+package ast
+
+// Node is the interface implemented by all nodes in the AST. It
+// provides information about the span of this AST node in terms
+// of location in the source file. It also provides information
+// about all prior comments (attached as leading comments) and
+// optional subsequent comments (attached as trailing comments).
+type Node interface {
+ Start() *SourcePos
+ End() *SourcePos
+ LeadingComments() []Comment
+ TrailingComments() []Comment
+}
+
+// TerminalNode represents a leaf in the AST. These represent
+// the tokens/lexemes in the protobuf language. Comments and
+// whitespace are accumulated by the lexer and associated with
+// the following lexed token.
+type TerminalNode interface {
+ Node
+ // PopLeadingComment removes the first leading comment from this
+ // token and returns it. If the node has no leading comments then
+ // this method will panic.
+ PopLeadingComment() Comment
+ // PushTrailingComment appends the given comment to the token's
+ // trailing comments.
+ PushTrailingComment(Comment)
+ // LeadingWhitespace returns any whitespace between the prior comment
+ // (last leading comment), if any, or prior lexed token and this token.
+ LeadingWhitespace() string
+ // RawText returns the raw text of the token as read from the source.
+ RawText() string
+}
+
+var _ TerminalNode = (*StringLiteralNode)(nil)
+var _ TerminalNode = (*UintLiteralNode)(nil)
+var _ TerminalNode = (*FloatLiteralNode)(nil)
+var _ TerminalNode = (*IdentNode)(nil)
+var _ TerminalNode = (*BoolLiteralNode)(nil)
+var _ TerminalNode = (*SpecialFloatLiteralNode)(nil)
+var _ TerminalNode = (*KeywordNode)(nil)
+var _ TerminalNode = (*RuneNode)(nil)
+
+// TokenInfo represents state accumulated by the lexer to associated with a
+// token (aka terminal node).
+type TokenInfo struct {
+ // The location of the token in the source file.
+ PosRange
+ // The raw text of the token.
+ RawText string
+ // Any comments encountered preceding this token.
+ LeadingComments []Comment
+ // Any leading whitespace immediately preceding this token.
+ LeadingWhitespace string
+ // Any trailing comments following this token. This is usually
+ // empty as tokens are created by the lexer immediately and
+ // trailing comments are accounted for afterwards, added using
+ // the node's PushTrailingComment method.
+ TrailingComments []Comment
+}
+
+func (t *TokenInfo) asTerminalNode() terminalNode {
+ return terminalNode{
+ posRange: t.PosRange,
+ leadingComments: t.LeadingComments,
+ leadingWhitespace: t.LeadingWhitespace,
+ trailingComments: t.TrailingComments,
+ raw: t.RawText,
+ }
+}
+
+// CompositeNode represents any non-terminal node in the tree. These
+// are interior or root nodes and have child nodes.
+type CompositeNode interface {
+ Node
+ // All AST nodes that are immediate children of this one.
+ Children() []Node
+}
+
+// terminalNode contains book-keeping shared by all TerminalNode
+// implementations. It is embedded in all such node types in this
+// package. It provides the implementation of the TerminalNode
+// interface.
+type terminalNode struct {
+ posRange PosRange
+ leadingComments []Comment
+ leadingWhitespace string
+ trailingComments []Comment
+ raw string
+}
+
+func (n *terminalNode) Start() *SourcePos {
+ return &n.posRange.Start
+}
+
+func (n *terminalNode) End() *SourcePos {
+ return &n.posRange.End
+}
+
+func (n *terminalNode) LeadingComments() []Comment {
+ return n.leadingComments
+}
+
+func (n *terminalNode) TrailingComments() []Comment {
+ return n.trailingComments
+}
+
+func (n *terminalNode) PopLeadingComment() Comment {
+ c := n.leadingComments[0]
+ n.leadingComments = n.leadingComments[1:]
+ return c
+}
+
+func (n *terminalNode) PushTrailingComment(c Comment) {
+ n.trailingComments = append(n.trailingComments, c)
+}
+
+func (n *terminalNode) LeadingWhitespace() string {
+ return n.leadingWhitespace
+}
+
+func (n *terminalNode) RawText() string {
+ return n.raw
+}
+
+// compositeNode contains book-keeping shared by all CompositeNode
+// implementations. It is embedded in all such node types in this
+// package. It provides the implementation of the CompositeNode
+// interface.
+type compositeNode struct {
+ children []Node
+}
+
+func (n *compositeNode) Children() []Node {
+ return n.children
+}
+
+func (n *compositeNode) Start() *SourcePos {
+ return n.children[0].Start()
+}
+
+func (n *compositeNode) End() *SourcePos {
+ return n.children[len(n.children)-1].End()
+}
+
+func (n *compositeNode) LeadingComments() []Comment {
+ return n.children[0].LeadingComments()
+}
+
+func (n *compositeNode) TrailingComments() []Comment {
+ return n.children[len(n.children)-1].TrailingComments()
+}
+
+// RuneNode represents a single rune in protobuf source. Runes
+// are typically collected into tokens, but some runes stand on
+// their own, such as punctuation/symbols like commas, semicolons,
+// equals signs, open and close symbols (braces, brackets, angles,
+// and parentheses), and periods/dots.
+type RuneNode struct {
+ terminalNode
+ Rune rune
+}
+
+// NewRuneNode creates a new *RuneNode with the given properties.
+func NewRuneNode(r rune, info TokenInfo) *RuneNode {
+ return &RuneNode{
+ terminalNode: info.asTerminalNode(),
+ Rune: r,
+ }
+}
+
+// EmptyDeclNode represents an empty declaration in protobuf source.
+// These amount to extra semicolons, with no actual content preceding
+// the semicolon.
+type EmptyDeclNode struct {
+ compositeNode
+ Semicolon *RuneNode
+}
+
+// NewEmptyDeclNode creates a new *EmptyDeclNode. The one argument must
+// be non-nil.
+func NewEmptyDeclNode(semicolon *RuneNode) *EmptyDeclNode {
+ if semicolon == nil {
+ panic("semicolon is nil")
+ }
+ return &EmptyDeclNode{
+ compositeNode: compositeNode{
+ children: []Node{semicolon},
+ },
+ Semicolon: semicolon,
+ }
+}
+
+func (e *EmptyDeclNode) fileElement() {}
+func (e *EmptyDeclNode) msgElement() {}
+func (e *EmptyDeclNode) extendElement() {}
+func (e *EmptyDeclNode) oneOfElement() {}
+func (e *EmptyDeclNode) enumElement() {}
+func (e *EmptyDeclNode) serviceElement() {}
+func (e *EmptyDeclNode) methodElement() {}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/options.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/options.go
new file mode 100644
index 0000000..c4ed169
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/options.go
@@ -0,0 +1,361 @@
+package ast
+
+import "fmt"
+
+// OptionDeclNode is a placeholder interface for AST nodes that represent
+// options. This allows NoSourceNode to be used in place of *OptionNode
+// for some usages.
+type OptionDeclNode interface {
+ Node
+ GetName() Node
+ GetValue() ValueNode
+}
+
+var _ OptionDeclNode = (*OptionNode)(nil)
+var _ OptionDeclNode = NoSourceNode{}
+
+// OptionNode represents the declaration of a single option for an element.
+// It is used both for normal option declarations (start with "option" keyword
+// and end with semicolon) and for compact options found in fields, enum values,
+// and extension ranges. Example:
+//
+// option (custom.option) = "foo";
+type OptionNode struct {
+ compositeNode
+ Keyword *KeywordNode // absent for compact options
+ Name *OptionNameNode
+ Equals *RuneNode
+ Val ValueNode
+ Semicolon *RuneNode // absent for compact options
+}
+
+func (e *OptionNode) fileElement() {}
+func (e *OptionNode) msgElement() {}
+func (e *OptionNode) oneOfElement() {}
+func (e *OptionNode) enumElement() {}
+func (e *OptionNode) serviceElement() {}
+func (e *OptionNode) methodElement() {}
+
+// NewOptionNode creates a new *OptionNode for a full option declaration (as
+// used in files, messages, oneofs, enums, services, and methods). All arguments
+// must be non-nil. (Also see NewCompactOptionNode.)
+// - keyword: The token corresponding to the "option" keyword.
+// - name: The token corresponding to the name of the option.
+// - equals: The token corresponding to the "=" rune after the name.
+// - val: The token corresponding to the option value.
+// - semicolon: The token corresponding to the ";" rune that ends the declaration.
+func NewOptionNode(keyword *KeywordNode, name *OptionNameNode, equals *RuneNode, val ValueNode, semicolon *RuneNode) *OptionNode {
+ if keyword == nil {
+ panic("keyword is nil")
+ }
+ if name == nil {
+ panic("name is nil")
+ }
+ if equals == nil {
+ panic("equals is nil")
+ }
+ if val == nil {
+ panic("val is nil")
+ }
+ if semicolon == nil {
+ panic("semicolon is nil")
+ }
+ children := []Node{keyword, name, equals, val, semicolon}
+ return &OptionNode{
+ compositeNode: compositeNode{
+ children: children,
+ },
+ Keyword: keyword,
+ Name: name,
+ Equals: equals,
+ Val: val,
+ Semicolon: semicolon,
+ }
+}
+
+// NewCompactOptionNode creates a new *OptionNode for a compact option declaration
+// (as used in fields, enum values, and extension ranges). All arguments must be
+// non-nil.
+// - name: The token corresponding to the name of the option.
+// - equals: The token corresponding to the "=" rune after the name.
+// - val: The token corresponding to the option value.
+func NewCompactOptionNode(name *OptionNameNode, equals *RuneNode, val ValueNode) *OptionNode {
+ if name == nil {
+ panic("name is nil")
+ }
+ if equals == nil {
+ panic("equals is nil")
+ }
+ if val == nil {
+ panic("val is nil")
+ }
+ children := []Node{name, equals, val}
+ return &OptionNode{
+ compositeNode: compositeNode{
+ children: children,
+ },
+ Name: name,
+ Equals: equals,
+ Val: val,
+ }
+}
+
+func (n *OptionNode) GetName() Node {
+ return n.Name
+}
+
+func (n *OptionNode) GetValue() ValueNode {
+ return n.Val
+}
+
+// OptionNameNode represents an option name or even a traversal through message
+// types to name a nested option field. Example:
+//
+// (foo.bar).baz.(bob)
+type OptionNameNode struct {
+ compositeNode
+ Parts []*FieldReferenceNode
+ // Dots represent the separating '.' characters between name parts. The
+ // length of this slice must be exactly len(Parts)-1, each item in Parts
+ // having a corresponding item in this slice *except the last* (since a
+ // trailing dot is not allowed).
+ //
+ // These do *not* include dots that are inside of an extension name. For
+ // example: (foo.bar).baz.(bob) has three parts:
+ // 1. (foo.bar) - an extension name
+ // 2. baz - a regular field in foo.bar
+ // 3. (bob) - an extension field in baz
+ // Note that the dot in foo.bar will thus not be present in Dots but is
+ // instead in Parts[0].
+ Dots []*RuneNode
+}
+
+// NewOptionNameNode creates a new *OptionNameNode. The dots arg must have a
+// length that is one less than the length of parts. The parts arg must not be
+// empty.
+func NewOptionNameNode(parts []*FieldReferenceNode, dots []*RuneNode) *OptionNameNode {
+ if len(parts) == 0 {
+ panic("must have at least one part")
+ }
+ if len(dots) != len(parts)-1 {
+ panic(fmt.Sprintf("%d parts requires %d dots, not %d", len(parts), len(parts)-1, len(dots)))
+ }
+ children := make([]Node, 0, len(parts)*2-1)
+ for i, part := range parts {
+ if part == nil {
+ panic(fmt.Sprintf("parts[%d] is nil", i))
+ }
+ if i > 0 {
+ if dots[i-1] == nil {
+ panic(fmt.Sprintf("dots[%d] is nil", i-1))
+ }
+ children = append(children, dots[i-1])
+ }
+ children = append(children, part)
+ }
+ return &OptionNameNode{
+ compositeNode: compositeNode{
+ children: children,
+ },
+ Parts: parts,
+ Dots: dots,
+ }
+}
+
+// FieldReferenceNode is a reference to a field name. It can indicate a regular
+// field (simple unqualified name), an extension field (possibly-qualified name
+// that is enclosed either in brackets or parentheses), or an "any" type
+// reference (a type URL in the form "server.host/fully.qualified.Name" that is
+// enclosed in brackets).
+//
+// Extension names are used in options to refer to custom options (which are
+// actually extensions), in which case the name is enclosed in parentheses "("
+// and ")". They can also be used to refer to extension fields of options.
+//
+// Extension names are also used in message literals to set extension fields,
+// in which case the name is enclosed in square brackets "[" and "]".
+//
+// "Any" type references can only be used in message literals, and are not
+// allowed in option names. They are always enclosed in square brackets. An
+// "any" type reference is distinguished from an extension name by the presence
+// of a slash, which must be present in an "any" type reference and must be
+// absent in an extension name.
+//
+// Examples:
+//
+// foobar
+// (foo.bar)
+// [foo.bar]
+// [type.googleapis.com/foo.bar]
+type FieldReferenceNode struct {
+ compositeNode
+ Open *RuneNode // only present for extension names and "any" type references
+
+ // only present for "any" type references
+ UrlPrefix IdentValueNode
+ Slash *RuneNode
+
+ Name IdentValueNode
+
+ Close *RuneNode // only present for extension names and "any" type references
+}
+
+// NewFieldReferenceNode creates a new *FieldReferenceNode for a regular field.
+// The name arg must not be nil.
+func NewFieldReferenceNode(name *IdentNode) *FieldReferenceNode {
+ if name == nil {
+ panic("name is nil")
+ }
+ children := []Node{name}
+ return &FieldReferenceNode{
+ compositeNode: compositeNode{
+ children: children,
+ },
+ Name: name,
+ }
+}
+
+// NewExtensionFieldReferenceNode creates a new *FieldReferenceNode for an
+// extension field. All args must be non-nil. The openSym and closeSym runes
+// should be "(" and ")" or "[" and "]".
+func NewExtensionFieldReferenceNode(openSym *RuneNode, name IdentValueNode, closeSym *RuneNode) *FieldReferenceNode {
+ if name == nil {
+ panic("name is nil")
+ }
+ if openSym == nil {
+ panic("openSym is nil")
+ }
+ if closeSym == nil {
+ panic("closeSym is nil")
+ }
+ children := []Node{openSym, name, closeSym}
+ return &FieldReferenceNode{
+ compositeNode: compositeNode{
+ children: children,
+ },
+ Open: openSym,
+ Name: name,
+ Close: closeSym,
+ }
+}
+
+// NewAnyTypeReferenceNode creates a new *FieldReferenceNode for an "any"
+// type reference. All args must be non-nil. The openSym and closeSym runes
+// should be "[" and "]". The slashSym run should be "/".
+func NewAnyTypeReferenceNode(openSym *RuneNode, urlPrefix IdentValueNode, slashSym *RuneNode, name IdentValueNode, closeSym *RuneNode) *FieldReferenceNode {
+ if name == nil {
+ panic("name is nil")
+ }
+ if openSym == nil {
+ panic("openSym is nil")
+ }
+ if closeSym == nil {
+ panic("closeSym is nil")
+ }
+ if urlPrefix == nil {
+ panic("urlPrefix is nil")
+ }
+ if slashSym == nil {
+ panic("slashSym is nil")
+ }
+ children := []Node{openSym, urlPrefix, slashSym, name, closeSym}
+ return &FieldReferenceNode{
+ compositeNode: compositeNode{
+ children: children,
+ },
+ Open: openSym,
+ UrlPrefix: urlPrefix,
+ Slash: slashSym,
+ Name: name,
+ Close: closeSym,
+ }
+}
+
+// IsExtension reports if this is an extension name or not (e.g. enclosed in
+// punctuation, such as parentheses or brackets).
+func (a *FieldReferenceNode) IsExtension() bool {
+ return a.Open != nil && a.Slash == nil
+}
+
+// IsAnyTypeReference reports if this is an "any" type reference (a type URL
+// enclosed in square brackets), as indicated by the presence of a slash.
+func (a *FieldReferenceNode) IsAnyTypeReference() bool {
+ return a.Slash != nil
+}
+
+func (a *FieldReferenceNode) Value() string {
+ if a.Open != nil {
+ if a.Slash != nil {
+ return string(a.Open.Rune) + string(a.UrlPrefix.AsIdentifier()) + string(a.Slash.Rune) + string(a.Name.AsIdentifier()) + string(a.Close.Rune)
+ }
+ return string(a.Open.Rune) + string(a.Name.AsIdentifier()) + string(a.Close.Rune)
+ } else {
+ return string(a.Name.AsIdentifier())
+ }
+}
+
+// CompactOptionsNode represents a compact options declaration, as used with
+// fields, enum values, and extension ranges. Example:
+//
+// [deprecated = true, json_name = "foo_bar"]
+type CompactOptionsNode struct {
+ compositeNode
+ OpenBracket *RuneNode
+ Options []*OptionNode
+ // Commas represent the separating ',' characters between options. The
+ // length of this slice must be exactly len(Options)-1, with each item
+ // in Options having a corresponding item in this slice *except the last*
+ // (since a trailing comma is not allowed).
+ Commas []*RuneNode
+ CloseBracket *RuneNode
+}
+
+// NewCompactOptionsNode creates a *CompactOptionsNode. All args must be
+// non-nil. The commas arg must have a length that is one less than the
+// length of opts. The opts arg must not be empty.
+func NewCompactOptionsNode(openBracket *RuneNode, opts []*OptionNode, commas []*RuneNode, closeBracket *RuneNode) *CompactOptionsNode {
+ if openBracket == nil {
+ panic("openBracket is nil")
+ }
+ if closeBracket == nil {
+ panic("closeBracket is nil")
+ }
+ if len(opts) == 0 {
+ panic("must have at least one part")
+ }
+ if len(commas) != len(opts)-1 {
+ panic(fmt.Sprintf("%d opts requires %d commas, not %d", len(opts), len(opts)-1, len(commas)))
+ }
+ children := make([]Node, 0, len(opts)*2+1)
+ children = append(children, openBracket)
+ for i, opt := range opts {
+ if i > 0 {
+ if commas[i-1] == nil {
+ panic(fmt.Sprintf("commas[%d] is nil", i-1))
+ }
+ children = append(children, commas[i-1])
+ }
+ if opt == nil {
+ panic(fmt.Sprintf("opts[%d] is nil", i))
+ }
+ children = append(children, opt)
+ }
+ children = append(children, closeBracket)
+
+ return &CompactOptionsNode{
+ compositeNode: compositeNode{
+ children: children,
+ },
+ OpenBracket: openBracket,
+ Options: opts,
+ Commas: commas,
+ CloseBracket: closeBracket,
+ }
+}
+
+func (e *CompactOptionsNode) GetElements() []*OptionNode {
+ if e == nil {
+ return nil
+ }
+ return e.Options
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/print.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/print.go
new file mode 100644
index 0000000..271200c
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/print.go
@@ -0,0 +1,86 @@
+package ast
+
+import "io"
+
+// Print prints the given AST node to the given output. This operation
+// basically walks the AST and, for each TerminalNode, prints the node's
+// leading comments, leading whitespace, the node's raw text, and then
+// any trailing comments. If the given node is a *FileNode, it will then
+// also print the file's FinalComments and FinalWhitespace.
+func Print(w io.Writer, node Node) error {
+ sw, ok := w.(stringWriter)
+ if !ok {
+ sw = &strWriter{w}
+ }
+ var err error
+ Walk(node, func(n Node) (bool, VisitFunc) {
+ if err != nil {
+ return false, nil
+ }
+ token, ok := n.(TerminalNode)
+ if !ok {
+ return true, nil
+ }
+
+ err = printComments(sw, token.LeadingComments())
+ if err != nil {
+ return false, nil
+ }
+
+ _, err = sw.WriteString(token.LeadingWhitespace())
+ if err != nil {
+ return false, nil
+ }
+
+ _, err = sw.WriteString(token.RawText())
+ if err != nil {
+ return false, nil
+ }
+
+ err = printComments(sw, token.TrailingComments())
+ return false, nil
+ })
+ if err != nil {
+ return err
+ }
+
+ if file, ok := node.(*FileNode); ok {
+ err = printComments(sw, file.FinalComments)
+ if err != nil {
+ return err
+ }
+ _, err = sw.WriteString(file.FinalWhitespace)
+ return err
+ }
+
+ return nil
+}
+
+func printComments(sw stringWriter, comments []Comment) error {
+ for _, comment := range comments {
+ if _, err := sw.WriteString(comment.LeadingWhitespace); err != nil {
+ return err
+ }
+ if _, err := sw.WriteString(comment.Text); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// many io.Writer impls also provide a string-based method
+type stringWriter interface {
+ WriteString(s string) (n int, err error)
+}
+
+// adapter, in case the given writer does NOT provide a string-based method
+type strWriter struct {
+ io.Writer
+}
+
+func (s *strWriter) WriteString(str string) (int, error) {
+ if str == "" {
+ return 0, nil
+ }
+ return s.Write([]byte(str))
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/ranges.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/ranges.go
new file mode 100644
index 0000000..cdd78ba
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/ranges.go
@@ -0,0 +1,305 @@
+package ast
+
+import "fmt"
+
+// ExtensionRangeNode represents an extension range declaration in an extendable
+// message. Example:
+//
+// extensions 100 to max;
+type ExtensionRangeNode struct {
+ compositeNode
+ Keyword *KeywordNode
+ Ranges []*RangeNode
+ // Commas represent the separating ',' characters between ranges. The
+ // length of this slice must be exactly len(Ranges)-1, each item in Ranges
+ // having a corresponding item in this slice *except the last* (since a
+ // trailing comma is not allowed).
+ Commas []*RuneNode
+ Options *CompactOptionsNode
+ Semicolon *RuneNode
+}
+
+func (e *ExtensionRangeNode) msgElement() {}
+
+// NewExtensionRangeNode creates a new *ExtensionRangeNode. All args must be
+// non-nil except opts, which may be nil.
+// - keyword: The token corresponding to the "extensions" keyword.
+// - ranges: One or more range expressions.
+// - commas: Tokens that represent the "," runes that delimit the range expressions.
+// The length of commas must be one less than the length of ranges.
+// - opts: The node corresponding to options that apply to each of the ranges.
+// - semicolon: The token corresponding to the ";" rune that ends the declaration.
+func NewExtensionRangeNode(keyword *KeywordNode, ranges []*RangeNode, commas []*RuneNode, opts *CompactOptionsNode, semicolon *RuneNode) *ExtensionRangeNode {
+ if keyword == nil {
+ panic("keyword is nil")
+ }
+ if semicolon == nil {
+ panic("semicolon is nil")
+ }
+ if len(ranges) == 0 {
+ panic("must have at least one range")
+ }
+ if len(commas) != len(ranges)-1 {
+ panic(fmt.Sprintf("%d ranges requires %d commas, not %d", len(ranges), len(ranges)-1, len(commas)))
+ }
+ numChildren := len(ranges)*2 + 1
+ if opts != nil {
+ numChildren++
+ }
+ children := make([]Node, 0, numChildren)
+ children = append(children, keyword)
+ for i, rng := range ranges {
+ if i > 0 {
+ if commas[i-1] == nil {
+ panic(fmt.Sprintf("commas[%d] is nil", i-1))
+ }
+ children = append(children, commas[i-1])
+ }
+ if rng == nil {
+ panic(fmt.Sprintf("ranges[%d] is nil", i))
+ }
+ children = append(children, rng)
+ }
+ if opts != nil {
+ children = append(children, opts)
+ }
+ children = append(children, semicolon)
+ return &ExtensionRangeNode{
+ compositeNode: compositeNode{
+ children: children,
+ },
+ Keyword: keyword,
+ Ranges: ranges,
+ Commas: commas,
+ Options: opts,
+ Semicolon: semicolon,
+ }
+}
+
+// RangeDeclNode is a placeholder interface for AST nodes that represent
+// numeric ranges. This allows NoSourceNode to be used in place of *RangeNode
+// for some usages.
+type RangeDeclNode interface {
+ Node
+ RangeStart() Node
+ RangeEnd() Node
+}
+
+var _ RangeDeclNode = (*RangeNode)(nil)
+var _ RangeDeclNode = NoSourceNode{}
+
+// RangeNode represents a range expression, used in both extension ranges and
+// reserved ranges. Example:
+//
+// 1000 to max
+type RangeNode struct {
+ compositeNode
+ StartVal IntValueNode
+ // if To is non-nil, then exactly one of EndVal or Max must also be non-nil
+ To *KeywordNode
+ // EndVal and Max are mutually exclusive
+ EndVal IntValueNode
+ Max *KeywordNode
+}
+
+// NewRangeNode creates a new *RangeNode. The start argument must be non-nil.
+// The to argument represents the "to" keyword. If present (i.e. if it is non-nil),
+// then so must be exactly one of end or max. If max is non-nil, it indicates a
+// "100 to max" style range. But if end is non-nil, the end of the range is a
+// literal, such as "100 to 200".
+func NewRangeNode(start IntValueNode, to *KeywordNode, end IntValueNode, max *KeywordNode) *RangeNode {
+ if start == nil {
+ panic("start is nil")
+ }
+ numChildren := 1
+ if to != nil {
+ if end == nil && max == nil {
+ panic("to is not nil, but end and max both are")
+ }
+ if end != nil && max != nil {
+ panic("end and max cannot be both non-nil")
+ }
+ numChildren = 3
+ } else {
+ if end != nil {
+ panic("to is nil, but end is not")
+ }
+ if max != nil {
+ panic("to is nil, but max is not")
+ }
+ }
+ children := make([]Node, 0, numChildren)
+ children = append(children, start)
+ if to != nil {
+ children = append(children, to)
+ if end != nil {
+ children = append(children, end)
+ } else {
+ children = append(children, max)
+ }
+ }
+ return &RangeNode{
+ compositeNode: compositeNode{
+ children: children,
+ },
+ StartVal: start,
+ To: to,
+ EndVal: end,
+ Max: max,
+ }
+}
+
+func (n *RangeNode) RangeStart() Node {
+ return n.StartVal
+}
+
+func (n *RangeNode) RangeEnd() Node {
+ if n.Max != nil {
+ return n.Max
+ }
+ if n.EndVal != nil {
+ return n.EndVal
+ }
+ return n.StartVal
+}
+
+func (n *RangeNode) StartValue() interface{} {
+ return n.StartVal.Value()
+}
+
+func (n *RangeNode) StartValueAsInt32(min, max int32) (int32, bool) {
+ return AsInt32(n.StartVal, min, max)
+}
+
+func (n *RangeNode) EndValue() interface{} {
+ if n.EndVal == nil {
+ return nil
+ }
+ return n.EndVal.Value()
+}
+
+func (n *RangeNode) EndValueAsInt32(min, max int32) (int32, bool) {
+ if n.Max != nil {
+ return max, true
+ }
+ if n.EndVal == nil {
+ return n.StartValueAsInt32(min, max)
+ }
+ return AsInt32(n.EndVal, min, max)
+}
+
+// ReservedNode represents a reserved declaration, which can be used to reserve
+// either names or numbers. Examples:
+//
+// reserved 1, 10-12, 15;
+// reserved "foo", "bar", "baz";
+type ReservedNode struct {
+ compositeNode
+ Keyword *KeywordNode
+ // If non-empty, this node represents reserved ranges and Names will be empty.
+ Ranges []*RangeNode
+ // If non-empty, this node represents reserved names and Ranges will be empty.
+ Names []StringValueNode
+// Commas represent the separating ',' characters between ranges or names. The
+ // length of this slice must be exactly len(Ranges)-1 or len(Names)-1, depending
+ // on whether this node represents reserved ranges or reserved names. Each item
+ // in Ranges or Names has a corresponding item in this slice *except the last*
+ // (since a trailing comma is not allowed).
+ Commas []*RuneNode
+ Semicolon *RuneNode
+}
+
+func (*ReservedNode) msgElement() {}
+func (*ReservedNode) enumElement() {}
+
+// NewReservedRangesNode creates a new *ReservedNode that represents reserved
+// numeric ranges. All args must be non-nil.
+// - keyword: The token corresponding to the "reserved" keyword.
+// - ranges: One or more range expressions.
+// - commas: Tokens that represent the "," runes that delimit the range expressions.
+// The length of commas must be one less than the length of ranges.
+// - semicolon: The token corresponding to the ";" rune that ends the declaration.
+func NewReservedRangesNode(keyword *KeywordNode, ranges []*RangeNode, commas []*RuneNode, semicolon *RuneNode) *ReservedNode {
+ if keyword == nil {
+ panic("keyword is nil")
+ }
+ if semicolon == nil {
+ panic("semicolon is nil")
+ }
+ if len(ranges) == 0 {
+ panic("must have at least one range")
+ }
+ if len(commas) != len(ranges)-1 {
+ panic(fmt.Sprintf("%d ranges requires %d commas, not %d", len(ranges), len(ranges)-1, len(commas)))
+ }
+ children := make([]Node, 0, len(ranges)*2+1)
+ children = append(children, keyword)
+ for i, rng := range ranges {
+ if i > 0 {
+ if commas[i-1] == nil {
+ panic(fmt.Sprintf("commas[%d] is nil", i-1))
+ }
+ children = append(children, commas[i-1])
+ }
+ if rng == nil {
+ panic(fmt.Sprintf("ranges[%d] is nil", i))
+ }
+ children = append(children, rng)
+ }
+ children = append(children, semicolon)
+ return &ReservedNode{
+ compositeNode: compositeNode{
+ children: children,
+ },
+ Keyword: keyword,
+ Ranges: ranges,
+ Commas: commas,
+ Semicolon: semicolon,
+ }
+}
+
+// NewReservedNamesNode creates a new *ReservedNode that represents reserved
+// names. All args must be non-nil.
+// - keyword: The token corresponding to the "reserved" keyword.
+// - names: One or more names.
+// - commas: Tokens that represent the "," runes that delimit the names.
+// The length of commas must be one less than the length of names.
+// - semicolon: The token corresponding to the ";" rune that ends the declaration.
+func NewReservedNamesNode(keyword *KeywordNode, names []StringValueNode, commas []*RuneNode, semicolon *RuneNode) *ReservedNode {
+ if keyword == nil {
+ panic("keyword is nil")
+ }
+ if semicolon == nil {
+ panic("semicolon is nil")
+ }
+ if len(names) == 0 {
+ panic("must have at least one name")
+ }
+ if len(commas) != len(names)-1 {
+ panic(fmt.Sprintf("%d names requires %d commas, not %d", len(names), len(names)-1, len(commas)))
+ }
+ children := make([]Node, 0, len(names)*2+1)
+ children = append(children, keyword)
+ for i, name := range names {
+ if i > 0 {
+ if commas[i-1] == nil {
+ panic(fmt.Sprintf("commas[%d] is nil", i-1))
+ }
+ children = append(children, commas[i-1])
+ }
+ if name == nil {
+ panic(fmt.Sprintf("names[%d] is nil", i))
+ }
+ children = append(children, name)
+ }
+ children = append(children, semicolon)
+ return &ReservedNode{
+ compositeNode: compositeNode{
+ children: children,
+ },
+ Keyword: keyword,
+ Names: names,
+ Commas: commas,
+ Semicolon: semicolon,
+ }
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/service.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/service.go
new file mode 100644
index 0000000..739b29c
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/service.go
@@ -0,0 +1,273 @@
+package ast
+
+import "fmt"
+
+// ServiceNode represents a service declaration. Example:
+//
+// service Foo {
+// rpc Bar (Baz) returns (Bob);
+// rpc Frobnitz (stream Parts) returns (Gyzmeaux);
+// }
+type ServiceNode struct {
+ compositeNode
+ Keyword *KeywordNode
+ Name *IdentNode
+ OpenBrace *RuneNode
+ Decls []ServiceElement
+ CloseBrace *RuneNode
+}
+
+func (*ServiceNode) fileElement() {}
+
+// NewServiceNode creates a new *ServiceNode. All arguments must be non-nil.
+// - keyword: The token corresponding to the "service" keyword.
+// - name: The token corresponding to the service's name.
+// - openBrace: The token corresponding to the "{" rune that starts the body.
+// - decls: All declarations inside the service body.
+// - closeBrace: The token corresponding to the "}" rune that ends the body.
+func NewServiceNode(keyword *KeywordNode, name *IdentNode, openBrace *RuneNode, decls []ServiceElement, closeBrace *RuneNode) *ServiceNode {
+ if keyword == nil {
+ panic("keyword is nil")
+ }
+ if name == nil {
+ panic("name is nil")
+ }
+ if openBrace == nil {
+ panic("openBrace is nil")
+ }
+ if closeBrace == nil {
+ panic("closeBrace is nil")
+ }
+ children := make([]Node, 0, 4+len(decls))
+ children = append(children, keyword, name, openBrace)
+ for _, decl := range decls {
+ children = append(children, decl)
+ }
+ children = append(children, closeBrace)
+
+ for _, decl := range decls {
+ switch decl := decl.(type) {
+ case *OptionNode, *RPCNode, *EmptyDeclNode:
+ default:
+ panic(fmt.Sprintf("invalid ServiceElement type: %T", decl))
+ }
+ }
+
+ return &ServiceNode{
+ compositeNode: compositeNode{
+ children: children,
+ },
+ Keyword: keyword,
+ Name: name,
+ OpenBrace: openBrace,
+ Decls: decls,
+ CloseBrace: closeBrace,
+ }
+}
+
+// ServiceElement is an interface implemented by all AST nodes that can
+// appear in the body of a service declaration.
+type ServiceElement interface {
+ Node
+ serviceElement()
+}
+
+var _ ServiceElement = (*OptionNode)(nil)
+var _ ServiceElement = (*RPCNode)(nil)
+var _ ServiceElement = (*EmptyDeclNode)(nil)
+
+// RPCDeclNode is a placeholder interface for AST nodes that represent RPC
+// declarations. This allows NoSourceNode to be used in place of *RPCNode
+// for some usages.
+type RPCDeclNode interface {
+ Node
+ GetInputType() Node
+ GetOutputType() Node
+}
+
+var _ RPCDeclNode = (*RPCNode)(nil)
+var _ RPCDeclNode = NoSourceNode{}
+
+// RPCNode represents an RPC declaration. Example:
+//
+// rpc Foo (Bar) returns (Baz);
+type RPCNode struct {
+ compositeNode
+ Keyword *KeywordNode
+ Name *IdentNode
+ Input *RPCTypeNode
+ Returns *KeywordNode
+ Output *RPCTypeNode
+ Semicolon *RuneNode
+ OpenBrace *RuneNode
+ Decls []RPCElement
+ CloseBrace *RuneNode
+}
+
+func (n *RPCNode) serviceElement() {}
+
+// NewRPCNode creates a new *RPCNode with no body. All arguments must be non-nil.
+// - keyword: The token corresponding to the "rpc" keyword.
+// - name: The token corresponding to the RPC's name.
+// - input: The token corresponding to the RPC input message type.
+// - returns: The token corresponding to the "returns" keyword that precedes the output type.
+// - output: The token corresponding to the RPC output message type.
+// - semicolon: The token corresponding to the ";" rune that ends the declaration.
+func NewRPCNode(keyword *KeywordNode, name *IdentNode, input *RPCTypeNode, returns *KeywordNode, output *RPCTypeNode, semicolon *RuneNode) *RPCNode {
+ if keyword == nil {
+ panic("keyword is nil")
+ }
+ if name == nil {
+ panic("name is nil")
+ }
+ if input == nil {
+ panic("input is nil")
+ }
+ if returns == nil {
+ panic("returns is nil")
+ }
+ if output == nil {
+ panic("output is nil")
+ }
+ if semicolon == nil {
+ panic("semicolon is nil")
+ }
+ children := []Node{keyword, name, input, returns, output, semicolon}
+ return &RPCNode{
+ compositeNode: compositeNode{
+ children: children,
+ },
+ Keyword: keyword,
+ Name: name,
+ Input: input,
+ Returns: returns,
+ Output: output,
+ Semicolon: semicolon,
+ }
+}
+
+// NewRPCNodeWithBody creates a new *RPCNode that includes a body (and possibly
+// options). All arguments must be non-nil.
+// - keyword: The token corresponding to the "rpc" keyword.
+// - name: The token corresponding to the RPC's name.
+// - input: The token corresponding to the RPC input message type.
+// - returns: The token corresponding to the "returns" keyword that precedes the output type.
+// - output: The token corresponding to the RPC output message type.
+// - openBrace: The token corresponding to the "{" rune that starts the body.
+// - decls: All declarations inside the RPC body.
+// - closeBrace: The token corresponding to the "}" rune that ends the body.
+func NewRPCNodeWithBody(keyword *KeywordNode, name *IdentNode, input *RPCTypeNode, returns *KeywordNode, output *RPCTypeNode, openBrace *RuneNode, decls []RPCElement, closeBrace *RuneNode) *RPCNode {
+ if keyword == nil {
+ panic("keyword is nil")
+ }
+ if name == nil {
+ panic("name is nil")
+ }
+ if input == nil {
+ panic("input is nil")
+ }
+ if returns == nil {
+ panic("returns is nil")
+ }
+ if output == nil {
+ panic("output is nil")
+ }
+ if openBrace == nil {
+ panic("openBrace is nil")
+ }
+ if closeBrace == nil {
+ panic("closeBrace is nil")
+ }
+ children := make([]Node, 0, 7+len(decls))
+ children = append(children, keyword, name, input, returns, output, openBrace)
+ for _, decl := range decls {
+ children = append(children, decl)
+ }
+ children = append(children, closeBrace)
+
+ for _, decl := range decls {
+ switch decl := decl.(type) {
+ case *OptionNode, *EmptyDeclNode:
+ default:
+ panic(fmt.Sprintf("invalid RPCElement type: %T", decl))
+ }
+ }
+
+ return &RPCNode{
+ compositeNode: compositeNode{
+ children: children,
+ },
+ Keyword: keyword,
+ Name: name,
+ Input: input,
+ Returns: returns,
+ Output: output,
+ OpenBrace: openBrace,
+ Decls: decls,
+ CloseBrace: closeBrace,
+ }
+}
+
+func (n *RPCNode) GetInputType() Node {
+ return n.Input.MessageType
+}
+
+func (n *RPCNode) GetOutputType() Node {
+ return n.Output.MessageType
+}
+
+// RPCElement is an interface implemented by all AST nodes that can
+// appear in the body of an rpc declaration (aka method).
+type RPCElement interface {
+ Node
+ methodElement()
+}
+
+var _ RPCElement = (*OptionNode)(nil)
+var _ RPCElement = (*EmptyDeclNode)(nil)
+
+// RPCTypeNode represents the declaration of a request or response type for an
+// RPC. Example:
+//
+// (stream foo.Bar)
+type RPCTypeNode struct {
+ compositeNode
+ OpenParen *RuneNode
+ Stream *KeywordNode
+ MessageType IdentValueNode
+ CloseParen *RuneNode
+}
+
+// NewRPCTypeNode creates a new *RPCTypeNode. All arguments must be non-nil
+// except stream, which may be nil.
+// - openParen: The token corresponding to the "(" rune that starts the declaration.
+// - stream: The token corresponding to the "stream" keyword or nil if not present.
+// - msgType: The token corresponding to the message type's name.
+// - closeParen: The token corresponding to the ")" rune that ends the declaration.
+func NewRPCTypeNode(openParen *RuneNode, stream *KeywordNode, msgType IdentValueNode, closeParen *RuneNode) *RPCTypeNode {
+ if openParen == nil {
+ panic("openParen is nil")
+ }
+ if msgType == nil {
+ panic("msgType is nil")
+ }
+ if closeParen == nil {
+ panic("closeParen is nil")
+ }
+ var children []Node
+ if stream != nil {
+ children = []Node{openParen, stream, msgType, closeParen}
+ } else {
+ children = []Node{openParen, msgType, closeParen}
+ }
+
+ return &RPCTypeNode{
+ compositeNode: compositeNode{
+ children: children,
+ },
+ OpenParen: openParen,
+ Stream: stream,
+ MessageType: msgType,
+ CloseParen: closeParen,
+ }
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/source_pos.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/source_pos.go
new file mode 100644
index 0000000..8ab09c6
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/source_pos.go
@@ -0,0 +1,29 @@
+package ast
+
+import (
+ "github.com/bufbuild/protocompile/ast"
+)
+
+// SourcePos identifies a location in a proto source file.
+type SourcePos = ast.SourcePos
+
+// PosRange is a range of positions in a source file that indicates
+// the span of some region of source, such as a single token or
+// a sub-tree of the AST.
+type PosRange struct {
+ Start, End SourcePos
+}
+
+// Comment represents a single comment in a source file. It indicates
+// the position of the comment and its contents.
+type Comment struct {
+ // The location of the comment in the source file.
+ PosRange
+ // Any whitespace between the prior lexical element (either a token
+ // or other comment) and this comment.
+ LeadingWhitespace string
+ // The text of the comment, including any "//" or "/*" and "*/"
+ // symbols at the start and end. Single-line comments will include
+ // the trailing newline rune in Text.
+ Text string
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/values.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/values.go
new file mode 100644
index 0000000..91f5a35
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/values.go
@@ -0,0 +1,575 @@
+package ast
+
+import (
+ "fmt"
+ "math"
+ "strings"
+)
+
+// ValueNode is an AST node that represents a literal value.
+//
+// It also includes references (e.g. IdentifierValueNode), which can be
+// used as values in some contexts, such as describing the default value
+// for a field, which can refer to an enum value.
+//
+// This also allows NoSourceNode to be used in place of a real value node
+// for some usages.
+type ValueNode interface {
+	Node
+	// Value returns a Go representation of the value. For scalars, this
+	// will be a string, int64, uint64, float64, or bool. This could also
+	// be an Identifier (e.g. IdentValueNodes). It can also be a composite
+	// literal:
+	//   * For array literals, the type returned will be []ValueNode
+	//   * For message literals, the type returned will be []*MessageFieldNode
+	Value() interface{}
+}
+
+// Compile-time checks that every value node type implements ValueNode.
+var _ ValueNode = (*IdentNode)(nil)
+var _ ValueNode = (*CompoundIdentNode)(nil)
+var _ ValueNode = (*StringLiteralNode)(nil)
+var _ ValueNode = (*CompoundStringLiteralNode)(nil)
+var _ ValueNode = (*UintLiteralNode)(nil)
+var _ ValueNode = (*PositiveUintLiteralNode)(nil)
+var _ ValueNode = (*NegativeIntLiteralNode)(nil)
+var _ ValueNode = (*FloatLiteralNode)(nil)
+var _ ValueNode = (*SpecialFloatLiteralNode)(nil)
+var _ ValueNode = (*SignedFloatLiteralNode)(nil)
+var _ ValueNode = (*BoolLiteralNode)(nil)
+var _ ValueNode = (*ArrayLiteralNode)(nil)
+var _ ValueNode = (*MessageLiteralNode)(nil)
+var _ ValueNode = NoSourceNode{}
+
+// StringValueNode is an AST node that represents a string literal.
+// Such a node can be a single literal (*StringLiteralNode) or a
+// concatenation of multiple literals (*CompoundStringLiteralNode).
+type StringValueNode interface {
+	ValueNode
+	AsString() string
+}
+
+// Compile-time checks that both string literal node types implement
+// StringValueNode.
+var _ StringValueNode = (*StringLiteralNode)(nil)
+var _ StringValueNode = (*CompoundStringLiteralNode)(nil)
+
+// StringLiteralNode represents a simple string literal. Example:
+//
+//	"proto2"
+type StringLiteralNode struct {
+	terminalNode
+	// Val is the actual string value that the literal indicates.
+	Val string
+}
+
+// NewStringLiteralNode creates a new *StringLiteralNode with the given val.
+func NewStringLiteralNode(val string, info TokenInfo) *StringLiteralNode {
+	return &StringLiteralNode{
+		terminalNode: info.asTerminalNode(),
+		Val:          val,
+	}
+}
+
+// Value implements ValueNode, returning the string value.
+func (n *StringLiteralNode) Value() interface{} {
+	return n.AsString()
+}
+
+// AsString implements StringValueNode, returning the string value
+// that the literal indicates.
+func (n *StringLiteralNode) AsString() string {
+	return n.Val
+}
+
+// CompoundStringLiteralNode represents a compound string literal, which is
+// the concatenation of adjacent string literals. Example:
+//
+//	"this " "is" " all one " "string"
+type CompoundStringLiteralNode struct {
+	compositeNode
+	// Val is the concatenation of the values of all component literals.
+	Val string
+}
+
+// NewCompoundLiteralStringNode creates a new *CompoundStringLiteralNode that
+// consists of the given string components. The components argument may not be
+// empty.
+func NewCompoundLiteralStringNode(components ...*StringLiteralNode) *CompoundStringLiteralNode {
+	if len(components) == 0 {
+		panic("must have at least one component")
+	}
+	children := make([]Node, len(components))
+	// Collect the children and concatenate their values in one pass.
+	var b strings.Builder
+	for i, comp := range components {
+		children[i] = comp
+		b.WriteString(comp.Val)
+	}
+	return &CompoundStringLiteralNode{
+		compositeNode: compositeNode{
+			children: children,
+		},
+		Val: b.String(),
+	}
+}
+
+// Value implements ValueNode, returning the concatenated string value.
+func (n *CompoundStringLiteralNode) Value() interface{} {
+	return n.AsString()
+}
+
+// AsString implements StringValueNode, returning the concatenation of
+// all component literal values.
+func (n *CompoundStringLiteralNode) AsString() string {
+	return n.Val
+}
+
+// IntValueNode is an AST node that represents an integer literal. If
+// an integer literal is too large for an int64 (or uint64 for
+// positive literals), it is represented instead by a FloatValueNode.
+type IntValueNode interface {
+	ValueNode
+	AsInt64() (int64, bool)
+	AsUint64() (uint64, bool)
+}
+
+// AsInt32 range-checks the given int value and returns its value and
+// true if it is in the range [min, max], or (0, false) if it is outside
+// that range or cannot be represented as an int64 at all.
+func AsInt32(n IntValueNode, min, max int32) (int32, bool) {
+	i, ok := n.AsInt64()
+	if !ok {
+		return 0, false
+	}
+	if i < int64(min) || i > int64(max) {
+		return 0, false
+	}
+	return int32(i), true
+}
+
+// Compile-time checks that all int literal node types implement IntValueNode.
+var _ IntValueNode = (*UintLiteralNode)(nil)
+var _ IntValueNode = (*PositiveUintLiteralNode)(nil)
+var _ IntValueNode = (*NegativeIntLiteralNode)(nil)
+
+// UintLiteralNode represents a simple integer literal with no sign character.
+type UintLiteralNode struct {
+	terminalNode
+	// Val is the numeric value indicated by the literal
+	Val uint64
+}
+
+// NewUintLiteralNode creates a new *UintLiteralNode with the given val.
+func NewUintLiteralNode(val uint64, info TokenInfo) *UintLiteralNode {
+	return &UintLiteralNode{
+		terminalNode: info.asTerminalNode(),
+		Val:          val,
+	}
+}
+
+// Value implements ValueNode, returning the uint64 value.
+func (n *UintLiteralNode) Value() interface{} {
+	return n.Val
+}
+
+// AsInt64 implements IntValueNode. It returns (0, false) if the value
+// does not fit in an int64.
+func (n *UintLiteralNode) AsInt64() (int64, bool) {
+	if n.Val > math.MaxInt64 {
+		return 0, false
+	}
+	return int64(n.Val), true
+}
+
+// AsUint64 implements IntValueNode; an unsigned literal always fits.
+func (n *UintLiteralNode) AsUint64() (uint64, bool) {
+	return n.Val, true
+}
+
+// AsFloat implements FloatValueNode by converting the value to a
+// float64 (which may lose precision for very large values).
+func (n *UintLiteralNode) AsFloat() float64 {
+	return float64(n.Val)
+}
+
+// PositiveUintLiteralNode represents an integer literal with a positive (+) sign.
+//
+// Deprecated: A valid AST will not contain a node of this type. The Protobuf
+// language does not actually allow a numeric literal to have a leading "+"
+// positive sign.
+type PositiveUintLiteralNode struct {
+	compositeNode
+	Plus *RuneNode
+	Uint *UintLiteralNode
+	// Val mirrors Uint.Val; the "+" sign does not change the value.
+	Val uint64
+}
+
+// NewPositiveUintLiteralNode creates a new *PositiveUintLiteralNode. Both
+// arguments must be non-nil.
+//
+// Deprecated: The ast.PositiveUintLiteralNode node type should not be used.
+func NewPositiveUintLiteralNode(sign *RuneNode, i *UintLiteralNode) *PositiveUintLiteralNode {
+	if sign == nil {
+		panic("sign is nil")
+	}
+	if i == nil {
+		panic("i is nil")
+	}
+	children := []Node{sign, i}
+	return &PositiveUintLiteralNode{
+		compositeNode: compositeNode{
+			children: children,
+		},
+		Plus: sign,
+		Uint: i,
+		Val:  i.Val,
+	}
+}
+
+// Value implements ValueNode, returning the uint64 value.
+func (n *PositiveUintLiteralNode) Value() interface{} {
+	return n.Val
+}
+
+// AsInt64 implements IntValueNode. It returns (0, false) if the value
+// does not fit in an int64.
+func (n *PositiveUintLiteralNode) AsInt64() (int64, bool) {
+	if n.Val > math.MaxInt64 {
+		return 0, false
+	}
+	return int64(n.Val), true
+}
+
+// AsUint64 implements IntValueNode; a positive literal always fits.
+func (n *PositiveUintLiteralNode) AsUint64() (uint64, bool) {
+	return n.Val, true
+}
+
+// NegativeIntLiteralNode represents an integer literal with a negative (-) sign.
+type NegativeIntLiteralNode struct {
+	compositeNode
+	Minus *RuneNode
+	Uint  *UintLiteralNode
+	// Val is the negated value of Uint.Val.
+	Val int64
+}
+
+// NewNegativeIntLiteralNode creates a new *NegativeIntLiteralNode. Both
+// arguments must be non-nil.
+func NewNegativeIntLiteralNode(sign *RuneNode, i *UintLiteralNode) *NegativeIntLiteralNode {
+	if sign == nil {
+		panic("sign is nil")
+	}
+	if i == nil {
+		panic("i is nil")
+	}
+	children := []Node{sign, i}
+	return &NegativeIntLiteralNode{
+		compositeNode: compositeNode{
+			children: children,
+		},
+		Minus: sign,
+		Uint:  i,
+		// NOTE(review): for i.Val == 2^63 this conversion wraps and yields
+		// math.MinInt64 (the most-negative int64); larger magnitudes are
+		// presumably represented as floats instead (see FloatValueNode) —
+		// confirm against the parser that constructs these nodes.
+		Val: -int64(i.Val),
+	}
+}
+
+// Value implements ValueNode, returning the int64 value.
+func (n *NegativeIntLiteralNode) Value() interface{} {
+	return n.Val
+}
+
+// AsInt64 implements IntValueNode; a negative literal always fits.
+func (n *NegativeIntLiteralNode) AsInt64() (int64, bool) {
+	return n.Val, true
+}
+
+// AsUint64 implements IntValueNode. It returns (0, false) for values
+// less than zero.
+func (n *NegativeIntLiteralNode) AsUint64() (uint64, bool) {
+	if n.Val < 0 {
+		return 0, false
+	}
+	return uint64(n.Val), true
+}
+
+// FloatValueNode is an AST node that represents a numeric literal with
+// a floating point, in scientific notation, or too large to fit in an
+// int64 or uint64.
+type FloatValueNode interface {
+	ValueNode
+	AsFloat() float64
+}
+
+// Compile-time checks that all float-capable node types implement
+// FloatValueNode.
+var _ FloatValueNode = (*FloatLiteralNode)(nil)
+var _ FloatValueNode = (*SpecialFloatLiteralNode)(nil)
+var _ FloatValueNode = (*UintLiteralNode)(nil)
+
+// FloatLiteralNode represents a floating point numeric literal.
+type FloatLiteralNode struct {
+	terminalNode
+	// Val is the numeric value indicated by the literal
+	Val float64
+}
+
+// NewFloatLiteralNode creates a new *FloatLiteralNode with the given val.
+func NewFloatLiteralNode(val float64, info TokenInfo) *FloatLiteralNode {
+	return &FloatLiteralNode{
+		terminalNode: info.asTerminalNode(),
+		Val:          val,
+	}
+}
+
+// Value implements ValueNode, returning the float64 value.
+func (n *FloatLiteralNode) Value() interface{} {
+	return n.AsFloat()
+}
+
+// AsFloat implements FloatValueNode, returning the float64 value.
+func (n *FloatLiteralNode) AsFloat() float64 {
+	return n.Val
+}
+
+// SpecialFloatLiteralNode represents a special floating point numeric literal
+// for "inf" and "nan" values.
+type SpecialFloatLiteralNode struct {
+	*KeywordNode
+	Val float64
+}
+
+// NewSpecialFloatLiteralNode returns a new *SpecialFloatLiteralNode for the
+// given keyword, which must be "inf" or "nan".
+func NewSpecialFloatLiteralNode(name *KeywordNode) *SpecialFloatLiteralNode {
+	var f float64
+	if name.Val == "inf" {
+		f = math.Inf(1)
+	} else {
+		// Any keyword other than "inf" (expected: "nan") maps to NaN.
+		f = math.NaN()
+	}
+	return &SpecialFloatLiteralNode{
+		KeywordNode: name,
+		Val:         f,
+	}
+}
+
+// Value implements ValueNode, returning the float64 value.
+func (n *SpecialFloatLiteralNode) Value() interface{} {
+	return n.AsFloat()
+}
+
+// AsFloat implements FloatValueNode, returning the float64 value
+// (+Inf or NaN).
+func (n *SpecialFloatLiteralNode) AsFloat() float64 {
+	return n.Val
+}
+
+// SignedFloatLiteralNode represents a signed floating point number.
+type SignedFloatLiteralNode struct {
+	compositeNode
+	Sign  *RuneNode
+	Float FloatValueNode
+	// Val is the value of Float, negated if Sign is '-'.
+	Val float64
+}
+
+// NewSignedFloatLiteralNode creates a new *SignedFloatLiteralNode. Both
+// arguments must be non-nil.
+func NewSignedFloatLiteralNode(sign *RuneNode, f FloatValueNode) *SignedFloatLiteralNode {
+	if sign == nil {
+		panic("sign is nil")
+	}
+	if f == nil {
+		panic("f is nil")
+	}
+	children := []Node{sign, f}
+	val := f.AsFloat()
+	// Only a '-' sign negates; any other sign rune leaves the value as-is.
+	if sign.Rune == '-' {
+		val = -val
+	}
+	return &SignedFloatLiteralNode{
+		compositeNode: compositeNode{
+			children: children,
+		},
+		Sign:  sign,
+		Float: f,
+		Val:   val,
+	}
+}
+
+// Value implements ValueNode, returning the (possibly negated) float64 value.
+func (n *SignedFloatLiteralNode) Value() interface{} {
+	return n.Val
+}
+
+// AsFloat implements FloatValueNode, returning the (possibly negated)
+// float64 value.
+func (n *SignedFloatLiteralNode) AsFloat() float64 {
+	return n.Val
+}
+
+// BoolLiteralNode represents a boolean literal.
+//
+// Deprecated: The AST uses IdentNode for boolean literals, where the
+// identifier value is "true" or "false". This is required because an
+// identifier "true" is not necessarily a boolean value as it could also
+// be an enum value named "true" (ditto for "false").
+type BoolLiteralNode struct {
+	*KeywordNode
+	Val bool
+}
+
+// NewBoolLiteralNode returns a new *BoolLiteralNode for the given keyword,
+// which must be "true" or "false".
+func NewBoolLiteralNode(name *KeywordNode) *BoolLiteralNode {
+	return &BoolLiteralNode{
+		KeywordNode: name,
+		// Any keyword other than "true" results in a false value.
+		Val: name.Val == "true",
+	}
+}
+
+// Value implements ValueNode, returning the bool value.
+func (n *BoolLiteralNode) Value() interface{} {
+	return n.Val
+}
+
+// ArrayLiteralNode represents an array literal, which is only allowed inside of
+// a MessageLiteralNode, to indicate values for a repeated field. Example:
+//
+//	["foo", "bar", "baz"]
+type ArrayLiteralNode struct {
+	compositeNode
+	OpenBracket *RuneNode
+	Elements    []ValueNode
+	// Commas represent the separating ',' characters between elements. The
+	// length of this slice must be exactly len(Elements)-1, with each item
+	// in Elements having a corresponding item in this slice *except the last*
+	// (since a trailing comma is not allowed).
+	Commas       []*RuneNode
+	CloseBracket *RuneNode
+}
+
+// NewArrayLiteralNode creates a new *ArrayLiteralNode. The openBracket and
+// closeBracket args must be non-nil and represent the "[" and "]" runes that
+// surround the array values. The given commas arg must have a length that is
+// one less than the length of the vals arg. However, vals may be empty, in
+// which case commas must also be empty.
+func NewArrayLiteralNode(openBracket *RuneNode, vals []ValueNode, commas []*RuneNode, closeBracket *RuneNode) *ArrayLiteralNode {
+	if openBracket == nil {
+		panic("openBracket is nil")
+	}
+	if closeBracket == nil {
+		panic("closeBracket is nil")
+	}
+	if len(vals) == 0 && len(commas) != 0 {
+		panic("vals is empty but commas is not")
+	}
+	if len(vals) > 0 && len(commas) != len(vals)-1 {
+		panic(fmt.Sprintf("%d vals requires %d commas, not %d", len(vals), len(vals)-1, len(commas)))
+	}
+	// Children interleave values and commas: "[" v0 "," v1 ... "]".
+	children := make([]Node, 0, len(vals)*2+1)
+	children = append(children, openBracket)
+	for i, val := range vals {
+		if i > 0 {
+			if commas[i-1] == nil {
+				panic(fmt.Sprintf("commas[%d] is nil", i-1))
+			}
+			children = append(children, commas[i-1])
+		}
+		if val == nil {
+			panic(fmt.Sprintf("vals[%d] is nil", i))
+		}
+		children = append(children, val)
+	}
+	children = append(children, closeBracket)
+
+	return &ArrayLiteralNode{
+		compositeNode: compositeNode{
+			children: children,
+		},
+		OpenBracket:  openBracket,
+		Elements:     vals,
+		Commas:       commas,
+		CloseBracket: closeBracket,
+	}
+}
+
+// Value implements ValueNode, returning the slice of element nodes.
+func (n *ArrayLiteralNode) Value() interface{} {
+	return n.Elements
+}
+
+// MessageLiteralNode represents a message literal, which is compatible with the
+// protobuf text format and can be used for custom options with message types.
+// Example:
+//
+//	{ foo:1 foo:2 foo:3 bar:<name:"abc" id:123> }
+type MessageLiteralNode struct {
+	compositeNode
+	Open     *RuneNode // should be '{' or '<'
+	Elements []*MessageFieldNode
+	// Separator characters between elements, which can be either ','
+	// or ';' if present. This slice must be exactly len(Elements) in
+	// length, with each item in Elements having one corresponding item
+	// in Seps. Separators in message literals are optional, so a given
+	// item in this slice may be nil to indicate absence of a separator.
+	Seps  []*RuneNode
+	Close *RuneNode // should be '}' or '>', depending on Open
+}
+
+// NewMessageLiteralNode creates a new *MessageLiteralNode. The openSym and
+// closeSym runes must not be nil and should be "{" and "}" or "<" and ">".
+//
+// Unlike separators (dots and commas) used for other AST nodes that represent
+// a list of elements, the seps arg must be the SAME length as vals, and it may
+// contain nil values to indicate absence of a separator (in fact, it could be
+// all nils).
+func NewMessageLiteralNode(openSym *RuneNode, vals []*MessageFieldNode, seps []*RuneNode, closeSym *RuneNode) *MessageLiteralNode {
+	if openSym == nil {
+		panic("openSym is nil")
+	}
+	if closeSym == nil {
+		panic("closeSym is nil")
+	}
+	if len(seps) != len(vals) {
+		// NOTE(review): the message says "commas" but the mismatched
+		// argument here is seps (which may be ',' or ';').
+		panic(fmt.Sprintf("%d vals requires %d commas, not %d", len(vals), len(vals), len(seps)))
+	}
+	// Pre-compute the child count: open + close + vals + non-nil seps.
+	numChildren := len(vals) + 2
+	for _, sep := range seps {
+		if sep != nil {
+			numChildren++
+		}
+	}
+	children := make([]Node, 0, numChildren)
+	children = append(children, openSym)
+	for i, val := range vals {
+		if val == nil {
+			panic(fmt.Sprintf("vals[%d] is nil", i))
+		}
+		children = append(children, val)
+		if seps[i] != nil {
+			children = append(children, seps[i])
+		}
+	}
+	children = append(children, closeSym)
+
+	return &MessageLiteralNode{
+		compositeNode: compositeNode{
+			children: children,
+		},
+		Open:     openSym,
+		Elements: vals,
+		Seps:     seps,
+		Close:    closeSym,
+	}
+}
+
+// Value implements ValueNode, returning the slice of field nodes.
+func (n *MessageLiteralNode) Value() interface{} {
+	return n.Elements
+}
+
+// MessageFieldNode represents a single field (name and value) inside of a
+// message literal. Example:
+//
+//	foo:"bar"
+type MessageFieldNode struct {
+	compositeNode
+	Name *FieldReferenceNode
+	// Sep represents the ':' separator between the name and value. If
+	// the value is a message literal (and thus starts with '<' or '{')
+	// or an array literal (starting with '[') then the separator is
+	// optional, and thus may be nil.
+	Sep *RuneNode
+	Val ValueNode
+}
+
+// NewMessageFieldNode creates a new *MessageFieldNode. All args except sep
+// must be non-nil.
+func NewMessageFieldNode(name *FieldReferenceNode, sep *RuneNode, val ValueNode) *MessageFieldNode {
+	if name == nil {
+		panic("name is nil")
+	}
+	if val == nil {
+		panic("val is nil")
+	}
+	// The separator is optional, so it only counts as a child if present.
+	numChildren := 2
+	if sep != nil {
+		numChildren++
+	}
+	children := make([]Node, 0, numChildren)
+	children = append(children, name)
+	if sep != nil {
+		children = append(children, sep)
+	}
+	children = append(children, val)
+
+	return &MessageFieldNode{
+		compositeNode: compositeNode{
+			children: children,
+		},
+		Name: name,
+		Sep:  sep,
+		Val:  val,
+	}
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/walk.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/walk.go
new file mode 100644
index 0000000..e9b8506
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast/walk.go
@@ -0,0 +1,497 @@
+package ast
+
+// VisitFunc is used to examine a node in the AST when walking the tree.
+// It returns true or false as to whether or not the descendants of the
+// given node should be visited. If it returns true, the node's children
+// will be visited; if false, they will not. When returning true, it
+// can also return a new VisitFunc to use for the children. If it returns
+// (true, nil), then the current function will be re-used when visiting
+// the children.
+//
+// See also the Visitor type.
+type VisitFunc func(Node) (bool, VisitFunc)
+
+// Walk conducts a walk of the AST rooted at the given root using the
+// given function. It performs a "pre-order traversal", visiting a
+// given AST node before it visits that node's descendants.
+func Walk(root Node, v VisitFunc) {
+	ok, next := v(root)
+	if !ok {
+		// Callee asked us to skip this node's descendants.
+		return
+	}
+	if next != nil {
+		// Use the replacement function for the children.
+		v = next
+	}
+	if comp, ok := root.(CompositeNode); ok {
+		for _, child := range comp.Children() {
+			Walk(child, v)
+		}
+	}
+}
+
+// Visitor provides a technique for walking the AST that allows for
+// dynamic dispatch, where a particular function is invoked based on
+// the runtime type of the argument.
+//
+// It consists of a number of functions, each of which matches a
+// concrete Node type. It also includes functions for sub-interfaces
+// of Node and the Node interface itself, to be used as broader
+// "catch all" functions.
+//
+// To use a visitor, provide a function for the node types of
+// interest and pass visitor.Visit as the function to a Walk operation.
+// When a node is traversed, the corresponding function field of
+// the visitor is invoked, if not nil. If the function for a node's
+// concrete type is nil/absent but the function for an interface it
+// implements is present, that interface visit function will be used
+// instead. If no matching function is present, the traversal will
+// continue. If a matching function is present, it will be invoked
+// and its response determines how the traversal proceeds.
+//
+// Every visit function returns (bool, *Visitor). If the bool returned
+// is false, the visited node's descendants are skipped. Otherwise,
+// traversal will continue into the node's children. If the returned
+// visitor is nil, the current visitor will continue to be used. But
+// if a non-nil visitor is returned, it will be used to visit the
+// node's children.
+type Visitor struct {
+	// VisitFileNode is invoked when visiting a *FileNode in the AST.
+	VisitFileNode func(*FileNode) (bool, *Visitor)
+	// VisitSyntaxNode is invoked when visiting a *SyntaxNode in the AST.
+	VisitSyntaxNode func(*SyntaxNode) (bool, *Visitor)
+
+	// TODO: add VisitEditionNode
+
+	// VisitPackageNode is invoked when visiting a *PackageNode in the AST.
+	VisitPackageNode func(*PackageNode) (bool, *Visitor)
+	// VisitImportNode is invoked when visiting an *ImportNode in the AST.
+	VisitImportNode func(*ImportNode) (bool, *Visitor)
+	// VisitOptionNode is invoked when visiting an *OptionNode in the AST.
+	VisitOptionNode func(*OptionNode) (bool, *Visitor)
+	// VisitOptionNameNode is invoked when visiting an *OptionNameNode in the AST.
+	VisitOptionNameNode func(*OptionNameNode) (bool, *Visitor)
+	// VisitFieldReferenceNode is invoked when visiting a *FieldReferenceNode in the AST.
+	VisitFieldReferenceNode func(*FieldReferenceNode) (bool, *Visitor)
+	// VisitCompactOptionsNode is invoked when visiting a *CompactOptionsNode in the AST.
+	VisitCompactOptionsNode func(*CompactOptionsNode) (bool, *Visitor)
+	// VisitMessageNode is invoked when visiting a *MessageNode in the AST.
+	VisitMessageNode func(*MessageNode) (bool, *Visitor)
+	// VisitExtendNode is invoked when visiting an *ExtendNode in the AST.
+	VisitExtendNode func(*ExtendNode) (bool, *Visitor)
+	// VisitExtensionRangeNode is invoked when visiting an *ExtensionRangeNode in the AST.
+	VisitExtensionRangeNode func(*ExtensionRangeNode) (bool, *Visitor)
+	// VisitReservedNode is invoked when visiting a *ReservedNode in the AST.
+	VisitReservedNode func(*ReservedNode) (bool, *Visitor)
+	// VisitRangeNode is invoked when visiting a *RangeNode in the AST.
+	VisitRangeNode func(*RangeNode) (bool, *Visitor)
+	// VisitFieldNode is invoked when visiting a *FieldNode in the AST.
+	VisitFieldNode func(*FieldNode) (bool, *Visitor)
+	// VisitGroupNode is invoked when visiting a *GroupNode in the AST.
+	VisitGroupNode func(*GroupNode) (bool, *Visitor)
+	// VisitMapFieldNode is invoked when visiting a *MapFieldNode in the AST.
+	VisitMapFieldNode func(*MapFieldNode) (bool, *Visitor)
+	// VisitMapTypeNode is invoked when visiting a *MapTypeNode in the AST.
+	VisitMapTypeNode func(*MapTypeNode) (bool, *Visitor)
+	// VisitOneOfNode is invoked when visiting a *OneOfNode in the AST.
+	VisitOneOfNode func(*OneOfNode) (bool, *Visitor)
+	// VisitEnumNode is invoked when visiting an *EnumNode in the AST.
+	VisitEnumNode func(*EnumNode) (bool, *Visitor)
+	// VisitEnumValueNode is invoked when visiting an *EnumValueNode in the AST.
+	VisitEnumValueNode func(*EnumValueNode) (bool, *Visitor)
+	// VisitServiceNode is invoked when visiting a *ServiceNode in the AST.
+	VisitServiceNode func(*ServiceNode) (bool, *Visitor)
+	// VisitRPCNode is invoked when visiting an *RPCNode in the AST.
+	VisitRPCNode func(*RPCNode) (bool, *Visitor)
+	// VisitRPCTypeNode is invoked when visiting an *RPCTypeNode in the AST.
+	VisitRPCTypeNode func(*RPCTypeNode) (bool, *Visitor)
+	// VisitIdentNode is invoked when visiting an *IdentNode in the AST.
+	VisitIdentNode func(*IdentNode) (bool, *Visitor)
+	// VisitCompoundIdentNode is invoked when visiting a *CompoundIdentNode in the AST.
+	VisitCompoundIdentNode func(*CompoundIdentNode) (bool, *Visitor)
+	// VisitStringLiteralNode is invoked when visiting a *StringLiteralNode in the AST.
+	VisitStringLiteralNode func(*StringLiteralNode) (bool, *Visitor)
+	// VisitCompoundStringLiteralNode is invoked when visiting a *CompoundStringLiteralNode in the AST.
+	VisitCompoundStringLiteralNode func(*CompoundStringLiteralNode) (bool, *Visitor)
+	// VisitUintLiteralNode is invoked when visiting a *UintLiteralNode in the AST.
+	VisitUintLiteralNode func(*UintLiteralNode) (bool, *Visitor)
+	// VisitPositiveUintLiteralNode is invoked when visiting a *PositiveUintLiteralNode in the AST.
+	//
+	// Deprecated: this node type will not actually be present in an AST.
+	VisitPositiveUintLiteralNode func(*PositiveUintLiteralNode) (bool, *Visitor)
+	// VisitNegativeIntLiteralNode is invoked when visiting a *NegativeIntLiteralNode in the AST.
+	VisitNegativeIntLiteralNode func(*NegativeIntLiteralNode) (bool, *Visitor)
+	// VisitFloatLiteralNode is invoked when visiting a *FloatLiteralNode in the AST.
+	VisitFloatLiteralNode func(*FloatLiteralNode) (bool, *Visitor)
+	// VisitSpecialFloatLiteralNode is invoked when visiting a *SpecialFloatLiteralNode in the AST.
+	VisitSpecialFloatLiteralNode func(*SpecialFloatLiteralNode) (bool, *Visitor)
+	// VisitSignedFloatLiteralNode is invoked when visiting a *SignedFloatLiteralNode in the AST.
+	VisitSignedFloatLiteralNode func(*SignedFloatLiteralNode) (bool, *Visitor)
+	// VisitBoolLiteralNode is invoked when visiting a *BoolLiteralNode in the AST.
+	VisitBoolLiteralNode func(*BoolLiteralNode) (bool, *Visitor)
+	// VisitArrayLiteralNode is invoked when visiting an *ArrayLiteralNode in the AST.
+	VisitArrayLiteralNode func(*ArrayLiteralNode) (bool, *Visitor)
+	// VisitMessageLiteralNode is invoked when visiting a *MessageLiteralNode in the AST.
+	VisitMessageLiteralNode func(*MessageLiteralNode) (bool, *Visitor)
+	// VisitMessageFieldNode is invoked when visiting a *MessageFieldNode in the AST.
+	VisitMessageFieldNode func(*MessageFieldNode) (bool, *Visitor)
+	// VisitKeywordNode is invoked when visiting a *KeywordNode in the AST.
+	VisitKeywordNode func(*KeywordNode) (bool, *Visitor)
+	// VisitRuneNode is invoked when visiting a *RuneNode in the AST.
+	VisitRuneNode func(*RuneNode) (bool, *Visitor)
+	// VisitEmptyDeclNode is invoked when visiting a *EmptyDeclNode in the AST.
+	VisitEmptyDeclNode func(*EmptyDeclNode) (bool, *Visitor)
+
+	// VisitFieldDeclNode is invoked when visiting a FieldDeclNode in the AST.
+	// This function is used when no concrete type function is provided. If
+	// both this and VisitMessageDeclNode are provided, and a node implements
+	// both (such as *GroupNode and *MapFieldNode), this function will be
+	// invoked and not the other.
+	VisitFieldDeclNode func(FieldDeclNode) (bool, *Visitor)
+	// VisitMessageDeclNode is invoked when visiting a MessageDeclNode in the AST.
+	// This function is used when no concrete type function is provided.
+	VisitMessageDeclNode func(MessageDeclNode) (bool, *Visitor)
+
+	// VisitIdentValueNode is invoked when visiting an IdentValueNode in the AST.
+	// This function is used when no concrete type function is provided.
+	VisitIdentValueNode func(IdentValueNode) (bool, *Visitor)
+	// VisitStringValueNode is invoked when visiting a StringValueNode in the AST.
+	// This function is used when no concrete type function is provided.
+	VisitStringValueNode func(StringValueNode) (bool, *Visitor)
+	// VisitIntValueNode is invoked when visiting an IntValueNode in the AST.
+	// This function is used when no concrete type function is provided. If
+	// both this and VisitFloatValueNode are provided, and a node implements
+	// both (such as *UintLiteralNode), this function will be invoked and
+	// not the other.
+	VisitIntValueNode func(IntValueNode) (bool, *Visitor)
+	// VisitFloatValueNode is invoked when visiting a FloatValueNode in the AST.
+	// This function is used when no concrete type function is provided.
+	VisitFloatValueNode func(FloatValueNode) (bool, *Visitor)
+	// VisitValueNode is invoked when visiting a ValueNode in the AST. This
+	// function is used when no concrete type function is provided and no
+	// more specific ValueNode function is provided that matches the node.
+	VisitValueNode func(ValueNode) (bool, *Visitor)
+
+	// VisitTerminalNode is invoked when visiting a TerminalNode in the AST.
+	// This function is used when no concrete type function is provided and
+	// no more specific interface type function is provided.
+	VisitTerminalNode func(TerminalNode) (bool, *Visitor)
+	// VisitCompositeNode is invoked when visiting a CompositeNode in the AST.
+	// This function is used when no concrete type function is provided and
+	// no more specific interface type function is provided.
+	VisitCompositeNode func(CompositeNode) (bool, *Visitor)
+	// VisitNode is invoked when visiting a Node in the AST. This
+	// function is only used when no other more specific function is
+	// provided.
+	VisitNode func(Node) (bool, *Visitor)
+}
+
+// Visit provides the Visitor's implementation of VisitFunc, to be
+// used with Walk operations. Dispatch first tries the function for the
+// node's concrete type, then falls through a series of interface-type
+// functions in priority order, and finally VisitNode. If no function
+// matches, traversal simply continues into the node's children.
+func (v *Visitor) Visit(n Node) (bool, VisitFunc) {
+	var ok, matched bool
+	var next *Visitor
+	// First pass: dispatch on the node's concrete type.
+	switch n := n.(type) {
+	case *FileNode:
+		if v.VisitFileNode != nil {
+			matched = true
+			ok, next = v.VisitFileNode(n)
+		}
+	case *SyntaxNode:
+		if v.VisitSyntaxNode != nil {
+			matched = true
+			ok, next = v.VisitSyntaxNode(n)
+		}
+	case *PackageNode:
+		if v.VisitPackageNode != nil {
+			matched = true
+			ok, next = v.VisitPackageNode(n)
+		}
+	case *ImportNode:
+		if v.VisitImportNode != nil {
+			matched = true
+			ok, next = v.VisitImportNode(n)
+		}
+	case *OptionNode:
+		if v.VisitOptionNode != nil {
+			matched = true
+			ok, next = v.VisitOptionNode(n)
+		}
+	case *OptionNameNode:
+		if v.VisitOptionNameNode != nil {
+			matched = true
+			ok, next = v.VisitOptionNameNode(n)
+		}
+	case *FieldReferenceNode:
+		if v.VisitFieldReferenceNode != nil {
+			matched = true
+			ok, next = v.VisitFieldReferenceNode(n)
+		}
+	case *CompactOptionsNode:
+		if v.VisitCompactOptionsNode != nil {
+			matched = true
+			ok, next = v.VisitCompactOptionsNode(n)
+		}
+	case *MessageNode:
+		if v.VisitMessageNode != nil {
+			matched = true
+			ok, next = v.VisitMessageNode(n)
+		}
+	case *ExtendNode:
+		if v.VisitExtendNode != nil {
+			matched = true
+			ok, next = v.VisitExtendNode(n)
+		}
+	case *ExtensionRangeNode:
+		if v.VisitExtensionRangeNode != nil {
+			matched = true
+			ok, next = v.VisitExtensionRangeNode(n)
+		}
+	case *ReservedNode:
+		if v.VisitReservedNode != nil {
+			matched = true
+			ok, next = v.VisitReservedNode(n)
+		}
+	case *RangeNode:
+		if v.VisitRangeNode != nil {
+			matched = true
+			ok, next = v.VisitRangeNode(n)
+		}
+	case *FieldNode:
+		if v.VisitFieldNode != nil {
+			matched = true
+			ok, next = v.VisitFieldNode(n)
+		}
+	case *GroupNode:
+		if v.VisitGroupNode != nil {
+			matched = true
+			ok, next = v.VisitGroupNode(n)
+		}
+	case *MapFieldNode:
+		if v.VisitMapFieldNode != nil {
+			matched = true
+			ok, next = v.VisitMapFieldNode(n)
+		}
+	case *MapTypeNode:
+		if v.VisitMapTypeNode != nil {
+			matched = true
+			ok, next = v.VisitMapTypeNode(n)
+		}
+	case *OneOfNode:
+		if v.VisitOneOfNode != nil {
+			matched = true
+			ok, next = v.VisitOneOfNode(n)
+		}
+	case *EnumNode:
+		if v.VisitEnumNode != nil {
+			matched = true
+			ok, next = v.VisitEnumNode(n)
+		}
+	case *EnumValueNode:
+		if v.VisitEnumValueNode != nil {
+			matched = true
+			ok, next = v.VisitEnumValueNode(n)
+		}
+	case *ServiceNode:
+		if v.VisitServiceNode != nil {
+			matched = true
+			ok, next = v.VisitServiceNode(n)
+		}
+	case *RPCNode:
+		if v.VisitRPCNode != nil {
+			matched = true
+			ok, next = v.VisitRPCNode(n)
+		}
+	case *RPCTypeNode:
+		if v.VisitRPCTypeNode != nil {
+			matched = true
+			ok, next = v.VisitRPCTypeNode(n)
+		}
+	case *IdentNode:
+		if v.VisitIdentNode != nil {
+			matched = true
+			ok, next = v.VisitIdentNode(n)
+		}
+	case *CompoundIdentNode:
+		if v.VisitCompoundIdentNode != nil {
+			matched = true
+			ok, next = v.VisitCompoundIdentNode(n)
+		}
+	case *StringLiteralNode:
+		if v.VisitStringLiteralNode != nil {
+			matched = true
+			ok, next = v.VisitStringLiteralNode(n)
+		}
+	case *CompoundStringLiteralNode:
+		if v.VisitCompoundStringLiteralNode != nil {
+			matched = true
+			ok, next = v.VisitCompoundStringLiteralNode(n)
+		}
+	case *UintLiteralNode:
+		if v.VisitUintLiteralNode != nil {
+			matched = true
+			ok, next = v.VisitUintLiteralNode(n)
+		}
+	case *PositiveUintLiteralNode:
+		if v.VisitPositiveUintLiteralNode != nil {
+			matched = true
+			ok, next = v.VisitPositiveUintLiteralNode(n)
+		}
+	case *NegativeIntLiteralNode:
+		if v.VisitNegativeIntLiteralNode != nil {
+			matched = true
+			ok, next = v.VisitNegativeIntLiteralNode(n)
+		}
+	case *FloatLiteralNode:
+		if v.VisitFloatLiteralNode != nil {
+			matched = true
+			ok, next = v.VisitFloatLiteralNode(n)
+		}
+	case *SpecialFloatLiteralNode:
+		if v.VisitSpecialFloatLiteralNode != nil {
+			matched = true
+			ok, next = v.VisitSpecialFloatLiteralNode(n)
+		}
+	case *SignedFloatLiteralNode:
+		if v.VisitSignedFloatLiteralNode != nil {
+			matched = true
+			ok, next = v.VisitSignedFloatLiteralNode(n)
+		}
+	case *BoolLiteralNode:
+		if v.VisitBoolLiteralNode != nil {
+			matched = true
+			ok, next = v.VisitBoolLiteralNode(n)
+		}
+	case *ArrayLiteralNode:
+		if v.VisitArrayLiteralNode != nil {
+			matched = true
+			ok, next = v.VisitArrayLiteralNode(n)
+		}
+	case *MessageLiteralNode:
+		if v.VisitMessageLiteralNode != nil {
+			matched = true
+			ok, next = v.VisitMessageLiteralNode(n)
+		}
+	case *MessageFieldNode:
+		if v.VisitMessageFieldNode != nil {
+			matched = true
+			ok, next = v.VisitMessageFieldNode(n)
+		}
+	case *KeywordNode:
+		if v.VisitKeywordNode != nil {
+			matched = true
+			ok, next = v.VisitKeywordNode(n)
+		}
+	case *RuneNode:
+		if v.VisitRuneNode != nil {
+			matched = true
+			ok, next = v.VisitRuneNode(n)
+		}
+	case *EmptyDeclNode:
+		if v.VisitEmptyDeclNode != nil {
+			matched = true
+			ok, next = v.VisitEmptyDeclNode(n)
+		}
+	}
+
+	if !matched {
+		// Visitor provided no concrete type visit function, so
+		// check interface types. We do this in several passes
+		// to provide "priority" for matched interfaces for nodes
+		// that actually implement more than one interface.
+		//
+		// For example, StringLiteralNode implements both
+		// StringValueNode and ValueNode. Both cases could match
+		// so the first case is what would match. So if we want
+		// to test against either, they need to be in different
+		// switch statements.
+		switch n := n.(type) {
+		case FieldDeclNode:
+			if v.VisitFieldDeclNode != nil {
+				matched = true
+				ok, next = v.VisitFieldDeclNode(n)
+			}
+		case IdentValueNode:
+			if v.VisitIdentValueNode != nil {
+				matched = true
+				ok, next = v.VisitIdentValueNode(n)
+			}
+		case StringValueNode:
+			if v.VisitStringValueNode != nil {
+				matched = true
+				ok, next = v.VisitStringValueNode(n)
+			}
+		case IntValueNode:
+			if v.VisitIntValueNode != nil {
+				matched = true
+				ok, next = v.VisitIntValueNode(n)
+			}
+		}
+	}
+
+	if !matched {
+		// These two are excluded from the above switch so that
+		// if visitor provides both VisitIntValueNode and
+		// VisitFloatValueNode, we'll prefer VisitIntValueNode
+		// for *UintLiteralNode (which implements both). Similarly,
+		// that way we prefer VisitFieldDeclNode over
+		// VisitMessageDeclNode when visiting a *GroupNode.
+		switch n := n.(type) {
+		case FloatValueNode:
+			if v.VisitFloatValueNode != nil {
+				matched = true
+				ok, next = v.VisitFloatValueNode(n)
+			}
+		case MessageDeclNode:
+			if v.VisitMessageDeclNode != nil {
+				matched = true
+				ok, next = v.VisitMessageDeclNode(n)
+			}
+		}
+	}
+
+	if !matched {
+		// ValueNode is the broadest value interface, so it is
+		// only consulted after the more specific value interfaces.
+		switch n := n.(type) {
+		case ValueNode:
+			if v.VisitValueNode != nil {
+				matched = true
+				ok, next = v.VisitValueNode(n)
+			}
+		}
+	}
+
+	if !matched {
+		// Penultimate fallbacks: every node is either terminal or composite.
+		switch n := n.(type) {
+		case TerminalNode:
+			if v.VisitTerminalNode != nil {
+				matched = true
+				ok, next = v.VisitTerminalNode(n)
+			}
+		case CompositeNode:
+			if v.VisitCompositeNode != nil {
+				matched = true
+				ok, next = v.VisitCompositeNode(n)
+			}
+		}
+	}
+
+	if !matched {
+		// finally, fallback to most generic visit function
+		if v.VisitNode != nil {
+			matched = true
+			ok, next = v.VisitNode(n)
+		}
+	}
+
+	if !matched {
+		// keep descending with the current visitor
+		return true, nil
+	}
+
+	if !ok {
+		return false, nil
+	}
+	if next != nil {
+		return true, next.Visit
+	}
+	return true, v.Visit
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/doc.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/doc.go
new file mode 100644
index 0000000..6642f6a
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/doc.go
@@ -0,0 +1,16 @@
+// Package protoparse provides functionality for parsing *.proto source files
+// into descriptors that can be used with other protoreflect packages, like
+// dynamic messages and dynamic GRPC clients.
+//
+// This package links in other packages that include compiled descriptors for
+// the various "google/protobuf/*.proto" files that are included with protoc.
+// That way, like when invoking protoc, programs need not supply copies of these
+// "builtin" files. Though if copies of the files are provided, they will be
+// used instead of the builtin descriptors.
+//
+// Deprecated: This protoparse package is now just a thin veneer around a newer
+// replacement parser/compiler: [github.com/bufbuild/protocompile]. Users are
+// highly encouraged to directly use protocompile instead of this package.
+//
+// [github.com/bufbuild/protocompile]: https://pkg.go.dev/github.com/bufbuild/protocompile
+package protoparse
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/errors.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/errors.go
new file mode 100644
index 0000000..0ec70bd
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/errors.go
@@ -0,0 +1,122 @@
+package protoparse
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/bufbuild/protocompile/linker"
+ "github.com/bufbuild/protocompile/parser"
+ "github.com/bufbuild/protocompile/reporter"
+
+ "github.com/jhump/protoreflect/desc/protoparse/ast"
+)
+
// SourcePos is the same as ast.SourcePos. This alias exists for
// backwards compatibility (SourcePos used to be defined in this package.)
type SourcePos = ast.SourcePos

// ErrInvalidSource is a sentinel error that is returned by calls to
// Parser.ParseFiles and Parser.ParseFilesButDoNotLink in the event that syntax
// or link errors are encountered, but the parser's configured ErrorReporter
// always returns nil. (Alias of the protocompile reporter's sentinel.)
var ErrInvalidSource = reporter.ErrInvalidSource

// ErrNoSyntax is a sentinel error that may be passed to a warning reporter.
// The error the reporter receives will be wrapped with source position that
// indicates the file that had no syntax statement.
var ErrNoSyntax = parser.ErrNoSyntax

// ErrLookupImportAndProtoSet is the error returned if both LookupImport and LookupImportProto are set.
//
// Deprecated: This error is no longer used. It is now legal to set both LookupImport and LookupImportProto
// fields on the Parser.
var ErrLookupImportAndProtoSet = errors.New("both LookupImport and LookupImportProto set")

// ErrorReporter is responsible for reporting the given error. If the reporter
// returns a non-nil error, parsing/linking will abort with that error. If the
// reporter returns nil, parsing will continue, allowing the parser to try to
// report as many syntax and/or link errors as it can find.
// (Alias of reporter.ErrorReporter in protocompile.)
type ErrorReporter = reporter.ErrorReporter

// WarningReporter is responsible for reporting the given warning. This is used
// for indicating non-error messages to the calling program for things that do
// not cause the parse to fail but are considered bad practice. Though they are
// just warnings, the details are supplied to the reporter via an error type.
// (Alias of reporter.WarningReporter in protocompile.)
type WarningReporter = reporter.WarningReporter
+
// ErrorWithSourcePos is an error about a proto source file that includes
// information about the location in the file that caused the error.
//
// Errors that include source location information *might* be of this type.
// However, calling code that is trying to examine errors with location info
// should instead look for instances of the ErrorWithPos interface, which
// will find other kinds of errors. This type is only exported for backwards
// compatibility.
//
// SourcePos should always be set and never nil.
type ErrorWithSourcePos struct {
	// These fields are present and exported for backwards-compatibility
	// with v1.4 and earlier.
	Underlying error
	Pos        *SourcePos

	// The embedded interface completes the ErrorWithPos contract; the
	// methods defined below shadow its Error, GetPosition, and Unwrap so
	// that the exported legacy fields are the source of truth.
	reporter.ErrorWithPos
}

// Error implements the error interface, formatting the message as
// "<position>: <underlying error>".
func (e ErrorWithSourcePos) Error() string {
	sourcePos := e.GetPosition()
	return fmt.Sprintf("%s: %v", sourcePos, e.Underlying)
}

// GetPosition implements the ErrorWithPos interface, supplying a location in
// proto source that caused the error.
func (e ErrorWithSourcePos) GetPosition() SourcePos {
	if e.Pos == nil {
		// Defensive fallback: Pos should never be nil (see type doc).
		return SourcePos{Filename: "<input>"}
	}
	return *e.Pos
}

// Unwrap implements the ErrorWithPos interface, supplying the underlying
// error. This error will not include location information.
func (e ErrorWithSourcePos) Unwrap() error {
	return e.Underlying
}

// Compile-time check that the struct satisfies the ErrorWithPos interface.
var _ ErrorWithPos = ErrorWithSourcePos{}
+
+func toErrorWithSourcePos(err ErrorWithPos) ErrorWithPos {
+ pos := err.GetPosition()
+ return ErrorWithSourcePos{
+ ErrorWithPos: err,
+ Underlying: err.Unwrap(),
+ Pos: &pos,
+ }
+}
+
// ErrorUnusedImport may be passed to a warning reporter when an unused
// import is detected. The error the reporter receives will be wrapped
// with source position that indicates the file and line where the import
// statement appeared. (Alias of linker.ErrorUnusedImport in protocompile.)
type ErrorUnusedImport = linker.ErrorUnusedImport
+
// errorWithFilename decorates an error with the name of the file that
// caused it, so that every reported message identifies the offending file.
type errorWithFilename struct {
	underlying error
	filename   string
}

// Error implements the error interface, prefixing the underlying error's
// message with the filename.
func (e errorWithFilename) Error() string {
	return fmt.Sprintf("%s: %v", e.filename, e.underlying)
}

// Unwrap exposes the wrapped error for errors.Is / errors.As chains.
func (e errorWithFilename) Unwrap() error {
	return e.underlying
}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/parser.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/parser.go
new file mode 100644
index 0000000..1a6763d
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/parser.go
@@ -0,0 +1,804 @@
+package protoparse
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ "github.com/bufbuild/protocompile"
+ ast2 "github.com/bufbuild/protocompile/ast"
+ "github.com/bufbuild/protocompile/linker"
+ "github.com/bufbuild/protocompile/options"
+ "github.com/bufbuild/protocompile/parser"
+ "github.com/bufbuild/protocompile/protoutil"
+ "github.com/bufbuild/protocompile/reporter"
+ "github.com/bufbuild/protocompile/sourceinfo"
+ "github.com/bufbuild/protocompile/walk"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/types/descriptorpb"
+
+ "github.com/jhump/protoreflect/desc"
+ "github.com/jhump/protoreflect/desc/internal"
+ "github.com/jhump/protoreflect/desc/protoparse/ast"
+)
+
// FileAccessor is an abstraction for opening proto source files. It takes the
// name of the file to open and returns either the input reader or an error.
type FileAccessor func(filename string) (io.ReadCloser, error)

// FileContentsFromMap returns a FileAccessor backed by the given map of file
// contents, which allows proto sources to be constructed in memory and easily
// supplied to a parser. Map keys are the paths to the proto source files and
// values are the actual proto source text.
func FileContentsFromMap(files map[string]string) FileAccessor {
	return func(filename string) (io.ReadCloser, error) {
		contents, found := files[filename]
		if !found {
			// The user-provided map may use OS-specific separators, so
			// retry with the path normalized to forward slashes.
			if contents, found = files[filepath.ToSlash(filename)]; !found {
				return nil, os.ErrNotExist
			}
		}
		return ioutil.NopCloser(strings.NewReader(contents)), nil
	}
}
+
// Parser parses proto source into descriptors.
//
// The zero value is usable: files are opened via os.Open relative to the
// current working directory and parsing aborts on the first error.
type Parser struct {
	// The paths used to search for dependencies that are referenced in import
	// statements in proto source files. If no import paths are provided then
	// "." (current directory) is assumed to be the only import path.
	//
	// This setting is only used during ParseFiles operations. Since calls to
	// ParseFilesButDoNotLink do not link, there is no need to load and parse
	// dependencies.
	ImportPaths []string

	// If true, the supplied file names/paths need not necessarily match how the
	// files are referenced in import statements. The parser will attempt to
	// match import statements to supplied paths, "guessing" the import paths
	// for the files. Note that this inference is not perfect and link errors
	// could result. It works best when all proto files are organized such that
	// a single import path can be inferred (e.g. all files under a single tree
	// with import statements all being relative to the root of this tree).
	InferImportPaths bool

	// LookupImport is a function that accepts a filename and
	// returns a file descriptor, which will be consulted when resolving imports.
	// This allows a compiled Go proto in another Go module to be referenced
	// in the proto(s) being parsed.
	//
	// In the event of a filename collision, Accessor is consulted first,
	// then LookupImport is consulted, and finally the well-known protos
	// are used.
	//
	// For example, in order to automatically look up compiled Go protos that
	// have been imported and be able to use them as imports, set this to
	// desc.LoadFileDescriptor.
	LookupImport func(string) (*desc.FileDescriptor, error)

	// LookupImportProto has the same functionality as LookupImport, however it returns
	// a FileDescriptorProto instead of a FileDescriptor.
	LookupImportProto func(string) (*descriptorpb.FileDescriptorProto, error)

	// Used to create a reader for a given filename, when loading proto source
	// file contents. If unset, os.Open is used. If ImportPaths is also empty
	// then relative paths will be relative to the process's current working
	// directory.
	Accessor FileAccessor

	// If true, the resulting file descriptors will retain source code info,
	// that maps elements to their location in the source files as well as
	// includes comments found during parsing (and attributed to elements of
	// the source file).
	IncludeSourceCodeInfo bool

	// If true, the results from ParseFilesButDoNotLink will be passed through
	// some additional validations. But only constraints that do not require
	// linking can be checked. These include proto2 vs. proto3 language features,
	// looking for incorrect usage of reserved names or tags, and ensuring that
	// fields have unique tags and that enum values have unique numbers (unless
	// the enum allows aliases).
	ValidateUnlinkedFiles bool

	// If true, the results from ParseFilesButDoNotLink will have options
	// interpreted. Any uninterpretable options (including any custom options or
	// options that refer to message and enum types, which can only be
	// interpreted after linking) will be left in uninterpreted_options. Also,
	// the "default" pseudo-option for fields can only be interpreted for scalar
	// fields, excluding enums. (Interpreting default values for enum fields
	// requires resolving enum names, which requires linking.)
	InterpretOptionsInUnlinkedFiles bool

	// A custom reporter of syntax and link errors. If not specified, the
	// default reporter just returns the reported error, which causes parsing
	// to abort after encountering a single error.
	//
	// The reporter is not invoked for system or I/O errors, only for syntax and
	// link errors.
	ErrorReporter ErrorReporter

	// A custom reporter of warnings. If not specified, warning messages are ignored.
	WarningReporter WarningReporter
}
+
// ParseFiles parses the named files into descriptors. The returned slice has
// the same number of entries as the given filenames, in the same order. So the
// first returned descriptor corresponds to the first given name, and so on.
//
// All dependencies for all specified files (including transitive dependencies)
// must be accessible via the parser's Accessor or a link error will occur. The
// exception to this rule is that files can import standard Google-provided
// files -- e.g. google/protobuf/*.proto -- without needing to supply sources
// for these files. Like protoc, this parser has a built-in version of these
// files it can use if they aren't explicitly supplied.
//
// If the Parser has no ErrorReporter set and a syntax or link error occurs,
// parsing will abort with the first such error encountered. If there is an
// ErrorReporter configured and it returns non-nil, parsing will abort with the
// error it returns. If syntax or link errors are encountered but the configured
// ErrorReporter always returns nil, the parse fails with ErrInvalidSource.
func (p Parser) ParseFiles(filenames ...string) ([]*desc.FileDescriptor, error) {
	srcInfoMode := protocompile.SourceInfoNone
	if p.IncludeSourceCodeInfo {
		// "extra comments" mode retains comments in the generated source info
		// (see the IncludeSourceCodeInfo field doc).
		srcInfoMode = protocompile.SourceInfoExtraComments
	}
	rep := newReporter(p.ErrorReporter, p.WarningReporter)
	res, srcSpanAddr := p.getResolver(filenames)

	if p.InferImportPaths {
		// we must first compile everything to protos
		results, err := parseToProtosRecursive(res, filenames, reporter.NewHandler(rep), srcSpanAddr)
		if err != nil {
			return nil, err
		}
		// then we can infer import paths
		var rewritten map[string]string
		results, rewritten = fixupFilenames(results)
		if len(rewritten) > 0 {
			// swap rewritten names into the same positions so the returned
			// descriptors keep the caller's requested order
			for i := range filenames {
				if replace, ok := rewritten[filenames[i]]; ok {
					filenames[i] = replace
				}
			}
		}
		resolverFromResults := protocompile.ResolverFunc(func(path string) (protocompile.SearchResult, error) {
			res, ok := results[path]
			if !ok {
				return protocompile.SearchResult{}, os.ErrNotExist
			}
			return protocompile.SearchResult{ParseResult: noCloneParseResult{res}}, nil
		})
		// consult the already-parsed results first, then the original resolver
		res = protocompile.CompositeResolver{resolverFromResults, res}
	}

	c := protocompile.Compiler{
		Resolver:       res,
		MaxParallelism: 1, // NOTE(review): serial compilation — presumably for deterministic behavior; confirm
		SourceInfoMode: srcInfoMode,
		Reporter:       rep,
	}
	results, err := c.Compile(context.Background(), filenames...)
	if err != nil {
		return nil, err
	}

	// Strip dynamic extension fields (recursively, through imports) before
	// wrapping, to match protoparse's historical output shape.
	fds := make([]protoreflect.FileDescriptor, len(results))
	alreadySeen := make(map[string]struct{}, len(results))
	for i, res := range results {
		removeDynamicExtensions(res, alreadySeen)
		fds[i] = res
	}
	return desc.WrapFiles(fds)
}
+
// noCloneParseResult wraps a parser.Result so that protocompile reuses the
// result directly instead of making a defensive copy.
type noCloneParseResult struct {
	parser.Result
}

// Clone returns the wrapped result itself rather than a copy.
func (r noCloneParseResult) Clone() parser.Result {
	// protocompile will clone parser.Result to make sure it can't be shared
	// with other compilation operations (which would not be thread-safe).
	// However, this parse result cannot be shared with another compile
	// operation. That means the clone is unnecessary; so we skip it, to avoid
	// the associated performance costs.
	return r.Result
}
+
// ParseFilesButDoNotLink parses the named files into descriptor protos. The
// results are just protos, not fully-linked descriptors. It is possible that
// descriptors are invalid and still be returned in parsed form without error
// due to the fact that the linking step is skipped (and thus many validation
// steps omitted).
//
// There are a few side effects to not linking the descriptors:
//  1. No options will be interpreted. Options can refer to extensions or have
//     message and enum types. Without linking, these extension and type
//     references are not resolved, so the options may not be interpretable.
//     So all options will appear in UninterpretedOption fields of the various
//     descriptor options messages.
//  2. Type references will not be resolved. This means that the actual type
//     names in the descriptors may be unqualified and even relative to the
//     scope in which the type reference appears. This goes for fields that
//     have message and enum types. It also applies to methods and their
//     references to request and response message types.
//  3. Type references are not known. For non-scalar fields, until the type
//     name is resolved (during linking), it is not known whether the type
//     refers to a message or an enum. So all fields with such type references
//     will not have their Type set, only the TypeName.
//
// This method will still validate the syntax of parsed files. If the parser's
// ValidateUnlinkedFiles field is true, additional checks, beyond syntax will
// also be performed.
//
// If the Parser has no ErrorReporter set and a syntax error occurs, parsing
// will abort with the first such error encountered. If there is an
// ErrorReporter configured and it returns non-nil, parsing will abort with the
// error it returns. If syntax errors are encountered but the configured
// ErrorReporter always returns nil, the parse fails with ErrInvalidSource.
func (p Parser) ParseFilesButDoNotLink(filenames ...string) ([]*descriptorpb.FileDescriptorProto, error) {
	rep := newReporter(p.ErrorReporter, p.WarningReporter)
	p.ImportPaths = nil // not used for this "do not link" operation.
	res, _ := p.getResolver(filenames)
	results, err := parseToProtos(res, filenames, reporter.NewHandler(rep), p.ValidateUnlinkedFiles)
	if err != nil {
		return nil, err
	}

	if p.InferImportPaths {
		// index results by file name so inferred names can be fixed up
		resultsMap := make(map[string]parser.Result, len(results))
		for _, res := range results {
			resultsMap[res.FileDescriptorProto().GetName()] = res
		}
		var rewritten map[string]string
		resultsMap, rewritten = fixupFilenames(resultsMap)
		if len(rewritten) > 0 {
			for i := range filenames {
				if replace, ok := rewritten[filenames[i]]; ok {
					filenames[i] = replace
				}
			}
		}
		// re-assemble the result slice in the caller's requested order
		for i := range filenames {
			results[i] = resultsMap[filenames[i]]
		}
	}

	protos := make([]*descriptorpb.FileDescriptorProto, len(results))
	for i, res := range results {
		protos[i] = res.FileDescriptorProto()
		var optsIndex sourceinfo.OptionIndex
		if p.InterpretOptionsInUnlinkedFiles {
			var err error
			// interpret whatever can be resolved without linking; the rest
			// remains in uninterpreted_options (see method doc above)
			optsIndex, err = options.InterpretUnlinkedOptions(res)
			if err != nil {
				return nil, err
			}
			removeDynamicExtensionsFromProto(protos[i])
		}
		if p.IncludeSourceCodeInfo {
			protos[i].SourceCodeInfo = sourceinfo.GenerateSourceInfo(res.AST(), optsIndex, sourceinfo.WithExtraComments())
		}
	}

	return protos, nil
}
+
+// ParseToAST parses the named files into ASTs, or Abstract Syntax Trees. This
+// is for consumers of proto files that don't care about compiling the files to
+// descriptors, but care deeply about a non-lossy structured representation of
+// the source (since descriptors are lossy). This includes formatting tools and
+// possibly linters, too.
+//
+// If the requested filenames include standard imports (such as
+// "google/protobuf/empty.proto") and no source is provided, the corresponding
+// AST in the returned slice will be nil. These standard imports are only
+// available for use as descriptors; no source is available unless it is
+// provided by the configured Accessor.
+//
+// If the Parser has no ErrorReporter set and a syntax error occurs, parsing
+// will abort with the first such error encountered. If there is an
+// ErrorReporter configured and it returns non-nil, parsing will abort with the
+// error it returns. If syntax errors are encountered but the configured
+// ErrorReporter always returns nil, the parse fails with ErrInvalidSource.
+func (p Parser) ParseToAST(filenames ...string) ([]*ast.FileNode, error) {
+ rep := newReporter(p.ErrorReporter, p.WarningReporter)
+ res, _ := p.getResolver(filenames)
+ asts, _, err := parseToASTs(res, filenames, reporter.NewHandler(rep))
+ if err != nil {
+ return nil, err
+ }
+ results := make([]*ast.FileNode, len(asts))
+ for i := range asts {
+ if asts[i] == nil {
+ // should not be possible but...
+ return nil, fmt.Errorf("resolver did not produce source for %v", filenames[i])
+ }
+ results[i] = convertAST(asts[i])
+ }
+ return results, nil
+}
+
// parseToAST obtains the AST and/or pre-built parse result for a single file
// from the given resolver. Source-backed resolvers yield an AST; resolvers
// that supply an already-parsed or pre-compiled form yield a parser.Result.
// Errors are routed through the reporter handler, and the handler's error
// state is returned.
func parseToAST(res protocompile.Resolver, filename string, rep *reporter.Handler) (*ast2.FileNode, parser.Result, error) {
	searchResult, err := res.FindFileByPath(filename)
	if err != nil {
		_ = rep.HandleError(err)
		return nil, nil, rep.Error()
	}
	// NB: the order of these cases decides which form wins when a resolver
	// populates more than one field of the search result.
	switch {
	case searchResult.ParseResult != nil:
		return nil, searchResult.ParseResult, nil
	case searchResult.Proto != nil:
		return nil, parser.ResultWithoutAST(searchResult.Proto), nil
	case searchResult.Desc != nil:
		// compiled descriptor: convert it back to a descriptor proto
		return nil, parser.ResultWithoutAST(protoutil.ProtoFromFileDescriptor(searchResult.Desc)), nil
	case searchResult.AST != nil:
		return searchResult.AST, nil, nil
	case searchResult.Source != nil:
		astRoot, err := parser.Parse(filename, searchResult.Source, rep)
		return astRoot, nil, err
	default:
		_ = rep.HandleError(fmt.Errorf("resolver did not produce a result for %v", filename))
		return nil, nil, rep.Error()
	}
}
+
+func parseToASTs(res protocompile.Resolver, filenames []string, rep *reporter.Handler) ([]*ast2.FileNode, []parser.Result, error) {
+ asts := make([]*ast2.FileNode, len(filenames))
+ results := make([]parser.Result, len(filenames))
+ for i := range filenames {
+ asts[i], results[i], _ = parseToAST(res, filenames[i], rep)
+ if rep.ReporterError() != nil {
+ break
+ }
+ }
+ return asts, results, rep.Error()
+}
+
+func parseToProtos(res protocompile.Resolver, filenames []string, rep *reporter.Handler, validate bool) ([]parser.Result, error) {
+ asts, results, err := parseToASTs(res, filenames, rep)
+ if err != nil {
+ return nil, err
+ }
+ for i := range results {
+ if results[i] != nil {
+ continue
+ }
+ var err error
+ results[i], err = parser.ResultFromAST(asts[i], validate, rep)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return results, nil
+}
+
+func parseToProtosRecursive(res protocompile.Resolver, filenames []string, rep *reporter.Handler, srcSpanAddr *ast2.SourceSpan) (map[string]parser.Result, error) {
+ results := make(map[string]parser.Result, len(filenames))
+ for _, filename := range filenames {
+ if err := parseToProtoRecursive(res, filename, rep, srcSpanAddr, results); err != nil {
+ return results, err
+ }
+ }
+ return results, rep.Error()
+}
+
// parseToProtoRecursive parses the named file and then, recursively, every
// file it imports, accumulating results in the given map. While each import
// is being resolved, *srcSpanAddr is temporarily pointed at the location of
// the import statement so that accessor errors can be attributed to the
// import site (see getResolver).
func parseToProtoRecursive(res protocompile.Resolver, filename string, rep *reporter.Handler, srcSpanAddr *ast2.SourceSpan, results map[string]parser.Result) error {
	if _, ok := results[filename]; ok {
		// already processed this one
		return nil
	}
	results[filename] = nil // placeholder entry; also stops recursion on import cycles

	astRoot, parseResult, err := parseToAST(res, filename, rep)
	if err != nil {
		return err
	}
	if parseResult == nil {
		parseResult, err = parser.ResultFromAST(astRoot, true, rep)
		if err != nil {
			return err
		}
	}
	results[filename] = parseResult

	if astRoot != nil {
		// We have an AST, so we use it to recursively examine imports.
		for _, decl := range astRoot.Decls {
			imp, ok := decl.(*ast2.ImportNode)
			if !ok {
				continue
			}
			// Closure so the deferred restore of *srcSpanAddr runs after each
			// import rather than at the end of the whole function.
			err := func() error {
				orig := *srcSpanAddr
				*srcSpanAddr = astRoot.NodeInfo(imp.Name)
				defer func() {
					*srcSpanAddr = orig
				}()

				return parseToProtoRecursive(res, imp.Name.AsString(), rep, srcSpanAddr, results)
			}()
			if err != nil {
				return err
			}
		}
		return nil
	}

	// Without an AST, we must recursively examine the proto. This makes it harder
	// (but not necessarily impossible) to get the source location of the import.
	fd := parseResult.FileDescriptorProto()
	for i, dep := range fd.Dependency {
		// source-code-info path identifying the i-th dependency entry
		path := []int32{internal.File_dependencyTag, int32(i)}
		err := func() error {
			orig := *srcSpanAddr
			found := false
			for _, loc := range fd.GetSourceCodeInfo().GetLocation() {
				if pathsEqual(loc.Path, path) {
					start := SourcePos{
						Filename: dep,
						Line:     int(loc.Span[0]),
						Col:      int(loc.Span[1]),
					}
					var end SourcePos
					if len(loc.Span) > 3 {
						// span carries explicit end line and column
						end = SourcePos{
							Filename: dep,
							Line:     int(loc.Span[2]),
							Col:      int(loc.Span[3]),
						}
					} else {
						// abbreviated span: same line, only the end column given
						end = SourcePos{
							Filename: dep,
							Line:     int(loc.Span[0]),
							Col:      int(loc.Span[2]),
						}
					}
					*srcSpanAddr = ast2.NewSourceSpan(start, end)
					found = true
					break
				}
			}
			if !found {
				*srcSpanAddr = ast2.UnknownSpan(dep)
			}
			defer func() {
				*srcSpanAddr = orig
			}()

			return parseToProtoRecursive(res, dep, rep, srcSpanAddr, results)
		}()
		if err != nil {
			return err
		}
	}
	return nil
}
+
// pathsEqual reports whether two source-code-info paths contain the same
// sequence of elements.
func pathsEqual(a, b []int32) bool {
	if len(a) != len(b) {
		return false
	}
	for i, elem := range a {
		if elem != b[i] {
			return false
		}
	}
	return true
}
+
+func newReporter(errRep ErrorReporter, warnRep WarningReporter) reporter.Reporter {
+ if errRep != nil {
+ delegate := errRep
+ errRep = func(err ErrorWithPos) error {
+ if _, ok := err.(ErrorWithSourcePos); !ok {
+ err = toErrorWithSourcePos(err)
+ }
+ return delegate(err)
+ }
+ }
+ if warnRep != nil {
+ delegate := warnRep
+ warnRep = func(err ErrorWithPos) {
+ if _, ok := err.(ErrorWithSourcePos); !ok {
+ err = toErrorWithSourcePos(err)
+ }
+ delegate(err)
+ }
+ }
+ return reporter.NewReporter(errRep, warnRep)
+}
+
// getResolver builds the resolver chain used for compilation: the
// user-supplied (or default os.Open) accessor first, then any LookupImport /
// LookupImportProto callbacks, and finally the compiled-in standard imports.
//
// The returned *ast2.SourceSpan points at storage captured by the accessor
// closure: callers (see parseToProtoRecursive) set it to the location of the
// import statement currently being resolved, so accessor errors can be
// reported at that position. The filenames parameter is currently unused.
func (p Parser) getResolver(filenames []string) (protocompile.Resolver, *ast2.SourceSpan) {
	var srcSpan ast2.SourceSpan
	accessor := p.Accessor
	if accessor == nil {
		// default accessor reads directly from the file system
		accessor = func(name string) (io.ReadCloser, error) {
			return os.Open(name)
		}
	}
	sourceResolver := &protocompile.SourceResolver{
		Accessor: func(filename string) (io.ReadCloser, error) {
			in, err := accessor(filename)
			if err != nil {
				if !strings.Contains(err.Error(), filename) {
					// errors that don't include the filename that failed are no bueno
					err = errorWithFilename{filename: filename, underlying: err}
				}
				// attribute the failure to the import site, if one is set
				if srcSpan != nil {
					err = reporter.Error(srcSpan, err)
				}
			}
			return in, err
		},
		ImportPaths: p.ImportPaths,
	}
	var importResolver protocompile.CompositeResolver
	if p.LookupImport != nil {
		importResolver = append(importResolver, protocompile.ResolverFunc(func(path string) (protocompile.SearchResult, error) {
			fd, err := p.LookupImport(path)
			if err != nil {
				return protocompile.SearchResult{}, err
			}
			return protocompile.SearchResult{Desc: fd.UnwrapFile()}, nil
		}))
	}
	if p.LookupImportProto != nil {
		importResolver = append(importResolver, protocompile.ResolverFunc(func(path string) (protocompile.SearchResult, error) {
			fd, err := p.LookupImportProto(path)
			if err != nil {
				return protocompile.SearchResult{}, err
			}
			return protocompile.SearchResult{Proto: fd}, nil
		}))
	}
	// lookup callbacks are backed up by the well-known built-in protos
	backupResolver := protocompile.WithStandardImports(importResolver)
	return protocompile.CompositeResolver{
		sourceResolver,
		protocompile.ResolverFunc(func(path string) (protocompile.SearchResult, error) {
			return backupResolver.FindFileByPath(path)
		}),
	}, &srcSpan
}
+
// fixupFilenames attempts to rewrite the map keys (and the descriptor file
// names) so they match the paths used in the files' import statements, which
// is needed when InferImportPaths is set. It returns the revised map plus a
// map from original path to rewritten path for every file that was renamed.
func fixupFilenames(protos map[string]parser.Result) (revisedProtos map[string]parser.Result, rewrittenPaths map[string]string) {
	// In the event that the given filenames (keys in the supplied map) do not
	// match the actual paths used in 'import' statements in the files, we try
	// to revise names in the protos so that they will match and be linkable.
	revisedProtos = make(map[string]parser.Result, len(protos))
	rewrittenPaths = make(map[string]string, len(protos))

	// protoPaths collects the path prefixes stripped by each rename.
	protoPaths := map[string]struct{}{}
	// TODO: this is O(n^2) but could likely be O(n) with a clever data structure (prefix tree that is indexed backwards?)
	importCandidates := map[string]map[string]struct{}{}
	candidatesAvailable := map[string]struct{}{}
	for name := range protos {
		candidatesAvailable[name] = struct{}{}
		for _, f := range protos {
			for _, imp := range f.FileDescriptorProto().Dependency {
				if strings.HasSuffix(name, imp) || strings.HasSuffix(imp, name) {
					candidates := importCandidates[imp]
					if candidates == nil {
						candidates = map[string]struct{}{}
						importCandidates[imp] = candidates
					}
					candidates[name] = struct{}{}
				}
			}
		}
	}
	for imp, candidates := range importCandidates {
		// if we found multiple possible candidates, use the one that is an exact match
		// if it exists, and otherwise, guess that it's the longest path (most elements)
		var best string
		for c := range candidates {
			if _, ok := candidatesAvailable[c]; !ok {
				// already used this candidate and re-written its filename accordingly
				continue
			}
			if c == imp {
				// exact match!
				best = c
				break
			}
			if best == "" {
				best = c
			} else {
				// NB: We can't actually tell which file is supposed to match
				// this import. So we prefer the longest name. On a tie, we
				// choose the lexically earliest match.
				minLen := strings.Count(best, string(filepath.Separator))
				cLen := strings.Count(c, string(filepath.Separator))
				if cLen > minLen || (cLen == minLen && c < best) {
					best = c
				}
			}
		}
		if best != "" {
			// remember the stripped prefix as a likely proto path
			if len(best) > len(imp) {
				prefix := best[:len(best)-len(imp)]
				protoPaths[prefix] = struct{}{}
			}
			f := protos[best]
			f.FileDescriptorProto().Name = proto.String(imp)
			revisedProtos[imp] = f
			rewrittenPaths[best] = imp
			delete(candidatesAvailable, best)

			// If other candidates are actually references to the same file, remove them.
			for c := range candidates {
				if _, ok := candidatesAvailable[c]; !ok {
					// already used this candidate and re-written its filename accordingly
					continue
				}
				possibleDup := protos[c]
				prevName := possibleDup.FileDescriptorProto().Name
				possibleDup.FileDescriptorProto().Name = proto.String(imp)
				if !proto.Equal(f.FileDescriptorProto(), protos[c].FileDescriptorProto()) {
					// not equal: restore name and look at next one
					possibleDup.FileDescriptorProto().Name = prevName
					continue
				}
				// This file used a different name but was actually the same file. So
				// we prune it from the set.
				rewrittenPaths[c] = imp
				delete(candidatesAvailable, c)
				if len(c) > len(imp) {
					prefix := c[:len(c)-len(imp)]
					protoPaths[prefix] = struct{}{}
				}
			}
		}
	}

	if len(candidatesAvailable) == 0 {
		return revisedProtos, rewrittenPaths
	}

	if len(protoPaths) == 0 {
		// no prefixes were learned: keep remaining names unchanged
		for c := range candidatesAvailable {
			revisedProtos[c] = protos[c]
		}
		return revisedProtos, rewrittenPaths
	}

	// Any remaining candidates are entry-points (not imported by others), so
	// the best bet to "fixing" their file name is to see if they're in one of
	// the proto paths we found, and if so strip that prefix.
	protoPathStrs := make([]string, len(protoPaths))
	i := 0
	for p := range protoPaths {
		protoPathStrs[i] = p
		i++
	}
	sort.Strings(protoPathStrs)
	// we look at paths in reverse order, so we'll use a longer proto path if
	// there is more than one match
	for c := range candidatesAvailable {
		var imp string
		for i := len(protoPathStrs) - 1; i >= 0; i-- {
			p := protoPathStrs[i]
			if strings.HasPrefix(c, p) {
				imp = c[len(p):]
				break
			}
		}
		if imp != "" {
			f := protos[c]
			f.FileDescriptorProto().Name = proto.String(imp)
			// NOTE(review): result discarded — presumably forces lazy
			// initialization of the file node; confirm intent.
			f.FileNode()
			revisedProtos[imp] = f
			rewrittenPaths[c] = imp
		} else {
			revisedProtos[c] = protos[c]
		}
	}

	return revisedProtos, rewrittenPaths
}
+
+func removeDynamicExtensions(fd protoreflect.FileDescriptor, alreadySeen map[string]struct{}) {
+ if _, ok := alreadySeen[fd.Path()]; ok {
+ // already processed
+ return
+ }
+ alreadySeen[fd.Path()] = struct{}{}
+ res, ok := fd.(linker.Result)
+ if ok {
+ removeDynamicExtensionsFromProto(res.FileDescriptorProto())
+ }
+ // also remove extensions from dependencies
+ for i, length := 0, fd.Imports().Len(); i < length; i++ {
+ removeDynamicExtensions(fd.Imports().Get(i).FileDescriptor, alreadySeen)
+ }
+}
+
// removeDynamicExtensionsFromProto scrubs dynamic extension fields from every
// options message in the given file descriptor proto.
//
// protocompile returns descriptors with dynamic extension fields for custom options.
// But protoparse only used known custom options and everything else defined in the
// sources would be stored as unrecognized fields. So to bridge the difference in
// behavior, we need to remove custom options from the given file and add them back
// via serializing-then-de-serializing them back into the options messages. That way,
// statically known options will be properly typed and others will be unrecognized.
//
// This is best effort. So if an error occurs, we'll still return a result, but it
// may include a dynamic extension.
func removeDynamicExtensionsFromProto(fd *descriptorpb.FileDescriptorProto) {
	fd.Options = removeDynamicExtensionsFromOptions(fd.Options)
	// walk every descriptor in the file and scrub its options message
	_ = walk.DescriptorProtos(fd, func(_ protoreflect.FullName, msg proto.Message) error {
		switch msg := msg.(type) {
		case *descriptorpb.DescriptorProto:
			msg.Options = removeDynamicExtensionsFromOptions(msg.Options)
			// extension ranges carry their own options messages
			for _, extr := range msg.ExtensionRange {
				extr.Options = removeDynamicExtensionsFromOptions(extr.Options)
			}
		case *descriptorpb.FieldDescriptorProto:
			msg.Options = removeDynamicExtensionsFromOptions(msg.Options)
		case *descriptorpb.OneofDescriptorProto:
			msg.Options = removeDynamicExtensionsFromOptions(msg.Options)
		case *descriptorpb.EnumDescriptorProto:
			msg.Options = removeDynamicExtensionsFromOptions(msg.Options)
		case *descriptorpb.EnumValueDescriptorProto:
			msg.Options = removeDynamicExtensionsFromOptions(msg.Options)
		case *descriptorpb.ServiceDescriptorProto:
			msg.Options = removeDynamicExtensionsFromOptions(msg.Options)
		case *descriptorpb.MethodDescriptorProto:
			msg.Options = removeDynamicExtensionsFromOptions(msg.Options)
		}
		return nil
	})
}
+
// ptrMsg constrains a type to be both a pointer to T and a proto.Message,
// letting removeDynamicExtensionsFromOptions accept any concrete options
// type while still being able to compare it against nil.
type ptrMsg[T any] interface {
	*T
	proto.Message
}

// fieldValue pairs an extension field descriptor with its value, so that
// extensions can be removed from an options message and later re-applied.
type fieldValue struct {
	fd  protoreflect.FieldDescriptor
	val protoreflect.Value
}
+
// removeDynamicExtensionsFromOptions returns the given options message with
// its dynamic extension fields re-encoded: the extensions are serialized,
// cleared from a clone, and then unmarshaled back in, so that statically
// known extensions become properly typed fields and unknown ones land in
// unrecognized fields. This is best effort: on any marshal/unmarshal error
// the original message is returned unchanged.
func removeDynamicExtensionsFromOptions[O ptrMsg[T], T any](opts O) O {
	if opts == nil {
		return nil
	}
	// collect every extension field currently set on the options message
	var dynamicExtensions []fieldValue
	opts.ProtoReflect().Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool {
		if fd.IsExtension() {
			dynamicExtensions = append(dynamicExtensions, fieldValue{fd: fd, val: val})
		}
		return true
	})

	// serialize only these custom options
	optsWithOnlyDyn := opts.ProtoReflect().Type().New()
	for _, fv := range dynamicExtensions {
		optsWithOnlyDyn.Set(fv.fd, fv.val)
	}
	data, err := proto.MarshalOptions{AllowPartial: true}.Marshal(optsWithOnlyDyn.Interface())
	if err != nil {
		// oh, well... can't fix this one
		return opts
	}

	// and then replace values by clearing these custom options and deserializing
	optsClone := proto.Clone(opts).ProtoReflect()
	for _, fv := range dynamicExtensions {
		optsClone.Clear(fv.fd)
	}
	err = proto.UnmarshalOptions{AllowPartial: true, Merge: true}.Unmarshal(data, optsClone.Interface())
	if err != nil {
		// bummer, can't fix this one
		return opts
	}

	return optsClone.Interface().(O)
}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/resolve_files.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/resolve_files.go
new file mode 100644
index 0000000..3ae1415
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/resolve_files.go
@@ -0,0 +1,175 @@
+package protoparse
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+// errNoImportPathsForAbsoluteFilePath is returned by ResolveFilenames when an
+// absolute file path is supplied but no import paths were configured.
+var errNoImportPathsForAbsoluteFilePath = errors.New("must specify at least one import path if any absolute file paths are given")
+
+// ResolveFilenames tries to resolve fileNames into paths that are relative to
+// directories in the given importPaths. The returned slice has the results in
+// the same order as they are supplied in fileNames.
+//
+// The resulting names should be suitable for passing to Parser.ParseFiles.
+//
+// If no import paths are given and any file name is absolute, this returns an
+// error. If no import paths are given and all file names are relative, this
+// returns the original file names. If a file name is already relative to one
+// of the given import paths, it will be unchanged in the returned slice. If a
+// file name given is relative to the current working directory, it will be made
+// relative to one of the given import paths; but if it cannot be made relative
+// (due to no matching import path), an error will be returned.
+func ResolveFilenames(importPaths []string, fileNames ...string) ([]string, error) {
+	if len(importPaths) == 0 {
+		if containsAbsFilePath(fileNames) {
+			// We have to do this as otherwise parseProtoFiles can result in duplicate symbols.
+			// For example, assume we import "foo/bar/bar.proto" in a file "/home/alice/dev/foo/bar/baz.proto"
+			// as we call ParseFiles("/home/alice/dev/foo/bar/bar.proto","/home/alice/dev/foo/bar/baz.proto")
+			// with "/home/alice/dev" as our current directory. Due to the recursive nature of parseProtoFiles,
+			// it will discover the import "foo/bar/bar.proto" in the input file, and call parse on this,
+			// adding "foo/bar/bar.proto" to the parsed results, as well as "/home/alice/dev/foo/bar/bar.proto"
+			// from the input file list. This will result in a
+			// 'duplicate symbol SYMBOL: already defined as field in "/home/alice/dev/foo/bar/bar.proto'
+			// error being returned from ParseFiles.
+			return nil, errNoImportPathsForAbsoluteFilePath
+		}
+		// no import paths and all names relative: return them untouched
+		return fileNames, nil
+	}
+	// canonicalize import paths up front so each file is resolved against
+	// absolute directories
+	absImportPaths, err := absoluteFilePaths(importPaths)
+	if err != nil {
+		return nil, err
+	}
+	resolvedFileNames := make([]string, 0, len(fileNames))
+	for _, fileName := range fileNames {
+		resolvedFileName, err := resolveFilename(absImportPaths, fileName)
+		if err != nil {
+			return nil, err
+		}
+		// On Windows, the resolved paths will use "\", but proto imports
+		// require the use of "/". So fix up here.
+		if filepath.Separator != '/' {
+			resolvedFileName = strings.Replace(resolvedFileName, string(filepath.Separator), "/", -1)
+		}
+		resolvedFileNames = append(resolvedFileNames, resolvedFileName)
+	}
+	return resolvedFileNames, nil
+}
+
+// containsAbsFilePath reports whether any of the given paths is absolute.
+func containsAbsFilePath(filePaths []string) bool {
+	for _, filePath := range filePaths {
+		if filepath.IsAbs(filePath) {
+			return true
+		}
+	}
+	return false
+}
+
+// absoluteFilePaths canonicalizes each of filePaths (see canonicalize) and
+// returns the results in the same order, failing on the first error.
+func absoluteFilePaths(filePaths []string) ([]string, error) {
+	absFilePaths := make([]string, 0, len(filePaths))
+	for _, filePath := range filePaths {
+		absFilePath, err := canonicalize(filePath)
+		if err != nil {
+			return nil, err
+		}
+		absFilePaths = append(absFilePaths, absFilePath)
+	}
+	return absFilePaths, nil
+}
+
+// canonicalize returns an absolute form of filePath with symlinks resolved
+// where possible. Unlike a single filepath.EvalSymlinks call, it tolerates
+// path elements that do not exist: it resolves the longest existing prefix
+// and re-joins the unresolved suffix onto the result.
+func canonicalize(filePath string) (string, error) {
+	absPath, err := filepath.Abs(filePath)
+	if err != nil {
+		return "", err
+	}
+	// this is kind of gross, but it lets us construct a resolved path even if some
+	// path elements do not exist (a single call to filepath.EvalSymlinks would just
+	// return an error, ENOENT, in that case).
+	head := absPath
+	tail := ""
+	for {
+		noLinks, err := filepath.EvalSymlinks(head)
+		if err == nil {
+			if tail != "" {
+				return filepath.Join(noLinks, tail), nil
+			}
+			return noLinks, nil
+		}
+
+		// head could not be resolved: peel its last element onto tail and retry
+		// with the parent directory
+		if tail == "" {
+			tail = filepath.Base(head)
+		} else {
+			tail = filepath.Join(filepath.Base(head), tail)
+		}
+		head = filepath.Dir(head)
+		if head == "." {
+			// ran out of path elements to try to resolve
+			return absPath, nil
+		}
+	}
+}
+
+// Prefixes ("./" and "../", using the OS separator) that mark a path as
+// explicitly relative to the current working directory.
+const dotPrefix = "." + string(filepath.Separator)
+const dotDotPrefix = ".." + string(filepath.Separator)
+
+// resolveFilename resolves a single fileName against the given absolute
+// import paths. Absolute names go straight to resolveAbsFilename. Relative
+// names that do not start with "./" or "../" are first checked for existence
+// directly under each import path (in which case they are already correctly
+// relative); otherwise the name is treated as relative to the current
+// working directory and rewritten relative to a matching import path.
+func resolveFilename(absImportPaths []string, fileName string) (string, error) {
+	if filepath.IsAbs(fileName) {
+		return resolveAbsFilename(absImportPaths, fileName)
+	}
+
+	if !strings.HasPrefix(fileName, dotPrefix) && !strings.HasPrefix(fileName, dotDotPrefix) {
+		// Use of . and .. are assumed to be relative to current working
+		// directory. So if those aren't present, check to see if the file is
+		// relative to an import path.
+		for _, absImportPath := range absImportPaths {
+			absFileName := filepath.Join(absImportPath, fileName)
+			_, err := os.Stat(absFileName)
+			if err != nil {
+				continue
+			}
+			// found it! it was relative to this import path
+			return fileName, nil
+		}
+	}
+
+	// must be relative to current working dir
+	return resolveAbsFilename(absImportPaths, fileName)
+}
+
+// resolveAbsFilename canonicalizes fileName and rewrites it relative to the
+// first import path it resides under. It returns an error if the file is not
+// a descendant of any import path.
+func resolveAbsFilename(absImportPaths []string, fileName string) (string, error) {
+	absFileName, err := canonicalize(fileName)
+	if err != nil {
+		return "", err
+	}
+	for _, absImportPath := range absImportPaths {
+		if isDescendant(absImportPath, absFileName) {
+			resolvedPath, err := filepath.Rel(absImportPath, absFileName)
+			if err != nil {
+				return "", err
+			}
+			return resolvedPath, nil
+		}
+	}
+	return "", fmt.Errorf("%s does not reside in any import path", fileName)
+}
+
+// isDescendant returns true if file is a descendant of dir. Both dir and file must
+// be cleaned, absolute paths.
+func isDescendant(dir, file string) bool {
+	dir = filepath.Clean(dir)
+	// walk up from file one parent directory at a time, looking for dir
+	cur := file
+	for {
+		d := filepath.Dir(cur)
+		if d == dir {
+			return true
+		}
+		if d == "." || d == cur {
+			// we've run out of path elements
+			return false
+		}
+		cur = d
+	}
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/test-source-info.txt b/vendor/github.com/jhump/protoreflect/desc/protoparse/test-source-info.txt
new file mode 100644
index 0000000..c9bc50b
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/test-source-info.txt
@@ -0,0 +1,6401 @@
+---- desc_test_comments.proto ----
+
+
+:
+desc_test_comments.proto:8:1
+desc_test_comments.proto:156:2
+
+
+ > syntax:
+desc_test_comments.proto:8:1
+desc_test_comments.proto:8:19
+ Leading detached comment [0]:
+ This is the first detached comment for the syntax.
+
+ Leading detached comment [1]:
+
+ This is a second detached comment.
+
+ Leading detached comment [2]:
+ This is a third.
+
+ Leading comments:
+ Syntax comment...
+
+ Trailing comments:
+ Syntax trailer.
+
+
+
+ > package:
+desc_test_comments.proto:12:1
+desc_test_comments.proto:12:17
+ Leading comments:
+ And now the package declaration
+
+
+
+ > options:
+desc_test_comments.proto:15:1
+desc_test_comments.proto:15:75
+
+
+ > options > go_package:
+desc_test_comments.proto:15:1
+desc_test_comments.proto:15:75
+ Leading comments:
+ option comments FTW!!!
+
+
+
+ > dependency[0]:
+desc_test_comments.proto:17:1
+desc_test_comments.proto:17:45
+
+
+ > public_dependency[0]:
+desc_test_comments.proto:17:8
+desc_test_comments.proto:17:14
+
+
+ > dependency[1]:
+desc_test_comments.proto:18:1
+desc_test_comments.proto:18:34
+
+
+ > message_type[0]:
+desc_test_comments.proto:25:1
+desc_test_comments.proto:113:2
+ Leading detached comment [0]:
+ Multiple white space lines (like above) cannot
+ be preserved...
+
+ Leading comments:
+ We need a request for our RPC service below.
+
+
+
+ > message_type[0] > name:
+desc_test_comments.proto:25:68
+desc_test_comments.proto:25:75
+ Leading comments:
+ request with a capital R
+ Trailing comments:
+ trailer
+
+
+
+ > message_type[0] > options:
+desc_test_comments.proto:26:9
+desc_test_comments.proto:26:34
+
+
+ > message_type[0] > options > deprecated:
+desc_test_comments.proto:26:9
+desc_test_comments.proto:26:34
+ Trailing comments:
+ deprecated!
+
+
+
+ > message_type[0] > field[0]:
+desc_test_comments.proto:29:9
+desc_test_comments.proto:32:132
+ Leading comments:
+ A field comment
+
+ Trailing comments:
+ field trailer #1...
+
+
+
+ > message_type[0] > field[0] > label:
+desc_test_comments.proto:29:9
+desc_test_comments.proto:29:17
+
+
+ > message_type[0] > field[0] > type:
+desc_test_comments.proto:29:18
+desc_test_comments.proto:29:23
+
+
+ > message_type[0] > field[0] > name:
+desc_test_comments.proto:29:24
+desc_test_comments.proto:29:27
+
+
+ > message_type[0] > field[0] > number:
+desc_test_comments.proto:29:70
+desc_test_comments.proto:29:71
+ Leading comments:
+ tag numero uno
+ Trailing comments:
+ tag trailer
+that spans multiple lines...
+more than two.
+
+
+ > message_type[0] > field[0] > options:
+desc_test_comments.proto:32:11
+desc_test_comments.proto:32:131
+
+
+ > message_type[0] > field[0] > options > packed:
+desc_test_comments.proto:32:12
+desc_test_comments.proto:32:23
+ Trailing comments:
+ packed!
+
+
+ > message_type[0] > field[0] > json_name:
+desc_test_comments.proto:32:39
+desc_test_comments.proto:32:56
+ Trailing comments:
+ custom JSON!
+
+
+ > message_type[0] > field[0] > options > (testprotos.ffubar)[0]:
+desc_test_comments.proto:32:77
+desc_test_comments.proto:32:102
+
+
+ > message_type[0] > field[0] > options > (testprotos.ffubarb):
+desc_test_comments.proto:32:104
+desc_test_comments.proto:32:130
+
+
+ > message_type[0] > options:
+desc_test_comments.proto:35:27
+desc_test_comments.proto:35:61
+
+
+ > message_type[0] > options > (testprotos.mfubar):
+desc_test_comments.proto:35:27
+desc_test_comments.proto:35:61
+ Leading comments:
+ lead mfubar
+ Trailing comments:
+ trailing mfubar
+
+
+
+ > message_type[0] > field[1]:
+desc_test_comments.proto:42:29
+desc_test_comments.proto:43:77
+ Leading detached comment [0]:
+ some detached comments
+
+ Leading detached comment [1]:
+ some detached comments with unicode 这个是值
+
+ Leading detached comment [2]:
+ Another field comment
+
+ Leading comments:
+ label comment
+
+
+ > message_type[0] > field[1] > label:
+desc_test_comments.proto:42:29
+desc_test_comments.proto:42:37
+
+
+ > message_type[0] > field[1] > type:
+desc_test_comments.proto:42:57
+desc_test_comments.proto:42:63
+ Leading detached comment [0]:
+ type comment
+
+
+ > message_type[0] > field[1] > name:
+desc_test_comments.proto:42:83
+desc_test_comments.proto:42:87
+ Leading detached comment [0]:
+ name comment
+
+
+ > message_type[0] > field[1] > number:
+desc_test_comments.proto:42:90
+desc_test_comments.proto:42:91
+
+
+ > message_type[0] > field[1] > options:
+desc_test_comments.proto:43:17
+desc_test_comments.proto:43:76
+
+
+ > message_type[0] > field[1] > default_value:
+desc_test_comments.proto:43:37
+desc_test_comments.proto:43:54
+ Leading detached comment [0]:
+ default lead
+ Trailing comments:
+ default trail
+
+
+ > message_type[0] > extension_range:
+desc_test_comments.proto:46:9
+desc_test_comments.proto:46:31
+ Leading comments:
+ extension range comments are (sadly) not preserved
+
+
+
+ > message_type[0] > extension_range[0]:
+desc_test_comments.proto:46:20
+desc_test_comments.proto:46:30
+
+
+ > message_type[0] > extension_range[0] > start:
+desc_test_comments.proto:46:20
+desc_test_comments.proto:46:23
+
+
+ > message_type[0] > extension_range[0] > end:
+desc_test_comments.proto:46:27
+desc_test_comments.proto:46:30
+
+
+ > message_type[0] > extension_range:
+desc_test_comments.proto:47:9
+desc_test_comments.proto:47:109
+
+
+ > message_type[0] > extension_range[1]:
+desc_test_comments.proto:47:20
+desc_test_comments.proto:47:30
+
+
+ > message_type[0] > extension_range[1] > start:
+desc_test_comments.proto:47:20
+desc_test_comments.proto:47:23
+
+
+ > message_type[0] > extension_range[1] > end:
+desc_test_comments.proto:47:27
+desc_test_comments.proto:47:30
+
+
+ > message_type[0] > extension_range[1] > options:
+desc_test_comments.proto:47:31
+desc_test_comments.proto:47:108
+
+
+ > message_type[0] > extension_range[1] > options > (testprotos.exfubarb):
+desc_test_comments.proto:47:32
+desc_test_comments.proto:47:74
+
+
+ > message_type[0] > extension_range[1] > options > (testprotos.exfubar)[0]:
+desc_test_comments.proto:47:76
+desc_test_comments.proto:47:107
+
+
+ > message_type[0] > reserved_range:
+desc_test_comments.proto:51:48
+desc_test_comments.proto:51:77
+ Leading detached comment [0]:
+ another detached comment
+
+ Leading comments:
+ same for reserved range comments
+
+
+ > message_type[0] > reserved_range[0]:
+desc_test_comments.proto:51:57
+desc_test_comments.proto:51:65
+
+
+ > message_type[0] > reserved_range[0] > start:
+desc_test_comments.proto:51:57
+desc_test_comments.proto:51:59
+
+
+ > message_type[0] > reserved_range[0] > end:
+desc_test_comments.proto:51:63
+desc_test_comments.proto:51:65
+
+
+ > message_type[0] > reserved_range[1]:
+desc_test_comments.proto:51:67
+desc_test_comments.proto:51:75
+
+
+ > message_type[0] > reserved_range[1] > start:
+desc_test_comments.proto:51:67
+desc_test_comments.proto:51:69
+
+
+ > message_type[0] > reserved_range[1] > end:
+desc_test_comments.proto:51:73
+desc_test_comments.proto:51:75
+
+
+ > message_type[0] > reserved_name:
+desc_test_comments.proto:52:9
+desc_test_comments.proto:52:38
+ Trailing comments:
+ reserved trailers
+
+
+ > message_type[0] > reserved_name[0]:
+desc_test_comments.proto:52:18
+desc_test_comments.proto:52:23
+
+
+ > message_type[0] > reserved_name[1]:
+desc_test_comments.proto:52:25
+desc_test_comments.proto:52:30
+
+
+ > message_type[0] > reserved_name[2]:
+desc_test_comments.proto:52:32
+desc_test_comments.proto:52:37
+
+
+ > message_type[0] > field[2]:
+desc_test_comments.proto:55:9
+desc_test_comments.proto:69:10
+
+
+ > message_type[0] > field[2] > label:
+desc_test_comments.proto:55:9
+desc_test_comments.proto:55:17
+
+
+ > message_type[0] > field[2] > type:
+desc_test_comments.proto:55:18
+desc_test_comments.proto:55:23
+
+
+ > message_type[0] > field[2] > name:
+desc_test_comments.proto:55:41
+desc_test_comments.proto:55:47
+
+
+ > message_type[0] > field[2] > number:
+desc_test_comments.proto:55:50
+desc_test_comments.proto:55:51
+
+
+ > message_type[0] > nested_type[0]:
+desc_test_comments.proto:55:9
+desc_test_comments.proto:69:10
+ Leading comments:
+ Group comment with emoji 😀 😍 👻 ❤ 💯 💥 🐶 🦂 🥑 🍻 🌍 🚕 🪐
+
+ Trailing comments:
+ trailer for Extras
+
+
+
+ > message_type[0] > nested_type[0] > name:
+desc_test_comments.proto:55:41
+desc_test_comments.proto:55:47
+ Leading detached comment [0]:
+ group name
+
+
+ > message_type[0] > field[2] > type_name:
+desc_test_comments.proto:55:41
+desc_test_comments.proto:55:47
+
+
+ > message_type[0] > nested_type[0] > options:
+desc_test_comments.proto:59:17
+desc_test_comments.proto:59:52
+
+
+ > message_type[0] > nested_type[0] > options > (testprotos.mfubar):
+desc_test_comments.proto:59:17
+desc_test_comments.proto:59:52
+ Leading comments:
+ this is a custom option
+
+
+
+ > message_type[0] > nested_type[0] > field[0]:
+desc_test_comments.proto:61:17
+desc_test_comments.proto:61:41
+
+
+ > message_type[0] > nested_type[0] > field[0] > label:
+desc_test_comments.proto:61:17
+desc_test_comments.proto:61:25
+
+
+ > message_type[0] > nested_type[0] > field[0] > type:
+desc_test_comments.proto:61:26
+desc_test_comments.proto:61:32
+
+
+ > message_type[0] > nested_type[0] > field[0] > name:
+desc_test_comments.proto:61:33
+desc_test_comments.proto:61:36
+
+
+ > message_type[0] > nested_type[0] > field[0] > number:
+desc_test_comments.proto:61:39
+desc_test_comments.proto:61:40
+
+
+ > message_type[0] > nested_type[0] > field[1]:
+desc_test_comments.proto:62:17
+desc_test_comments.proto:62:40
+
+
+ > message_type[0] > nested_type[0] > field[1] > label:
+desc_test_comments.proto:62:17
+desc_test_comments.proto:62:25
+
+
+ > message_type[0] > nested_type[0] > field[1] > type:
+desc_test_comments.proto:62:26
+desc_test_comments.proto:62:31
+
+
+ > message_type[0] > nested_type[0] > field[1] > name:
+desc_test_comments.proto:62:32
+desc_test_comments.proto:62:35
+
+
+ > message_type[0] > nested_type[0] > field[1] > number:
+desc_test_comments.proto:62:38
+desc_test_comments.proto:62:39
+
+
+ > message_type[0] > nested_type[0] > options:
+desc_test_comments.proto:64:17
+desc_test_comments.proto:64:64
+
+
+ > message_type[0] > nested_type[0] > options > no_standard_descriptor_accessor:
+desc_test_comments.proto:64:17
+desc_test_comments.proto:64:64
+
+
+ > message_type[0] > nested_type[0] > field[2]:
+desc_test_comments.proto:67:17
+desc_test_comments.proto:67:41
+ Leading comments:
+ Leading comment...
+
+ Trailing comments:
+ Trailing comment...
+
+
+
+ > message_type[0] > nested_type[0] > field[2] > label:
+desc_test_comments.proto:67:17
+desc_test_comments.proto:67:25
+
+
+ > message_type[0] > nested_type[0] > field[2] > type:
+desc_test_comments.proto:67:26
+desc_test_comments.proto:67:32
+
+
+ > message_type[0] > nested_type[0] > field[2] > name:
+desc_test_comments.proto:67:33
+desc_test_comments.proto:67:36
+
+
+ > message_type[0] > nested_type[0] > field[2] > number:
+desc_test_comments.proto:67:39
+desc_test_comments.proto:67:40
+
+
+ > message_type[0] > enum_type[0]:
+desc_test_comments.proto:71:9
+desc_test_comments.proto:93:10
+ Trailing comments:
+ trailer for enum
+
+
+
+ > message_type[0] > enum_type[0] > name:
+desc_test_comments.proto:71:14
+desc_test_comments.proto:71:29
+ Trailing comments:
+ "super"!
+
+
+
+ > message_type[0] > enum_type[0] > options:
+desc_test_comments.proto:75:17
+desc_test_comments.proto:75:43
+
+
+ > message_type[0] > enum_type[0] > options > allow_alias:
+desc_test_comments.proto:75:17
+desc_test_comments.proto:75:43
+ Leading comments:
+ allow_alias comments!
+
+
+
+ > message_type[0] > enum_type[0] > value[0]:
+desc_test_comments.proto:77:17
+desc_test_comments.proto:77:86
+
+
+ > message_type[0] > enum_type[0] > value[0] > name:
+desc_test_comments.proto:77:17
+desc_test_comments.proto:77:22
+
+
+ > message_type[0] > enum_type[0] > value[0] > number:
+desc_test_comments.proto:77:25
+desc_test_comments.proto:77:26
+
+
+ > message_type[0] > enum_type[0] > value[0] > options:
+desc_test_comments.proto:77:27
+desc_test_comments.proto:77:85
+
+
+ > message_type[0] > enum_type[0] > value[0] > options > (testprotos.evfubars):
+desc_test_comments.proto:77:28
+desc_test_comments.proto:77:56
+
+
+ > message_type[0] > enum_type[0] > value[0] > options > (testprotos.evfubar):
+desc_test_comments.proto:77:58
+desc_test_comments.proto:77:84
+
+
+ > message_type[0] > enum_type[0] > value[1]:
+desc_test_comments.proto:78:17
+desc_test_comments.proto:78:100
+
+
+ > message_type[0] > enum_type[0] > value[1] > name:
+desc_test_comments.proto:78:17
+desc_test_comments.proto:78:22
+
+
+ > message_type[0] > enum_type[0] > value[1] > number:
+desc_test_comments.proto:78:25
+desc_test_comments.proto:78:26
+
+
+ > message_type[0] > enum_type[0] > value[1] > options:
+desc_test_comments.proto:78:27
+desc_test_comments.proto:78:99
+
+
+ > message_type[0] > enum_type[0] > value[1] > options > (testprotos.evfubaruf):
+desc_test_comments.proto:78:29
+desc_test_comments.proto:78:57
+
+
+ > message_type[0] > enum_type[0] > value[1] > options > (testprotos.evfubaru):
+desc_test_comments.proto:78:73
+desc_test_comments.proto:78:98
+ Leading detached comment [0]:
+ swoosh!
+
+
+ > message_type[0] > enum_type[0] > value[2]:
+desc_test_comments.proto:79:17
+desc_test_comments.proto:79:27
+
+
+ > message_type[0] > enum_type[0] > value[2] > name:
+desc_test_comments.proto:79:17
+desc_test_comments.proto:79:22
+
+
+ > message_type[0] > enum_type[0] > value[2] > number:
+desc_test_comments.proto:79:25
+desc_test_comments.proto:79:26
+
+
+ > message_type[0] > enum_type[0] > value[3]:
+desc_test_comments.proto:80:17
+desc_test_comments.proto:80:28
+
+
+ > message_type[0] > enum_type[0] > value[3] > name:
+desc_test_comments.proto:80:17
+desc_test_comments.proto:80:23
+
+
+ > message_type[0] > enum_type[0] > value[3] > number:
+desc_test_comments.proto:80:26
+desc_test_comments.proto:80:27
+
+
+ > message_type[0] > enum_type[0] > options:
+desc_test_comments.proto:82:17
+desc_test_comments.proto:82:52
+
+
+ > message_type[0] > enum_type[0] > options > (testprotos.efubars):
+desc_test_comments.proto:82:17
+desc_test_comments.proto:82:52
+
+
+ > message_type[0] > enum_type[0] > value[4]:
+desc_test_comments.proto:84:17
+desc_test_comments.proto:84:27
+
+
+ > message_type[0] > enum_type[0] > value[4] > name:
+desc_test_comments.proto:84:17
+desc_test_comments.proto:84:22
+
+
+ > message_type[0] > enum_type[0] > value[4] > number:
+desc_test_comments.proto:84:25
+desc_test_comments.proto:84:26
+
+
+ > message_type[0] > enum_type[0] > value[5]:
+desc_test_comments.proto:85:17
+desc_test_comments.proto:85:29
+
+
+ > message_type[0] > enum_type[0] > value[5] > name:
+desc_test_comments.proto:85:17
+desc_test_comments.proto:85:24
+
+
+ > message_type[0] > enum_type[0] > value[5] > number:
+desc_test_comments.proto:85:27
+desc_test_comments.proto:85:28
+
+
+ > message_type[0] > enum_type[0] > value[6]:
+desc_test_comments.proto:86:17
+desc_test_comments.proto:86:60
+
+
+ > message_type[0] > enum_type[0] > value[6] > name:
+desc_test_comments.proto:86:17
+desc_test_comments.proto:86:24
+
+
+ > message_type[0] > enum_type[0] > value[6] > number:
+desc_test_comments.proto:86:27
+desc_test_comments.proto:86:28
+
+
+ > message_type[0] > enum_type[0] > value[6] > options:
+desc_test_comments.proto:86:29
+desc_test_comments.proto:86:59
+
+
+ > message_type[0] > enum_type[0] > value[6] > options > (testprotos.evfubarsf):
+desc_test_comments.proto:86:30
+desc_test_comments.proto:86:58
+
+
+ > message_type[0] > enum_type[0] > value[7]:
+desc_test_comments.proto:87:17
+desc_test_comments.proto:87:28
+
+
+ > message_type[0] > enum_type[0] > value[7] > name:
+desc_test_comments.proto:87:17
+desc_test_comments.proto:87:23
+
+
+ > message_type[0] > enum_type[0] > value[7] > number:
+desc_test_comments.proto:87:26
+desc_test_comments.proto:87:27
+
+
+ > message_type[0] > enum_type[0] > value[8]:
+desc_test_comments.proto:88:17
+desc_test_comments.proto:88:31
+
+
+ > message_type[0] > enum_type[0] > value[8] > name:
+desc_test_comments.proto:88:17
+desc_test_comments.proto:88:26
+
+
+ > message_type[0] > enum_type[0] > value[8] > number:
+desc_test_comments.proto:88:29
+desc_test_comments.proto:88:30
+
+
+ > message_type[0] > enum_type[0] > value[9]:
+desc_test_comments.proto:89:17
+desc_test_comments.proto:89:27
+
+
+ > message_type[0] > enum_type[0] > value[9] > name:
+desc_test_comments.proto:89:17
+desc_test_comments.proto:89:22
+
+
+ > message_type[0] > enum_type[0] > value[9] > number:
+desc_test_comments.proto:89:25
+desc_test_comments.proto:89:26
+
+
+ > message_type[0] > enum_type[0] > value[10]:
+desc_test_comments.proto:90:17
+desc_test_comments.proto:90:31
+
+
+ > message_type[0] > enum_type[0] > value[10] > name:
+desc_test_comments.proto:90:17
+desc_test_comments.proto:90:23
+
+
+ > message_type[0] > enum_type[0] > value[10] > number:
+desc_test_comments.proto:90:26
+desc_test_comments.proto:90:30
+
+
+ > message_type[0] > enum_type[0] > options:
+desc_test_comments.proto:92:17
+desc_test_comments.proto:92:50
+
+
+ > message_type[0] > enum_type[0] > options > (testprotos.efubar):
+desc_test_comments.proto:92:17
+desc_test_comments.proto:92:50
+
+
+ > message_type[0] > oneof_decl[0]:
+desc_test_comments.proto:96:9
+desc_test_comments.proto:101:10
+ Leading comments:
+ can be this or that
+
+ Trailing comments:
+ trailer for oneof abc
+
+
+
+ > message_type[0] > oneof_decl[0] > name:
+desc_test_comments.proto:96:15
+desc_test_comments.proto:96:18
+
+
+ > message_type[0] > field[3]:
+desc_test_comments.proto:99:17
+desc_test_comments.proto:99:33
+
+
+ > message_type[0] > field[3] > type:
+desc_test_comments.proto:99:17
+desc_test_comments.proto:99:23
+
+
+ > message_type[0] > field[3] > name:
+desc_test_comments.proto:99:24
+desc_test_comments.proto:99:28
+
+
+ > message_type[0] > field[3] > number:
+desc_test_comments.proto:99:31
+desc_test_comments.proto:99:32
+
+
+ > message_type[0] > field[4]:
+desc_test_comments.proto:100:17
+desc_test_comments.proto:100:32
+
+
+ > message_type[0] > field[4] > type:
+desc_test_comments.proto:100:17
+desc_test_comments.proto:100:22
+
+
+ > message_type[0] > field[4] > name:
+desc_test_comments.proto:100:23
+desc_test_comments.proto:100:27
+
+
+ > message_type[0] > field[4] > number:
+desc_test_comments.proto:100:30
+desc_test_comments.proto:100:31
+
+
+ > message_type[0] > oneof_decl[1]:
+desc_test_comments.proto:103:9
+desc_test_comments.proto:109:10
+ Leading comments:
+ can be these or those
+
+
+
+ > message_type[0] > oneof_decl[1] > name:
+desc_test_comments.proto:103:15
+desc_test_comments.proto:103:18
+
+
+ > message_type[0] > oneof_decl[1] > options:
+desc_test_comments.proto:105:17
+desc_test_comments.proto:105:89
+
+
+ > message_type[0] > oneof_decl[1] > options > (testprotos.oofubar)[0]:
+desc_test_comments.proto:105:17
+desc_test_comments.proto:105:89
+ Leading comments:
+ whoops?
+
+
+
+ > message_type[0] > field[5]:
+desc_test_comments.proto:107:17
+desc_test_comments.proto:107:34
+
+
+ > message_type[0] > field[5] > type:
+desc_test_comments.proto:107:17
+desc_test_comments.proto:107:23
+
+
+ > message_type[0] > field[5] > name:
+desc_test_comments.proto:107:24
+desc_test_comments.proto:107:29
+
+
+ > message_type[0] > field[5] > number:
+desc_test_comments.proto:107:32
+desc_test_comments.proto:107:33
+
+
+ > message_type[0] > field[6]:
+desc_test_comments.proto:108:17
+desc_test_comments.proto:108:33
+
+
+ > message_type[0] > field[6] > type:
+desc_test_comments.proto:108:17
+desc_test_comments.proto:108:22
+
+
+ > message_type[0] > field[6] > name:
+desc_test_comments.proto:108:23
+desc_test_comments.proto:108:28
+
+
+ > message_type[0] > field[6] > number:
+desc_test_comments.proto:108:31
+desc_test_comments.proto:108:32
+
+
+ > message_type[0] > field[7]:
+desc_test_comments.proto:112:9
+desc_test_comments.proto:112:40
+ Leading comments:
+ map field
+
+
+
+ > message_type[0] > field[7] > type_name:
+desc_test_comments.proto:112:9
+desc_test_comments.proto:112:28
+
+
+ > message_type[0] > field[7] > name:
+desc_test_comments.proto:112:29
+desc_test_comments.proto:112:35
+
+
+ > message_type[0] > field[7] > number:
+desc_test_comments.proto:112:38
+desc_test_comments.proto:112:39
+
+
+ > extension:
+desc_test_comments.proto:117:1
+desc_test_comments.proto:128:2
+ Leading detached comment [0]:
+ And next we'll need some extensions...
+
+ Trailing comments:
+ trailer for extend block
+
+
+
+ > extension[0]:
+desc_test_comments.proto:125:9
+desc_test_comments.proto:125:37
+ Leading comments:
+ comment for guid1
+
+
+
+ > extension[0] > extendee:
+desc_test_comments.proto:119:1
+desc_test_comments.proto:119:8
+ Leading comments:
+ extendee comment
+
+
+
+ > extension[0] > label:
+desc_test_comments.proto:125:9
+desc_test_comments.proto:125:17
+
+
+ > extension[0] > type:
+desc_test_comments.proto:125:18
+desc_test_comments.proto:125:24
+
+
+ > extension[0] > name:
+desc_test_comments.proto:125:25
+desc_test_comments.proto:125:30
+
+
+ > extension[0] > number:
+desc_test_comments.proto:125:33
+desc_test_comments.proto:125:36
+
+
+ > extension[1]:
+desc_test_comments.proto:127:9
+desc_test_comments.proto:127:37
+ Leading comments:
+ ... and a comment for guid2
+
+
+
+ > extension[1] > extendee:
+desc_test_comments.proto:119:1
+desc_test_comments.proto:119:8
+
+
+ > extension[1] > label:
+desc_test_comments.proto:127:9
+desc_test_comments.proto:127:17
+
+
+ > extension[1] > type:
+desc_test_comments.proto:127:18
+desc_test_comments.proto:127:24
+
+
+ > extension[1] > name:
+desc_test_comments.proto:127:25
+desc_test_comments.proto:127:30
+
+
+ > extension[1] > number:
+desc_test_comments.proto:127:33
+desc_test_comments.proto:127:36
+
+
+ > message_type[1]:
+desc_test_comments.proto:131:1
+desc_test_comments.proto:131:115
+ Trailing comments:
+ trailer for AnEmptyMessage
+
+
+ > message_type[1] > name:
+desc_test_comments.proto:131:36
+desc_test_comments.proto:131:50
+ Leading detached comment [0]:
+ name leading comment
+
+
+ > service[0]:
+desc_test_comments.proto:134:1
+desc_test_comments.proto:156:2
+ Leading comments:
+ Service comment
+
+ Trailing comments:
+ service trailer
+ that spans multiple lines
+
+
+
+ > service[0] > name:
+desc_test_comments.proto:134:28
+desc_test_comments.proto:134:38
+ Leading detached comment [0]:
+ service name
+
+
+ > service[0] > options:
+desc_test_comments.proto:139:9
+desc_test_comments.proto:139:43
+
+
+ > service[0] > options > (testprotos.sfubar) > id:
+desc_test_comments.proto:139:9
+desc_test_comments.proto:139:43
+ Leading comments:
+ option that sets field
+
+
+
+ > service[0] > options:
+desc_test_comments.proto:141:9
+desc_test_comments.proto:141:47
+
+
+ > service[0] > options > (testprotos.sfubar) > name:
+desc_test_comments.proto:141:9
+desc_test_comments.proto:141:47
+ Leading comments:
+ another option that sets field
+
+
+
+ > service[0] > options:
+desc_test_comments.proto:142:9
+desc_test_comments.proto:142:35
+
+
+ > service[0] > options > deprecated:
+desc_test_comments.proto:142:9
+desc_test_comments.proto:142:35
+ Trailing comments:
+ DEPRECATED!
+
+
+
+ > service[0] > options:
+desc_test_comments.proto:144:9
+desc_test_comments.proto:144:45
+
+
+ > service[0] > options > (testprotos.sfubare):
+desc_test_comments.proto:144:9
+desc_test_comments.proto:144:45
+
+
+ > service[0] > method[0]:
+desc_test_comments.proto:147:9
+desc_test_comments.proto:148:84
+ Leading comments:
+ Method comment
+
+ Trailing comments:
+ compact method trailer
+
+
+
+ > service[0] > method[0] > name:
+desc_test_comments.proto:147:28
+desc_test_comments.proto:147:40
+ Leading detached comment [0]:
+ rpc name
+
+
+ > service[0] > method[0] > client_streaming:
+desc_test_comments.proto:147:73
+desc_test_comments.proto:147:79
+ Leading detached comment [0]:
+ comment B
+
+
+ > service[0] > method[0] > input_type:
+desc_test_comments.proto:147:96
+desc_test_comments.proto:147:103
+ Leading detached comment [0]:
+ comment C
+
+
+ > service[0] > method[0] > output_type:
+desc_test_comments.proto:148:57
+desc_test_comments.proto:148:64
+ Leading detached comment [0]:
+comment E
+
+
+ > service[0] > method[1]:
+desc_test_comments.proto:150:9
+desc_test_comments.proto:155:10
+ Trailing comments:
+ trailer for method
+
+
+
+ > service[0] > method[1] > name:
+desc_test_comments.proto:150:13
+desc_test_comments.proto:150:21
+
+
+ > service[0] > method[1] > input_type:
+desc_test_comments.proto:150:23
+desc_test_comments.proto:150:30
+
+
+ > service[0] > method[1] > output_type:
+desc_test_comments.proto:150:41
+desc_test_comments.proto:150:62
+
+
+ > service[0] > method[1] > options:
+desc_test_comments.proto:152:17
+desc_test_comments.proto:152:42
+
+
+ > service[0] > method[1] > options > deprecated:
+desc_test_comments.proto:152:17
+desc_test_comments.proto:152:42
+ Leading comments:
+ this RPC is deprecated!
+
+
+
+ > service[0] > method[1] > options:
+desc_test_comments.proto:153:17
+desc_test_comments.proto:153:53
+
+
+ > service[0] > method[1] > options > (testprotos.mtfubar)[0]:
+desc_test_comments.proto:153:17
+desc_test_comments.proto:153:53
+
+
+ > service[0] > method[1] > options:
+desc_test_comments.proto:154:17
+desc_test_comments.proto:154:56
+
+
+ > service[0] > method[1] > options > (testprotos.mtfubard):
+desc_test_comments.proto:154:17
+desc_test_comments.proto:154:56
+---- desc_test_complex.proto ----
+
+
+:
+desc_test_complex.proto:1:1
+desc_test_complex.proto:298:2
+
+
+ > syntax:
+desc_test_complex.proto:1:1
+desc_test_complex.proto:1:19
+
+
+ > package:
+desc_test_complex.proto:3:1
+desc_test_complex.proto:3:17
+
+
+ > options:
+desc_test_complex.proto:5:1
+desc_test_complex.proto:5:73
+
+
+ > options > go_package:
+desc_test_complex.proto:5:1
+desc_test_complex.proto:5:73
+
+
+ > dependency[0]:
+desc_test_complex.proto:7:1
+desc_test_complex.proto:7:43
+
+
+ > message_type[0]:
+desc_test_complex.proto:9:1
+desc_test_complex.proto:14:2
+
+
+ > message_type[0] > name:
+desc_test_complex.proto:9:9
+desc_test_complex.proto:9:15
+
+
+ > message_type[0] > field[0]:
+desc_test_complex.proto:10:9
+desc_test_complex.proto:10:34
+
+
+ > message_type[0] > field[0] > label:
+desc_test_complex.proto:10:9
+desc_test_complex.proto:10:17
+
+
+ > message_type[0] > field[0] > type:
+desc_test_complex.proto:10:18
+desc_test_complex.proto:10:24
+
+
+ > message_type[0] > field[0] > name:
+desc_test_complex.proto:10:25
+desc_test_complex.proto:10:29
+
+
+ > message_type[0] > field[0] > number:
+desc_test_complex.proto:10:32
+desc_test_complex.proto:10:33
+
+
+ > message_type[0] > field[1]:
+desc_test_complex.proto:11:9
+desc_test_complex.proto:11:32
+
+
+ > message_type[0] > field[1] > label:
+desc_test_complex.proto:11:9
+desc_test_complex.proto:11:17
+
+
+ > message_type[0] > field[1] > type:
+desc_test_complex.proto:11:18
+desc_test_complex.proto:11:24
+
+
+ > message_type[0] > field[1] > name:
+desc_test_complex.proto:11:25
+desc_test_complex.proto:11:27
+
+
+ > message_type[0] > field[1] > number:
+desc_test_complex.proto:11:30
+desc_test_complex.proto:11:31
+
+
+ > message_type[0] > field[2]:
+desc_test_complex.proto:12:9
+desc_test_complex.proto:12:35
+ Trailing comments:
+ default JSON name will be capitalized
+
+
+
+ > message_type[0] > field[2] > label:
+desc_test_complex.proto:12:9
+desc_test_complex.proto:12:17
+
+
+ > message_type[0] > field[2] > type:
+desc_test_complex.proto:12:18
+desc_test_complex.proto:12:23
+
+
+ > message_type[0] > field[2] > name:
+desc_test_complex.proto:12:24
+desc_test_complex.proto:12:30
+
+
+ > message_type[0] > field[2] > number:
+desc_test_complex.proto:12:33
+desc_test_complex.proto:12:34
+
+
+ > message_type[0] > field[3]:
+desc_test_complex.proto:13:9
+desc_test_complex.proto:13:29
+ Trailing comments:
+ default JSON name will be empty(!)
+
+
+
+ > message_type[0] > field[3] > label:
+desc_test_complex.proto:13:9
+desc_test_complex.proto:13:17
+
+
+ > message_type[0] > field[3] > type:
+desc_test_complex.proto:13:18
+desc_test_complex.proto:13:22
+
+
+ > message_type[0] > field[3] > name:
+desc_test_complex.proto:13:23
+desc_test_complex.proto:13:24
+
+
+ > message_type[0] > field[3] > number:
+desc_test_complex.proto:13:27
+desc_test_complex.proto:13:28
+
+
+ > extension:
+desc_test_complex.proto:16:1
+desc_test_complex.proto:20:2
+
+
+ > extension[0]:
+desc_test_complex.proto:19:9
+desc_test_complex.proto:19:39
+
+
+ > extension[0] > extendee:
+desc_test_complex.proto:16:8
+desc_test_complex.proto:18:25
+
+
+ > extension[0] > label:
+desc_test_complex.proto:19:9
+desc_test_complex.proto:19:17
+
+
+ > extension[0] > type:
+desc_test_complex.proto:19:18
+desc_test_complex.proto:19:24
+
+
+ > extension[0] > name:
+desc_test_complex.proto:19:25
+desc_test_complex.proto:19:30
+
+
+ > extension[0] > number:
+desc_test_complex.proto:19:33
+desc_test_complex.proto:19:38
+
+
+ > message_type[1]:
+desc_test_complex.proto:22:1
+desc_test_complex.proto:61:2
+
+
+ > message_type[1] > name:
+desc_test_complex.proto:22:9
+desc_test_complex.proto:22:13
+
+
+ > message_type[1] > field[0]:
+desc_test_complex.proto:23:9
+desc_test_complex.proto:23:55
+
+
+ > message_type[1] > field[0] > label:
+desc_test_complex.proto:23:9
+desc_test_complex.proto:23:17
+
+
+ > message_type[1] > field[0] > type:
+desc_test_complex.proto:23:18
+desc_test_complex.proto:23:24
+
+
+ > message_type[1] > field[0] > name:
+desc_test_complex.proto:23:25
+desc_test_complex.proto:23:28
+
+
+ > message_type[1] > field[0] > number:
+desc_test_complex.proto:23:31
+desc_test_complex.proto:23:32
+
+
+ > message_type[1] > field[0] > options:
+desc_test_complex.proto:23:33
+desc_test_complex.proto:23:54
+
+
+ > message_type[1] > field[0] > json_name:
+desc_test_complex.proto:23:34
+desc_test_complex.proto:23:53
+
+
+ > message_type[1] > field[1]:
+desc_test_complex.proto:24:9
+desc_test_complex.proto:24:34
+
+
+ > message_type[1] > field[1] > label:
+desc_test_complex.proto:24:9
+desc_test_complex.proto:24:17
+
+
+ > message_type[1] > field[1] > type:
+desc_test_complex.proto:24:18
+desc_test_complex.proto:24:23
+
+
+ > message_type[1] > field[1] > name:
+desc_test_complex.proto:24:24
+desc_test_complex.proto:24:29
+
+
+ > message_type[1] > field[1] > number:
+desc_test_complex.proto:24:32
+desc_test_complex.proto:24:33
+
+
+ > message_type[1] > field[2]:
+desc_test_complex.proto:25:9
+desc_test_complex.proto:25:31
+
+
+ > message_type[1] > field[2] > label:
+desc_test_complex.proto:25:9
+desc_test_complex.proto:25:17
+
+
+ > message_type[1] > field[2] > type_name:
+desc_test_complex.proto:25:18
+desc_test_complex.proto:25:24
+
+
+ > message_type[1] > field[2] > name:
+desc_test_complex.proto:25:25
+desc_test_complex.proto:25:26
+
+
+ > message_type[1] > field[2] > number:
+desc_test_complex.proto:25:29
+desc_test_complex.proto:25:30
+
+
+ > message_type[1] > field[3]:
+desc_test_complex.proto:26:9
+desc_test_complex.proto:26:31
+
+
+ > message_type[1] > field[3] > label:
+desc_test_complex.proto:26:9
+desc_test_complex.proto:26:17
+
+
+ > message_type[1] > field[3] > type_name:
+desc_test_complex.proto:26:18
+desc_test_complex.proto:26:24
+
+
+ > message_type[1] > field[3] > name:
+desc_test_complex.proto:26:25
+desc_test_complex.proto:26:26
+
+
+ > message_type[1] > field[3] > number:
+desc_test_complex.proto:26:29
+desc_test_complex.proto:26:30
+
+
+ > message_type[1] > field[4]:
+desc_test_complex.proto:27:9
+desc_test_complex.proto:27:34
+
+
+ > message_type[1] > field[4] > type_name:
+desc_test_complex.proto:27:9
+desc_test_complex.proto:27:27
+
+
+ > message_type[1] > field[4] > name:
+desc_test_complex.proto:27:28
+desc_test_complex.proto:27:29
+
+
+ > message_type[1] > field[4] > number:
+desc_test_complex.proto:27:32
+desc_test_complex.proto:27:33
+
+
+ > message_type[1] > field[5]:
+desc_test_complex.proto:29:9
+desc_test_complex.proto:29:67
+
+
+ > message_type[1] > field[5] > label:
+desc_test_complex.proto:29:9
+desc_test_complex.proto:29:17
+
+
+ > message_type[1] > field[5] > type:
+desc_test_complex.proto:29:18
+desc_test_complex.proto:29:23
+
+
+ > message_type[1] > field[5] > name:
+desc_test_complex.proto:29:24
+desc_test_complex.proto:29:25
+
+
+ > message_type[1] > field[5] > number:
+desc_test_complex.proto:29:28
+desc_test_complex.proto:29:29
+
+
+ > message_type[1] > field[5] > options:
+desc_test_complex.proto:29:30
+desc_test_complex.proto:29:66
+
+
+ > message_type[1] > field[5] > default_value:
+desc_test_complex.proto:29:31
+desc_test_complex.proto:29:65
+
+
+ > message_type[1] > extension_range:
+desc_test_complex.proto:31:9
+desc_test_complex.proto:31:31
+
+
+ > message_type[1] > extension_range[0]:
+desc_test_complex.proto:31:20
+desc_test_complex.proto:31:30
+
+
+ > message_type[1] > extension_range[0] > start:
+desc_test_complex.proto:31:20
+desc_test_complex.proto:31:23
+
+
+ > message_type[1] > extension_range[0] > end:
+desc_test_complex.proto:31:27
+desc_test_complex.proto:31:30
+
+
+ > message_type[1] > extension_range:
+desc_test_complex.proto:33:9
+desc_test_complex.proto:33:91
+
+
+ > message_type[1] > extension_range[1]:
+desc_test_complex.proto:33:20
+desc_test_complex.proto:33:23
+
+
+ > message_type[1] > extension_range[1] > start:
+desc_test_complex.proto:33:20
+desc_test_complex.proto:33:23
+
+
+ > message_type[1] > extension_range[1] > end:
+desc_test_complex.proto:33:20
+desc_test_complex.proto:33:23
+
+
+ > message_type[1] > extension_range[2]:
+desc_test_complex.proto:33:25
+desc_test_complex.proto:33:35
+
+
+ > message_type[1] > extension_range[2] > start:
+desc_test_complex.proto:33:25
+desc_test_complex.proto:33:28
+
+
+ > message_type[1] > extension_range[2] > end:
+desc_test_complex.proto:33:32
+desc_test_complex.proto:33:35
+
+
+ > message_type[1] > extension_range[3]:
+desc_test_complex.proto:33:37
+desc_test_complex.proto:33:47
+
+
+ > message_type[1] > extension_range[3] > start:
+desc_test_complex.proto:33:37
+desc_test_complex.proto:33:40
+
+
+ > message_type[1] > extension_range[3] > end:
+desc_test_complex.proto:33:44
+desc_test_complex.proto:33:47
+
+
+ > message_type[1] > extension_range[4]:
+desc_test_complex.proto:33:49
+desc_test_complex.proto:33:61
+
+
+ > message_type[1] > extension_range[4] > start:
+desc_test_complex.proto:33:49
+desc_test_complex.proto:33:54
+
+
+ > message_type[1] > extension_range[4] > end:
+desc_test_complex.proto:33:58
+desc_test_complex.proto:33:61
+
+
+ > message_type[1] > extension_range[1] > options:
+desc_test_complex.proto:33:62
+desc_test_complex.proto:33:90
+
+
+ > message_type[1] > extension_range[1] > options > (foo.bar.label):
+desc_test_complex.proto:33:63
+desc_test_complex.proto:33:89
+
+
+ > message_type[1] > extension_range[2] > options:
+desc_test_complex.proto:33:62
+desc_test_complex.proto:33:90
+
+
+ > message_type[1] > extension_range[2] > options > (foo.bar.label):
+desc_test_complex.proto:33:63
+desc_test_complex.proto:33:89
+
+
+ > message_type[1] > extension_range[3] > options:
+desc_test_complex.proto:33:62
+desc_test_complex.proto:33:90
+
+
+ > message_type[1] > extension_range[3] > options > (foo.bar.label):
+desc_test_complex.proto:33:63
+desc_test_complex.proto:33:89
+
+
+ > message_type[1] > extension_range[4] > options:
+desc_test_complex.proto:33:62
+desc_test_complex.proto:33:90
+
+
+ > message_type[1] > extension_range[4] > options > (foo.bar.label):
+desc_test_complex.proto:33:63
+desc_test_complex.proto:33:89
+
+
+ > message_type[1] > nested_type[1]:
+desc_test_complex.proto:35:9
+desc_test_complex.proto:60:10
+
+
+ > message_type[1] > nested_type[1] > name:
+desc_test_complex.proto:35:17
+desc_test_complex.proto:35:23
+
+
+ > message_type[1] > nested_type[1] > extension:
+desc_test_complex.proto:36:17
+desc_test_complex.proto:38:18
+
+
+ > message_type[1] > nested_type[1] > extension[0]:
+desc_test_complex.proto:37:25
+desc_test_complex.proto:37:56
+
+
+ > message_type[1] > nested_type[1] > extension[0] > extendee:
+desc_test_complex.proto:36:24
+desc_test_complex.proto:36:54
+
+
+ > message_type[1] > nested_type[1] > extension[0] > label:
+desc_test_complex.proto:37:25
+desc_test_complex.proto:37:33
+
+
+ > message_type[1] > nested_type[1] > extension[0] > type:
+desc_test_complex.proto:37:34
+desc_test_complex.proto:37:39
+
+
+ > message_type[1] > nested_type[1] > extension[0] > name:
+desc_test_complex.proto:37:40
+desc_test_complex.proto:37:47
+
+
+ > message_type[1] > nested_type[1] > extension[0] > number:
+desc_test_complex.proto:37:50
+desc_test_complex.proto:37:55
+
+
+ > message_type[1] > nested_type[1] > nested_type[0]:
+desc_test_complex.proto:39:17
+desc_test_complex.proto:59:18
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > name:
+desc_test_complex.proto:39:25
+desc_test_complex.proto:39:38
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0]:
+desc_test_complex.proto:40:25
+desc_test_complex.proto:48:26
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > name:
+desc_test_complex.proto:40:30
+desc_test_complex.proto:40:33
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[0]:
+desc_test_complex.proto:41:33
+desc_test_complex.proto:41:40
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[0] > name:
+desc_test_complex.proto:41:33
+desc_test_complex.proto:41:35
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[0] > number:
+desc_test_complex.proto:41:38
+desc_test_complex.proto:41:39
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[1]:
+desc_test_complex.proto:42:33
+desc_test_complex.proto:42:40
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[1] > name:
+desc_test_complex.proto:42:33
+desc_test_complex.proto:42:35
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[1] > number:
+desc_test_complex.proto:42:38
+desc_test_complex.proto:42:39
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[2]:
+desc_test_complex.proto:43:33
+desc_test_complex.proto:43:40
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[2] > name:
+desc_test_complex.proto:43:33
+desc_test_complex.proto:43:35
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[2] > number:
+desc_test_complex.proto:43:38
+desc_test_complex.proto:43:39
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[3]:
+desc_test_complex.proto:44:33
+desc_test_complex.proto:44:40
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[3] > name:
+desc_test_complex.proto:44:33
+desc_test_complex.proto:44:35
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[3] > number:
+desc_test_complex.proto:44:38
+desc_test_complex.proto:44:39
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[4]:
+desc_test_complex.proto:45:33
+desc_test_complex.proto:45:40
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[4] > name:
+desc_test_complex.proto:45:33
+desc_test_complex.proto:45:35
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[4] > number:
+desc_test_complex.proto:45:38
+desc_test_complex.proto:45:39
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[5]:
+desc_test_complex.proto:46:33
+desc_test_complex.proto:46:40
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[5] > name:
+desc_test_complex.proto:46:33
+desc_test_complex.proto:46:35
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[5] > number:
+desc_test_complex.proto:46:38
+desc_test_complex.proto:46:39
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[6]:
+desc_test_complex.proto:47:33
+desc_test_complex.proto:47:40
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[6] > name:
+desc_test_complex.proto:47:33
+desc_test_complex.proto:47:35
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > enum_type[0] > value[6] > number:
+desc_test_complex.proto:47:38
+desc_test_complex.proto:47:39
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > options:
+desc_test_complex.proto:49:25
+desc_test_complex.proto:49:50
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > options > (foo.bar.Test.Nested.fooblez):
+desc_test_complex.proto:49:25
+desc_test_complex.proto:49:50
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > extension:
+desc_test_complex.proto:50:25
+desc_test_complex.proto:52:26
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > extension[0]:
+desc_test_complex.proto:51:33
+desc_test_complex.proto:51:64
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > extension[0] > extendee:
+desc_test_complex.proto:50:32
+desc_test_complex.proto:50:36
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > extension[0] > label:
+desc_test_complex.proto:51:33
+desc_test_complex.proto:51:41
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > extension[0] > type:
+desc_test_complex.proto:51:42
+desc_test_complex.proto:51:48
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > extension[0] > name:
+desc_test_complex.proto:51:49
+desc_test_complex.proto:51:57
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > extension[0] > number:
+desc_test_complex.proto:51:60
+desc_test_complex.proto:51:63
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > options:
+desc_test_complex.proto:53:25
+desc_test_complex.proto:53:108
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > options > (foo.bar.rept)[0]:
+desc_test_complex.proto:53:25
+desc_test_complex.proto:53:108
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > nested_type[0]:
+desc_test_complex.proto:54:25
+desc_test_complex.proto:58:26
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > nested_type[0] > name:
+desc_test_complex.proto:54:33
+desc_test_complex.proto:54:51
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > nested_type[0] > options:
+desc_test_complex.proto:55:33
+desc_test_complex.proto:55:109
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > nested_type[0] > options > (foo.bar.rept)[0]:
+desc_test_complex.proto:55:33
+desc_test_complex.proto:55:109
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > nested_type[0] > field[0]:
+desc_test_complex.proto:57:33
+desc_test_complex.proto:57:56
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > nested_type[0] > field[0] > label:
+desc_test_complex.proto:57:33
+desc_test_complex.proto:57:41
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > nested_type[0] > field[0] > type_name:
+desc_test_complex.proto:57:42
+desc_test_complex.proto:57:46
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > nested_type[0] > field[0] > name:
+desc_test_complex.proto:57:47
+desc_test_complex.proto:57:51
+
+
+ > message_type[1] > nested_type[1] > nested_type[0] > nested_type[0] > field[0] > number:
+desc_test_complex.proto:57:54
+desc_test_complex.proto:57:55
+
+
+ > enum_type[0]:
+desc_test_complex.proto:63:1
+desc_test_complex.proto:72:2
+
+
+ > enum_type[0] > name:
+desc_test_complex.proto:63:6
+desc_test_complex.proto:63:26
+
+
+ > enum_type[0] > value[0]:
+desc_test_complex.proto:64:9
+desc_test_complex.proto:64:15
+
+
+ > enum_type[0] > value[0] > name:
+desc_test_complex.proto:64:9
+desc_test_complex.proto:64:10
+
+
+ > enum_type[0] > value[0] > number:
+desc_test_complex.proto:64:13
+desc_test_complex.proto:64:14
+
+
+ > enum_type[0] > value[1]:
+desc_test_complex.proto:65:9
+desc_test_complex.proto:65:15
+
+
+ > enum_type[0] > value[1] > name:
+desc_test_complex.proto:65:9
+desc_test_complex.proto:65:10
+
+
+ > enum_type[0] > value[1] > number:
+desc_test_complex.proto:65:13
+desc_test_complex.proto:65:14
+
+
+ > enum_type[0] > value[2]:
+desc_test_complex.proto:66:9
+desc_test_complex.proto:66:15
+
+
+ > enum_type[0] > value[2] > name:
+desc_test_complex.proto:66:9
+desc_test_complex.proto:66:10
+
+
+ > enum_type[0] > value[2] > number:
+desc_test_complex.proto:66:13
+desc_test_complex.proto:66:14
+
+
+ > enum_type[0] > reserved_range:
+desc_test_complex.proto:67:9
+desc_test_complex.proto:67:30
+
+
+ > enum_type[0] > reserved_range[0]:
+desc_test_complex.proto:67:18
+desc_test_complex.proto:67:29
+
+
+ > enum_type[0] > reserved_range[0] > start:
+desc_test_complex.proto:67:18
+desc_test_complex.proto:67:22
+
+
+ > enum_type[0] > reserved_range[0] > end:
+desc_test_complex.proto:67:26
+desc_test_complex.proto:67:29
+
+
+ > enum_type[0] > reserved_range:
+desc_test_complex.proto:68:9
+desc_test_complex.proto:68:26
+
+
+ > enum_type[0] > reserved_range[1]:
+desc_test_complex.proto:68:18
+desc_test_complex.proto:68:25
+
+
+ > enum_type[0] > reserved_range[1] > start:
+desc_test_complex.proto:68:18
+desc_test_complex.proto:68:20
+
+
+ > enum_type[0] > reserved_range[1] > end:
+desc_test_complex.proto:68:24
+desc_test_complex.proto:68:25
+
+
+ > enum_type[0] > reserved_range:
+desc_test_complex.proto:69:9
+desc_test_complex.proto:69:40
+
+
+ > enum_type[0] > reserved_range[2]:
+desc_test_complex.proto:69:18
+desc_test_complex.proto:69:25
+
+
+ > enum_type[0] > reserved_range[2] > start:
+desc_test_complex.proto:69:18
+desc_test_complex.proto:69:19
+
+
+ > enum_type[0] > reserved_range[2] > end:
+desc_test_complex.proto:69:23
+desc_test_complex.proto:69:25
+
+
+ > enum_type[0] > reserved_range[3]:
+desc_test_complex.proto:69:27
+desc_test_complex.proto:69:35
+
+
+ > enum_type[0] > reserved_range[3] > start:
+desc_test_complex.proto:69:27
+desc_test_complex.proto:69:29
+
+
+ > enum_type[0] > reserved_range[3] > end:
+desc_test_complex.proto:69:33
+desc_test_complex.proto:69:35
+
+
+ > enum_type[0] > reserved_range[4]:
+desc_test_complex.proto:69:37
+desc_test_complex.proto:69:39
+
+
+ > enum_type[0] > reserved_range[4] > start:
+desc_test_complex.proto:69:37
+desc_test_complex.proto:69:39
+
+
+ > enum_type[0] > reserved_range[4] > end:
+desc_test_complex.proto:69:37
+desc_test_complex.proto:69:39
+
+
+ > enum_type[0] > reserved_range:
+desc_test_complex.proto:70:9
+desc_test_complex.proto:70:27
+
+
+ > enum_type[0] > reserved_range[5]:
+desc_test_complex.proto:70:18
+desc_test_complex.proto:70:26
+
+
+ > enum_type[0] > reserved_range[5] > start:
+desc_test_complex.proto:70:18
+desc_test_complex.proto:70:20
+
+
+ > enum_type[0] > reserved_range[5] > end:
+desc_test_complex.proto:70:24
+desc_test_complex.proto:70:26
+
+
+ > enum_type[0] > reserved_name:
+desc_test_complex.proto:71:9
+desc_test_complex.proto:71:32
+
+
+ > enum_type[0] > reserved_name[0]:
+desc_test_complex.proto:71:18
+desc_test_complex.proto:71:21
+
+
+ > enum_type[0] > reserved_name[1]:
+desc_test_complex.proto:71:23
+desc_test_complex.proto:71:26
+
+
+ > enum_type[0] > reserved_name[2]:
+desc_test_complex.proto:71:28
+desc_test_complex.proto:71:31
+
+
+ > message_type[2]:
+desc_test_complex.proto:74:1
+desc_test_complex.proto:78:2
+
+
+ > message_type[2] > name:
+desc_test_complex.proto:74:9
+desc_test_complex.proto:74:32
+
+
+ > message_type[2] > reserved_range:
+desc_test_complex.proto:75:9
+desc_test_complex.proto:75:40
+
+
+ > message_type[2] > reserved_range[0]:
+desc_test_complex.proto:75:18
+desc_test_complex.proto:75:25
+
+
+ > message_type[2] > reserved_range[0] > start:
+desc_test_complex.proto:75:18
+desc_test_complex.proto:75:19
+
+
+ > message_type[2] > reserved_range[0] > end:
+desc_test_complex.proto:75:23
+desc_test_complex.proto:75:25
+
+
+ > message_type[2] > reserved_range[1]:
+desc_test_complex.proto:75:27
+desc_test_complex.proto:75:35
+
+
+ > message_type[2] > reserved_range[1] > start:
+desc_test_complex.proto:75:27
+desc_test_complex.proto:75:29
+
+
+ > message_type[2] > reserved_range[1] > end:
+desc_test_complex.proto:75:33
+desc_test_complex.proto:75:35
+
+
+ > message_type[2] > reserved_range[2]:
+desc_test_complex.proto:75:37
+desc_test_complex.proto:75:39
+
+
+ > message_type[2] > reserved_range[2] > start:
+desc_test_complex.proto:75:37
+desc_test_complex.proto:75:39
+
+
+ > message_type[2] > reserved_range[2] > end:
+desc_test_complex.proto:75:37
+desc_test_complex.proto:75:39
+
+
+ > message_type[2] > reserved_range:
+desc_test_complex.proto:76:9
+desc_test_complex.proto:76:30
+
+
+ > message_type[2] > reserved_range[3]:
+desc_test_complex.proto:76:18
+desc_test_complex.proto:76:29
+
+
+ > message_type[2] > reserved_range[3] > start:
+desc_test_complex.proto:76:18
+desc_test_complex.proto:76:22
+
+
+ > message_type[2] > reserved_range[3] > end:
+desc_test_complex.proto:76:26
+desc_test_complex.proto:76:29
+
+
+ > message_type[2] > reserved_name:
+desc_test_complex.proto:77:9
+desc_test_complex.proto:77:32
+
+
+ > message_type[2] > reserved_name[0]:
+desc_test_complex.proto:77:18
+desc_test_complex.proto:77:21
+
+
+ > message_type[2] > reserved_name[1]:
+desc_test_complex.proto:77:23
+desc_test_complex.proto:77:26
+
+
+ > message_type[2] > reserved_name[2]:
+desc_test_complex.proto:77:28
+desc_test_complex.proto:77:31
+
+
+ > message_type[3]:
+desc_test_complex.proto:80:1
+desc_test_complex.proto:82:2
+
+
+ > message_type[3] > name:
+desc_test_complex.proto:80:9
+desc_test_complex.proto:80:23
+
+
+ > message_type[3] > field[0]:
+desc_test_complex.proto:81:9
+desc_test_complex.proto:81:38
+
+
+ > message_type[3] > field[0] > type_name:
+desc_test_complex.proto:81:9
+desc_test_complex.proto:81:28
+
+
+ > message_type[3] > field[0] > name:
+desc_test_complex.proto:81:29
+desc_test_complex.proto:81:33
+
+
+ > message_type[3] > field[0] > number:
+desc_test_complex.proto:81:36
+desc_test_complex.proto:81:37
+
+
+ > extension:
+desc_test_complex.proto:84:1
+desc_test_complex.proto:89:2
+
+
+ > extension[1]:
+desc_test_complex.proto:85:9
+desc_test_complex.proto:85:36
+
+
+ > extension[1] > extendee:
+desc_test_complex.proto:84:8
+desc_test_complex.proto:84:38
+
+
+ > extension[1] > label:
+desc_test_complex.proto:85:9
+desc_test_complex.proto:85:17
+
+
+ > extension[1] > type_name:
+desc_test_complex.proto:85:18
+desc_test_complex.proto:85:22
+
+
+ > extension[1] > name:
+desc_test_complex.proto:85:23
+desc_test_complex.proto:85:27
+
+
+ > extension[1] > number:
+desc_test_complex.proto:85:30
+desc_test_complex.proto:85:35
+
+
+ > extension[2]:
+desc_test_complex.proto:86:9
+desc_test_complex.proto:86:60
+
+
+ > extension[2] > extendee:
+desc_test_complex.proto:84:8
+desc_test_complex.proto:84:38
+
+
+ > extension[2] > label:
+desc_test_complex.proto:86:9
+desc_test_complex.proto:86:17
+
+
+ > extension[2] > type_name:
+desc_test_complex.proto:86:18
+desc_test_complex.proto:86:47
+
+
+ > extension[2] > name:
+desc_test_complex.proto:86:48
+desc_test_complex.proto:86:51
+
+
+ > extension[2] > number:
+desc_test_complex.proto:86:54
+desc_test_complex.proto:86:59
+
+
+ > extension[3]:
+desc_test_complex.proto:87:9
+desc_test_complex.proto:87:36
+
+
+ > extension[3] > extendee:
+desc_test_complex.proto:84:8
+desc_test_complex.proto:84:38
+
+
+ > extension[3] > label:
+desc_test_complex.proto:87:9
+desc_test_complex.proto:87:17
+
+
+ > extension[3] > type_name:
+desc_test_complex.proto:87:18
+desc_test_complex.proto:87:25
+
+
+ > extension[3] > name:
+desc_test_complex.proto:87:26
+desc_test_complex.proto:87:27
+
+
+ > extension[3] > number:
+desc_test_complex.proto:87:30
+desc_test_complex.proto:87:35
+
+
+ > extension[4]:
+desc_test_complex.proto:88:9
+desc_test_complex.proto:88:50
+
+
+ > extension[4] > extendee:
+desc_test_complex.proto:84:8
+desc_test_complex.proto:84:38
+
+
+ > extension[4] > label:
+desc_test_complex.proto:88:9
+desc_test_complex.proto:88:17
+
+
+ > extension[4] > type_name:
+desc_test_complex.proto:88:18
+desc_test_complex.proto:88:32
+
+
+ > extension[4] > name:
+desc_test_complex.proto:88:33
+desc_test_complex.proto:88:41
+
+
+ > extension[4] > number:
+desc_test_complex.proto:88:44
+desc_test_complex.proto:88:49
+
+
+ > message_type[4]:
+desc_test_complex.proto:91:1
+desc_test_complex.proto:111:2
+
+
+ > message_type[4] > name:
+desc_test_complex.proto:91:9
+desc_test_complex.proto:91:16
+
+
+ > message_type[4] > options:
+desc_test_complex.proto:92:5
+desc_test_complex.proto:92:130
+
+
+ > message_type[4] > options > (foo.bar.rept)[0]:
+desc_test_complex.proto:92:5
+desc_test_complex.proto:92:130
+
+
+ > message_type[4] > options:
+desc_test_complex.proto:93:5
+desc_test_complex.proto:93:115
+
+
+ > message_type[4] > options > (foo.bar.rept)[1]:
+desc_test_complex.proto:93:5
+desc_test_complex.proto:93:115
+
+
+ > message_type[4] > options:
+desc_test_complex.proto:94:5
+desc_test_complex.proto:94:36
+
+
+ > message_type[4] > options > (foo.bar.rept)[2]:
+desc_test_complex.proto:94:5
+desc_test_complex.proto:94:36
+
+
+ > message_type[4] > options:
+desc_test_complex.proto:95:5
+desc_test_complex.proto:95:23
+
+
+ > message_type[4] > options > (foo.bar.eee):
+desc_test_complex.proto:95:5
+desc_test_complex.proto:95:23
+
+
+ > message_type[4] > options:
+desc_test_complex.proto:96:9
+desc_test_complex.proto:96:34
+
+
+ > message_type[4] > options > (foo.bar.a):
+desc_test_complex.proto:96:9
+desc_test_complex.proto:96:34
+
+
+ > message_type[4] > options:
+desc_test_complex.proto:97:9
+desc_test_complex.proto:97:86
+
+
+ > message_type[4] > options > (foo.bar.a) > test:
+desc_test_complex.proto:97:9
+desc_test_complex.proto:97:86
+
+
+ > message_type[4] > options:
+desc_test_complex.proto:98:9
+desc_test_complex.proto:98:37
+
+
+ > message_type[4] > options > (foo.bar.a) > test > foo:
+desc_test_complex.proto:98:9
+desc_test_complex.proto:98:37
+
+
+ > message_type[4] > options:
+desc_test_complex.proto:99:9
+desc_test_complex.proto:99:41
+
+
+ > message_type[4] > options > (foo.bar.a) > test > s > name:
+desc_test_complex.proto:99:9
+desc_test_complex.proto:99:41
+
+
+ > message_type[4] > options:
+desc_test_complex.proto:100:5
+desc_test_complex.proto:100:34
+
+
+ > message_type[4] > options > (foo.bar.a) > test > s > id:
+desc_test_complex.proto:100:5
+desc_test_complex.proto:100:34
+
+
+ > message_type[4] > options:
+desc_test_complex.proto:101:5
+desc_test_complex.proto:101:31
+
+
+ > message_type[4] > options > (foo.bar.a) > test > array[0]:
+desc_test_complex.proto:101:5
+desc_test_complex.proto:101:31
+
+
+ > message_type[4] > options:
+desc_test_complex.proto:102:5
+desc_test_complex.proto:102:31
+
+
+ > message_type[4] > options > (foo.bar.a) > test > array[1]:
+desc_test_complex.proto:102:5
+desc_test_complex.proto:102:31
+
+
+ > message_type[4] > options:
+desc_test_complex.proto:103:5
+desc_test_complex.proto:103:78
+
+
+ > message_type[4] > options > (foo.bar.a) > test > (foo.bar.Test.Nested._NestedNested._garblez):
+desc_test_complex.proto:103:5
+desc_test_complex.proto:103:78
+
+
+ > message_type[4] > options:
+desc_test_complex.proto:105:9
+desc_test_complex.proto:105:37
+
+
+ > message_type[4] > options > (foo.bar.map_vals) > vals[0]:
+desc_test_complex.proto:105:9
+desc_test_complex.proto:105:37
+ Trailing comments:
+ no key, no value
+
+
+
+ > message_type[4] > options:
+desc_test_complex.proto:106:9
+desc_test_complex.proto:106:47
+
+
+ > message_type[4] > options > (foo.bar.map_vals) > vals[1]:
+desc_test_complex.proto:106:9
+desc_test_complex.proto:106:47
+ Trailing comments:
+ no value
+
+
+
+ > message_type[4] > options:
+desc_test_complex.proto:107:9
+desc_test_complex.proto:107:69
+
+
+ > message_type[4] > options > (foo.bar.map_vals) > vals[2]:
+desc_test_complex.proto:107:9
+desc_test_complex.proto:107:69
+
+
+ > message_type[4] > field[0]:
+desc_test_complex.proto:109:5
+desc_test_complex.proto:109:28
+
+
+ > message_type[4] > field[0] > label:
+desc_test_complex.proto:109:5
+desc_test_complex.proto:109:13
+
+
+ > message_type[4] > field[0] > type_name:
+desc_test_complex.proto:109:14
+desc_test_complex.proto:109:18
+
+
+ > message_type[4] > field[0] > name:
+desc_test_complex.proto:109:19
+desc_test_complex.proto:109:23
+
+
+ > message_type[4] > field[0] > number:
+desc_test_complex.proto:109:26
+desc_test_complex.proto:109:27
+
+
+ > message_type[4] > field[1]:
+desc_test_complex.proto:110:5
+desc_test_complex.proto:110:67
+
+
+ > message_type[4] > field[1] > label:
+desc_test_complex.proto:110:5
+desc_test_complex.proto:110:13
+
+
+ > message_type[4] > field[1] > type_name:
+desc_test_complex.proto:110:14
+desc_test_complex.proto:110:43
+
+
+ > message_type[4] > field[1] > name:
+desc_test_complex.proto:110:44
+desc_test_complex.proto:110:47
+
+
+ > message_type[4] > field[1] > number:
+desc_test_complex.proto:110:50
+desc_test_complex.proto:110:51
+
+
+ > message_type[4] > field[1] > options:
+desc_test_complex.proto:110:52
+desc_test_complex.proto:110:66
+
+
+ > message_type[4] > field[1] > default_value:
+desc_test_complex.proto:110:53
+desc_test_complex.proto:110:65
+
+
+ > message_type[5]:
+desc_test_complex.proto:113:1
+desc_test_complex.proto:127:2
+
+
+ > message_type[5] > name:
+desc_test_complex.proto:113:9
+desc_test_complex.proto:113:18
+
+
+ > message_type[5] > field[0]:
+desc_test_complex.proto:114:9
+desc_test_complex.proto:114:41
+
+
+ > message_type[5] > field[0] > label:
+desc_test_complex.proto:114:9
+desc_test_complex.proto:114:17
+
+
+ > message_type[5] > field[0] > type:
+desc_test_complex.proto:114:18
+desc_test_complex.proto:114:22
+
+
+ > message_type[5] > field[0] > name:
+desc_test_complex.proto:114:23
+desc_test_complex.proto:114:36
+
+
+ > message_type[5] > field[0] > number:
+desc_test_complex.proto:114:39
+desc_test_complex.proto:114:40
+
+
+ > message_type[5] > enum_type[0]:
+desc_test_complex.proto:116:9
+desc_test_complex.proto:120:10
+
+
+ > message_type[5] > enum_type[0] > name:
+desc_test_complex.proto:116:14
+desc_test_complex.proto:116:20
+
+
+ > message_type[5] > enum_type[0] > value[0]:
+desc_test_complex.proto:117:17
+desc_test_complex.proto:117:27
+
+
+ > message_type[5] > enum_type[0] > value[0] > name:
+desc_test_complex.proto:117:17
+desc_test_complex.proto:117:22
+
+
+ > message_type[5] > enum_type[0] > value[0] > number:
+desc_test_complex.proto:117:25
+desc_test_complex.proto:117:26
+
+
+ > message_type[5] > enum_type[0] > value[1]:
+desc_test_complex.proto:118:17
+desc_test_complex.proto:118:26
+
+
+ > message_type[5] > enum_type[0] > value[1] > name:
+desc_test_complex.proto:118:17
+desc_test_complex.proto:118:21
+
+
+ > message_type[5] > enum_type[0] > value[1] > number:
+desc_test_complex.proto:118:24
+desc_test_complex.proto:118:25
+
+
+ > message_type[5] > enum_type[0] > value[2]:
+desc_test_complex.proto:119:17
+desc_test_complex.proto:119:27
+
+
+ > message_type[5] > enum_type[0] > value[2] > name:
+desc_test_complex.proto:119:17
+desc_test_complex.proto:119:22
+
+
+ > message_type[5] > enum_type[0] > value[2] > number:
+desc_test_complex.proto:119:25
+desc_test_complex.proto:119:26
+
+
+ > message_type[5] > nested_type[0]:
+desc_test_complex.proto:121:9
+desc_test_complex.proto:124:10
+
+
+ > message_type[5] > nested_type[0] > name:
+desc_test_complex.proto:121:17
+desc_test_complex.proto:121:27
+
+
+ > message_type[5] > nested_type[0] > field[0]:
+desc_test_complex.proto:122:17
+desc_test_complex.proto:122:44
+
+
+ > message_type[5] > nested_type[0] > field[0] > label:
+desc_test_complex.proto:122:17
+desc_test_complex.proto:122:25
+
+
+ > message_type[5] > nested_type[0] > field[0] > type_name:
+desc_test_complex.proto:122:26
+desc_test_complex.proto:122:32
+
+
+ > message_type[5] > nested_type[0] > field[0] > name:
+desc_test_complex.proto:122:33
+desc_test_complex.proto:122:39
+
+
+ > message_type[5] > nested_type[0] > field[0] > number:
+desc_test_complex.proto:122:42
+desc_test_complex.proto:122:43
+
+
+ > message_type[5] > nested_type[0] > field[1]:
+desc_test_complex.proto:123:17
+desc_test_complex.proto:123:44
+
+
+ > message_type[5] > nested_type[0] > field[1] > label:
+desc_test_complex.proto:123:17
+desc_test_complex.proto:123:25
+
+
+ > message_type[5] > nested_type[0] > field[1] > type:
+desc_test_complex.proto:123:26
+desc_test_complex.proto:123:32
+
+
+ > message_type[5] > nested_type[0] > field[1] > name:
+desc_test_complex.proto:123:33
+desc_test_complex.proto:123:39
+
+
+ > message_type[5] > nested_type[0] > field[1] > number:
+desc_test_complex.proto:123:42
+desc_test_complex.proto:123:43
+
+
+ > message_type[5] > field[1]:
+desc_test_complex.proto:126:9
+desc_test_complex.proto:126:44
+
+
+ > message_type[5] > field[1] > label:
+desc_test_complex.proto:126:9
+desc_test_complex.proto:126:17
+
+
+ > message_type[5] > field[1] > type_name:
+desc_test_complex.proto:126:18
+desc_test_complex.proto:126:28
+
+
+ > message_type[5] > field[1] > name:
+desc_test_complex.proto:126:29
+desc_test_complex.proto:126:39
+
+
+ > message_type[5] > field[1] > number:
+desc_test_complex.proto:126:42
+desc_test_complex.proto:126:43
+
+
+ > extension:
+desc_test_complex.proto:129:1
+desc_test_complex.proto:131:2
+
+
+ > extension[5]:
+desc_test_complex.proto:130:9
+desc_test_complex.proto:130:46
+
+
+ > extension[5] > extendee:
+desc_test_complex.proto:129:8
+desc_test_complex.proto:129:37
+
+
+ > extension[5] > label:
+desc_test_complex.proto:130:9
+desc_test_complex.proto:130:17
+
+
+ > extension[5] > type_name:
+desc_test_complex.proto:130:18
+desc_test_complex.proto:130:27
+
+
+ > extension[5] > name:
+desc_test_complex.proto:130:28
+desc_test_complex.proto:130:37
+
+
+ > extension[5] > number:
+desc_test_complex.proto:130:40
+desc_test_complex.proto:130:45
+
+
+ > service[0]:
+desc_test_complex.proto:133:1
+desc_test_complex.proto:152:2
+
+
+ > service[0] > name:
+desc_test_complex.proto:133:9
+desc_test_complex.proto:133:24
+
+
+ > service[0] > method[0]:
+desc_test_complex.proto:134:9
+desc_test_complex.proto:142:10
+
+
+ > service[0] > method[0] > name:
+desc_test_complex.proto:134:13
+desc_test_complex.proto:134:21
+
+
+ > service[0] > method[0] > input_type:
+desc_test_complex.proto:134:22
+desc_test_complex.proto:134:26
+
+
+ > service[0] > method[0] > output_type:
+desc_test_complex.proto:134:37
+desc_test_complex.proto:134:41
+
+
+ > service[0] > method[0] > options:
+desc_test_complex.proto:135:17
+desc_test_complex.proto:141:19
+
+
+ > service[0] > method[0] > options > (foo.bar.validator):
+desc_test_complex.proto:135:17
+desc_test_complex.proto:141:19
+
+
+ > service[0] > method[1]:
+desc_test_complex.proto:143:9
+desc_test_complex.proto:151:10
+
+
+ > service[0] > method[1] > name:
+desc_test_complex.proto:143:13
+desc_test_complex.proto:143:16
+
+
+ > service[0] > method[1] > input_type:
+desc_test_complex.proto:143:17
+desc_test_complex.proto:143:21
+
+
+ > service[0] > method[1] > output_type:
+desc_test_complex.proto:143:32
+desc_test_complex.proto:143:36
+
+
+ > service[0] > method[1] > options:
+desc_test_complex.proto:144:17
+desc_test_complex.proto:150:19
+
+
+ > service[0] > method[1] > options > (foo.bar.validator):
+desc_test_complex.proto:144:17
+desc_test_complex.proto:150:19
+
+
+ > message_type[6]:
+desc_test_complex.proto:154:1
+desc_test_complex.proto:180:2
+
+
+ > message_type[6] > name:
+desc_test_complex.proto:154:9
+desc_test_complex.proto:154:13
+
+
+ > message_type[6] > nested_type[0]:
+desc_test_complex.proto:155:3
+desc_test_complex.proto:160:4
+
+
+ > message_type[6] > nested_type[0] > name:
+desc_test_complex.proto:155:11
+desc_test_complex.proto:155:21
+
+
+ > message_type[6] > nested_type[0] > field[0]:
+desc_test_complex.proto:156:5
+desc_test_complex.proto:156:33
+
+
+ > message_type[6] > nested_type[0] > field[0] > label:
+desc_test_complex.proto:156:5
+desc_test_complex.proto:156:13
+
+
+ > message_type[6] > nested_type[0] > field[0] > type:
+desc_test_complex.proto:156:14
+desc_test_complex.proto:156:20
+
+
+ > message_type[6] > nested_type[0] > field[0] > name:
+desc_test_complex.proto:156:21
+desc_test_complex.proto:156:28
+
+
+ > message_type[6] > nested_type[0] > field[0] > number:
+desc_test_complex.proto:156:31
+desc_test_complex.proto:156:32
+
+
+ > message_type[6] > nested_type[0] > field[1]:
+desc_test_complex.proto:157:5
+desc_test_complex.proto:157:35
+
+
+ > message_type[6] > nested_type[0] > field[1] > label:
+desc_test_complex.proto:157:5
+desc_test_complex.proto:157:13
+
+
+ > message_type[6] > nested_type[0] > field[1] > type:
+desc_test_complex.proto:157:14
+desc_test_complex.proto:157:18
+
+
+ > message_type[6] > nested_type[0] > field[1] > name:
+desc_test_complex.proto:157:19
+desc_test_complex.proto:157:30
+
+
+ > message_type[6] > nested_type[0] > field[1] > number:
+desc_test_complex.proto:157:33
+desc_test_complex.proto:157:34
+
+
+ > message_type[6] > nested_type[0] > field[2]:
+desc_test_complex.proto:158:5
+desc_test_complex.proto:158:32
+
+
+ > message_type[6] > nested_type[0] > field[2] > label:
+desc_test_complex.proto:158:5
+desc_test_complex.proto:158:13
+
+
+ > message_type[6] > nested_type[0] > field[2] > type:
+desc_test_complex.proto:158:14
+desc_test_complex.proto:158:19
+
+
+ > message_type[6] > nested_type[0] > field[2] > name:
+desc_test_complex.proto:158:20
+desc_test_complex.proto:158:27
+
+
+ > message_type[6] > nested_type[0] > field[2] > number:
+desc_test_complex.proto:158:30
+desc_test_complex.proto:158:31
+
+
+ > message_type[6] > nested_type[0] > field[3]:
+desc_test_complex.proto:159:5
+desc_test_complex.proto:159:32
+
+
+ > message_type[6] > nested_type[0] > field[3] > label:
+desc_test_complex.proto:159:5
+desc_test_complex.proto:159:13
+
+
+ > message_type[6] > nested_type[0] > field[3] > type:
+desc_test_complex.proto:159:14
+desc_test_complex.proto:159:19
+
+
+ > message_type[6] > nested_type[0] > field[3] > name:
+desc_test_complex.proto:159:20
+desc_test_complex.proto:159:27
+
+
+ > message_type[6] > nested_type[0] > field[3] > number:
+desc_test_complex.proto:159:30
+desc_test_complex.proto:159:31
+
+
+ > message_type[6] > nested_type[1]:
+desc_test_complex.proto:161:3
+desc_test_complex.proto:164:4
+
+
+ > message_type[6] > nested_type[1] > name:
+desc_test_complex.proto:161:11
+desc_test_complex.proto:161:18
+
+
+ > message_type[6] > nested_type[1] > field[0]:
+desc_test_complex.proto:162:5
+desc_test_complex.proto:162:32
+
+
+ > message_type[6] > nested_type[1] > field[0] > label:
+desc_test_complex.proto:162:5
+desc_test_complex.proto:162:13
+
+
+ > message_type[6] > nested_type[1] > field[0] > type:
+desc_test_complex.proto:162:14
+desc_test_complex.proto:162:19
+
+
+ > message_type[6] > nested_type[1] > field[0] > name:
+desc_test_complex.proto:162:20
+desc_test_complex.proto:162:27
+
+
+ > message_type[6] > nested_type[1] > field[0] > number:
+desc_test_complex.proto:162:30
+desc_test_complex.proto:162:31
+
+
+ > message_type[6] > nested_type[1] > field[1]:
+desc_test_complex.proto:163:5
+desc_test_complex.proto:163:33
+
+
+ > message_type[6] > nested_type[1] > field[1] > label:
+desc_test_complex.proto:163:5
+desc_test_complex.proto:163:13
+
+
+ > message_type[6] > nested_type[1] > field[1] > type:
+desc_test_complex.proto:163:14
+desc_test_complex.proto:163:20
+
+
+ > message_type[6] > nested_type[1] > field[1] > name:
+desc_test_complex.proto:163:21
+desc_test_complex.proto:163:28
+
+
+ > message_type[6] > nested_type[1] > field[1] > number:
+desc_test_complex.proto:163:31
+desc_test_complex.proto:163:32
+
+
+ > message_type[6] > nested_type[2]:
+desc_test_complex.proto:165:3
+desc_test_complex.proto:170:4
+
+
+ > message_type[6] > nested_type[2] > name:
+desc_test_complex.proto:165:11
+desc_test_complex.proto:165:23
+
+
+ > message_type[6] > nested_type[2] > field[0]:
+desc_test_complex.proto:166:5
+desc_test_complex.proto:166:35
+
+
+ > message_type[6] > nested_type[2] > field[0] > label:
+desc_test_complex.proto:166:5
+desc_test_complex.proto:166:13
+
+
+ > message_type[6] > nested_type[2] > field[0] > type:
+desc_test_complex.proto:166:14
+desc_test_complex.proto:166:18
+
+
+ > message_type[6] > nested_type[2] > field[0] > name:
+desc_test_complex.proto:166:19
+desc_test_complex.proto:166:30
+
+
+ > message_type[6] > nested_type[2] > field[0] > number:
+desc_test_complex.proto:166:33
+desc_test_complex.proto:166:34
+
+
+ > message_type[6] > nested_type[2] > field[1]:
+desc_test_complex.proto:167:5
+desc_test_complex.proto:167:34
+
+
+ > message_type[6] > nested_type[2] > field[1] > label:
+desc_test_complex.proto:167:5
+desc_test_complex.proto:167:13
+
+
+ > message_type[6] > nested_type[2] > field[1] > type:
+desc_test_complex.proto:167:14
+desc_test_complex.proto:167:19
+
+
+ > message_type[6] > nested_type[2] > field[1] > name:
+desc_test_complex.proto:167:20
+desc_test_complex.proto:167:29
+
+
+ > message_type[6] > nested_type[2] > field[1] > number:
+desc_test_complex.proto:167:32
+desc_test_complex.proto:167:33
+
+
+ > message_type[6] > nested_type[2] > field[2]:
+desc_test_complex.proto:168:5
+desc_test_complex.proto:168:34
+
+
+ > message_type[6] > nested_type[2] > field[2] > label:
+desc_test_complex.proto:168:5
+desc_test_complex.proto:168:13
+
+
+ > message_type[6] > nested_type[2] > field[2] > type:
+desc_test_complex.proto:168:14
+desc_test_complex.proto:168:19
+
+
+ > message_type[6] > nested_type[2] > field[2] > name:
+desc_test_complex.proto:168:20
+desc_test_complex.proto:168:29
+
+
+ > message_type[6] > nested_type[2] > field[2] > number:
+desc_test_complex.proto:168:32
+desc_test_complex.proto:168:33
+
+
+ > message_type[6] > nested_type[2] > field[3]:
+desc_test_complex.proto:169:5
+desc_test_complex.proto:169:29
+
+
+ > message_type[6] > nested_type[2] > field[3] > label:
+desc_test_complex.proto:169:5
+desc_test_complex.proto:169:13
+
+
+ > message_type[6] > nested_type[2] > field[3] > type_name:
+desc_test_complex.proto:169:14
+desc_test_complex.proto:169:18
+
+
+ > message_type[6] > nested_type[2] > field[3] > name:
+desc_test_complex.proto:169:19
+desc_test_complex.proto:169:24
+
+
+ > message_type[6] > nested_type[2] > field[3] > number:
+desc_test_complex.proto:169:27
+desc_test_complex.proto:169:28
+
+
+ > message_type[6] > oneof_decl[0]:
+desc_test_complex.proto:171:3
+desc_test_complex.proto:179:4
+
+
+ > message_type[6] > oneof_decl[0] > name:
+desc_test_complex.proto:171:9
+desc_test_complex.proto:171:13
+
+
+ > message_type[6] > field[0]:
+desc_test_complex.proto:172:5
+desc_test_complex.proto:172:27
+
+
+ > message_type[6] > field[0] > type_name:
+desc_test_complex.proto:172:5
+desc_test_complex.proto:172:15
+
+
+ > message_type[6] > field[0] > name:
+desc_test_complex.proto:172:16
+desc_test_complex.proto:172:22
+
+
+ > message_type[6] > field[0] > number:
+desc_test_complex.proto:172:25
+desc_test_complex.proto:172:26
+
+
+ > message_type[6] > field[1]:
+desc_test_complex.proto:173:5
+desc_test_complex.proto:173:31
+
+
+ > message_type[6] > field[1] > type_name:
+desc_test_complex.proto:173:5
+desc_test_complex.proto:173:17
+
+
+ > message_type[6] > field[1] > name:
+desc_test_complex.proto:173:18
+desc_test_complex.proto:173:26
+
+
+ > message_type[6] > field[1] > number:
+desc_test_complex.proto:173:29
+desc_test_complex.proto:173:30
+
+
+ > message_type[6] > field[2]:
+desc_test_complex.proto:174:5
+desc_test_complex.proto:174:21
+
+
+ > message_type[6] > field[2] > type_name:
+desc_test_complex.proto:174:5
+desc_test_complex.proto:174:12
+
+
+ > message_type[6] > field[2] > name:
+desc_test_complex.proto:174:13
+desc_test_complex.proto:174:16
+
+
+ > message_type[6] > field[2] > number:
+desc_test_complex.proto:174:19
+desc_test_complex.proto:174:20
+
+
+ > message_type[6] > field[3]:
+desc_test_complex.proto:175:9
+desc_test_complex.proto:178:10
+
+
+ > message_type[6] > field[3] > type:
+desc_test_complex.proto:175:9
+desc_test_complex.proto:175:14
+
+
+ > message_type[6] > field[3] > name:
+desc_test_complex.proto:175:15
+desc_test_complex.proto:175:24
+
+
+ > message_type[6] > field[3] > number:
+desc_test_complex.proto:175:27
+desc_test_complex.proto:175:28
+
+
+ > message_type[6] > nested_type[3]:
+desc_test_complex.proto:175:9
+desc_test_complex.proto:178:10
+
+
+ > message_type[6] > nested_type[3] > name:
+desc_test_complex.proto:175:15
+desc_test_complex.proto:175:24
+
+
+ > message_type[6] > field[3] > type_name:
+desc_test_complex.proto:175:15
+desc_test_complex.proto:175:24
+
+
+ > message_type[6] > nested_type[3] > field[0]:
+desc_test_complex.proto:176:17
+desc_test_complex.proto:176:45
+
+
+ > message_type[6] > nested_type[3] > field[0] > label:
+desc_test_complex.proto:176:17
+desc_test_complex.proto:176:25
+
+
+ > message_type[6] > nested_type[3] > field[0] > type:
+desc_test_complex.proto:176:26
+desc_test_complex.proto:176:32
+
+
+ > message_type[6] > nested_type[3] > field[0] > name:
+desc_test_complex.proto:176:33
+desc_test_complex.proto:176:40
+
+
+ > message_type[6] > nested_type[3] > field[0] > number:
+desc_test_complex.proto:176:43
+desc_test_complex.proto:176:44
+
+
+ > message_type[6] > nested_type[3] > field[1]:
+desc_test_complex.proto:177:17
+desc_test_complex.proto:177:45
+
+
+ > message_type[6] > nested_type[3] > field[1] > label:
+desc_test_complex.proto:177:17
+desc_test_complex.proto:177:25
+
+
+ > message_type[6] > nested_type[3] > field[1] > type:
+desc_test_complex.proto:177:26
+desc_test_complex.proto:177:32
+
+
+ > message_type[6] > nested_type[3] > field[1] > name:
+desc_test_complex.proto:177:33
+desc_test_complex.proto:177:40
+
+
+ > message_type[6] > nested_type[3] > field[1] > number:
+desc_test_complex.proto:177:43
+desc_test_complex.proto:177:44
+
+
+ > extension:
+desc_test_complex.proto:182:1
+desc_test_complex.proto:184:2
+
+
+ > extension[6]:
+desc_test_complex.proto:183:3
+desc_test_complex.proto:183:30
+
+
+ > extension[6] > extendee:
+desc_test_complex.proto:182:8
+desc_test_complex.proto:182:36
+
+
+ > extension[6] > label:
+desc_test_complex.proto:183:3
+desc_test_complex.proto:183:11
+
+
+ > extension[6] > type_name:
+desc_test_complex.proto:183:12
+desc_test_complex.proto:183:16
+
+
+ > extension[6] > name:
+desc_test_complex.proto:183:17
+desc_test_complex.proto:183:22
+
+
+ > extension[6] > number:
+desc_test_complex.proto:183:25
+desc_test_complex.proto:183:29
+
+
+ > message_type[7]:
+desc_test_complex.proto:186:1
+desc_test_complex.proto:192:2
+
+
+ > message_type[7] > name:
+desc_test_complex.proto:186:9
+desc_test_complex.proto:186:24
+
+
+ > message_type[7] > field[0]:
+desc_test_complex.proto:187:5
+desc_test_complex.proto:191:11
+
+
+ > message_type[7] > field[0] > label:
+desc_test_complex.proto:187:5
+desc_test_complex.proto:187:13
+
+
+ > message_type[7] > field[0] > type:
+desc_test_complex.proto:187:14
+desc_test_complex.proto:187:20
+
+
+ > message_type[7] > field[0] > name:
+desc_test_complex.proto:187:21
+desc_test_complex.proto:187:29
+
+
+ > message_type[7] > field[0] > number:
+desc_test_complex.proto:187:32
+desc_test_complex.proto:187:33
+
+
+ > message_type[7] > field[0] > options:
+desc_test_complex.proto:188:7
+desc_test_complex.proto:191:10
+
+
+ > message_type[7] > field[0] > options > (foo.bar.rules) > repeated:
+desc_test_complex.proto:188:8
+desc_test_complex.proto:191:9
+
+
+ > message_type[8]:
+desc_test_complex.proto:196:1
+desc_test_complex.proto:232:2
+ Leading detached comment [0]:
+ tests cases where field names collide with keywords
+
+
+
+ > message_type[8] > name:
+desc_test_complex.proto:196:9
+desc_test_complex.proto:196:26
+
+
+ > message_type[8] > field[0]:
+desc_test_complex.proto:197:9
+desc_test_complex.proto:197:34
+
+
+ > message_type[8] > field[0] > label:
+desc_test_complex.proto:197:9
+desc_test_complex.proto:197:17
+
+
+ > message_type[8] > field[0] > type:
+desc_test_complex.proto:197:18
+desc_test_complex.proto:197:22
+
+
+ > message_type[8] > field[0] > name:
+desc_test_complex.proto:197:23
+desc_test_complex.proto:197:29
+
+
+ > message_type[8] > field[0] > number:
+desc_test_complex.proto:197:32
+desc_test_complex.proto:197:33
+
+
+ > message_type[8] > field[1]:
+desc_test_complex.proto:198:9
+desc_test_complex.proto:198:34
+
+
+ > message_type[8] > field[1] > label:
+desc_test_complex.proto:198:9
+desc_test_complex.proto:198:17
+
+
+ > message_type[8] > field[1] > type:
+desc_test_complex.proto:198:18
+desc_test_complex.proto:198:22
+
+
+ > message_type[8] > field[1] > name:
+desc_test_complex.proto:198:23
+desc_test_complex.proto:198:29
+
+
+ > message_type[8] > field[1] > number:
+desc_test_complex.proto:198:32
+desc_test_complex.proto:198:33
+
+
+ > message_type[8] > field[2]:
+desc_test_complex.proto:199:9
+desc_test_complex.proto:199:34
+
+
+ > message_type[8] > field[2] > label:
+desc_test_complex.proto:199:9
+desc_test_complex.proto:199:17
+
+
+ > message_type[8] > field[2] > type:
+desc_test_complex.proto:199:18
+desc_test_complex.proto:199:22
+
+
+ > message_type[8] > field[2] > name:
+desc_test_complex.proto:199:23
+desc_test_complex.proto:199:29
+
+
+ > message_type[8] > field[2] > number:
+desc_test_complex.proto:199:32
+desc_test_complex.proto:199:33
+
+
+ > message_type[8] > field[3]:
+desc_test_complex.proto:200:9
+desc_test_complex.proto:200:32
+
+
+ > message_type[8] > field[3] > label:
+desc_test_complex.proto:200:9
+desc_test_complex.proto:200:17
+
+
+ > message_type[8] > field[3] > type:
+desc_test_complex.proto:200:18
+desc_test_complex.proto:200:22
+
+
+ > message_type[8] > field[3] > name:
+desc_test_complex.proto:200:23
+desc_test_complex.proto:200:27
+
+
+ > message_type[8] > field[3] > number:
+desc_test_complex.proto:200:30
+desc_test_complex.proto:200:31
+
+
+ > message_type[8] > field[4]:
+desc_test_complex.proto:201:9
+desc_test_complex.proto:201:35
+
+
+ > message_type[8] > field[4] > label:
+desc_test_complex.proto:201:9
+desc_test_complex.proto:201:17
+
+
+ > message_type[8] > field[4] > type:
+desc_test_complex.proto:201:18
+desc_test_complex.proto:201:22
+
+
+ > message_type[8] > field[4] > name:
+desc_test_complex.proto:201:23
+desc_test_complex.proto:201:30
+
+
+ > message_type[8] > field[4] > number:
+desc_test_complex.proto:201:33
+desc_test_complex.proto:201:34
+
+
+ > message_type[8] > field[5]:
+desc_test_complex.proto:202:9
+desc_test_complex.proto:202:36
+
+
+ > message_type[8] > field[5] > label:
+desc_test_complex.proto:202:9
+desc_test_complex.proto:202:17
+
+
+ > message_type[8] > field[5] > type:
+desc_test_complex.proto:202:18
+desc_test_complex.proto:202:24
+
+
+ > message_type[8] > field[5] > name:
+desc_test_complex.proto:202:25
+desc_test_complex.proto:202:31
+
+
+ > message_type[8] > field[5] > number:
+desc_test_complex.proto:202:34
+desc_test_complex.proto:202:35
+
+
+ > message_type[8] > field[6]:
+desc_test_complex.proto:203:9
+desc_test_complex.proto:203:34
+
+
+ > message_type[8] > field[6] > label:
+desc_test_complex.proto:203:9
+desc_test_complex.proto:203:17
+
+
+ > message_type[8] > field[6] > type:
+desc_test_complex.proto:203:18
+desc_test_complex.proto:203:23
+
+
+ > message_type[8] > field[6] > name:
+desc_test_complex.proto:203:24
+desc_test_complex.proto:203:29
+
+
+ > message_type[8] > field[6] > number:
+desc_test_complex.proto:203:32
+desc_test_complex.proto:203:33
+
+
+ > message_type[8] > field[7]:
+desc_test_complex.proto:204:9
+desc_test_complex.proto:204:34
+
+
+ > message_type[8] > field[7] > label:
+desc_test_complex.proto:204:9
+desc_test_complex.proto:204:17
+
+
+ > message_type[8] > field[7] > type:
+desc_test_complex.proto:204:18
+desc_test_complex.proto:204:23
+
+
+ > message_type[8] > field[7] > name:
+desc_test_complex.proto:204:24
+desc_test_complex.proto:204:29
+
+
+ > message_type[8] > field[7] > number:
+desc_test_complex.proto:204:32
+desc_test_complex.proto:204:33
+
+
+ > message_type[8] > field[8]:
+desc_test_complex.proto:205:9
+desc_test_complex.proto:205:34
+
+
+ > message_type[8] > field[8] > label:
+desc_test_complex.proto:205:9
+desc_test_complex.proto:205:17
+
+
+ > message_type[8] > field[8] > type:
+desc_test_complex.proto:205:18
+desc_test_complex.proto:205:23
+
+
+ > message_type[8] > field[8] > name:
+desc_test_complex.proto:205:24
+desc_test_complex.proto:205:29
+
+
+ > message_type[8] > field[8] > number:
+desc_test_complex.proto:205:32
+desc_test_complex.proto:205:33
+
+
+ > message_type[8] > field[9]:
+desc_test_complex.proto:206:9
+desc_test_complex.proto:206:37
+
+
+ > message_type[8] > field[9] > label:
+desc_test_complex.proto:206:9
+desc_test_complex.proto:206:17
+
+
+ > message_type[8] > field[9] > type:
+desc_test_complex.proto:206:18
+desc_test_complex.proto:206:24
+
+
+ > message_type[8] > field[9] > name:
+desc_test_complex.proto:206:25
+desc_test_complex.proto:206:31
+
+
+ > message_type[8] > field[9] > number:
+desc_test_complex.proto:206:34
+desc_test_complex.proto:206:36
+
+
+ > message_type[8] > field[10]:
+desc_test_complex.proto:207:9
+desc_test_complex.proto:207:37
+
+
+ > message_type[8] > field[10] > label:
+desc_test_complex.proto:207:9
+desc_test_complex.proto:207:17
+
+
+ > message_type[8] > field[10] > type:
+desc_test_complex.proto:207:18
+desc_test_complex.proto:207:24
+
+
+ > message_type[8] > field[10] > name:
+desc_test_complex.proto:207:25
+desc_test_complex.proto:207:31
+
+
+ > message_type[8] > field[10] > number:
+desc_test_complex.proto:207:34
+desc_test_complex.proto:207:36
+
+
+ > message_type[8] > field[11]:
+desc_test_complex.proto:208:9
+desc_test_complex.proto:208:37
+
+
+ > message_type[8] > field[11] > label:
+desc_test_complex.proto:208:9
+desc_test_complex.proto:208:17
+
+
+ > message_type[8] > field[11] > type:
+desc_test_complex.proto:208:18
+desc_test_complex.proto:208:24
+
+
+ > message_type[8] > field[11] > name:
+desc_test_complex.proto:208:25
+desc_test_complex.proto:208:31
+
+
+ > message_type[8] > field[11] > number:
+desc_test_complex.proto:208:34
+desc_test_complex.proto:208:36
+
+
+ > message_type[8] > field[12]:
+desc_test_complex.proto:209:9
+desc_test_complex.proto:209:37
+
+
+ > message_type[8] > field[12] > label:
+desc_test_complex.proto:209:9
+desc_test_complex.proto:209:17
+
+
+ > message_type[8] > field[12] > type:
+desc_test_complex.proto:209:18
+desc_test_complex.proto:209:24
+
+
+ > message_type[8] > field[12] > name:
+desc_test_complex.proto:209:25
+desc_test_complex.proto:209:31
+
+
+ > message_type[8] > field[12] > number:
+desc_test_complex.proto:209:34
+desc_test_complex.proto:209:36
+
+
+ > message_type[8] > field[13]:
+desc_test_complex.proto:210:9
+desc_test_complex.proto:210:39
+
+
+ > message_type[8] > field[13] > label:
+desc_test_complex.proto:210:9
+desc_test_complex.proto:210:17
+
+
+ > message_type[8] > field[13] > type:
+desc_test_complex.proto:210:18
+desc_test_complex.proto:210:25
+
+
+ > message_type[8] > field[13] > name:
+desc_test_complex.proto:210:26
+desc_test_complex.proto:210:33
+
+
+ > message_type[8] > field[13] > number:
+desc_test_complex.proto:210:36
+desc_test_complex.proto:210:38
+
+
+ > message_type[8] > field[14]:
+desc_test_complex.proto:211:9
+desc_test_complex.proto:211:39
+
+
+ > message_type[8] > field[14] > label:
+desc_test_complex.proto:211:9
+desc_test_complex.proto:211:17
+
+
+ > message_type[8] > field[14] > type:
+desc_test_complex.proto:211:18
+desc_test_complex.proto:211:25
+
+
+ > message_type[8] > field[14] > name:
+desc_test_complex.proto:211:26
+desc_test_complex.proto:211:33
+
+
+ > message_type[8] > field[14] > number:
+desc_test_complex.proto:211:36
+desc_test_complex.proto:211:38
+
+
+ > message_type[8] > field[15]:
+desc_test_complex.proto:212:9
+desc_test_complex.proto:212:41
+
+
+ > message_type[8] > field[15] > label:
+desc_test_complex.proto:212:9
+desc_test_complex.proto:212:17
+
+
+ > message_type[8] > field[15] > type:
+desc_test_complex.proto:212:18
+desc_test_complex.proto:212:26
+
+
+ > message_type[8] > field[15] > name:
+desc_test_complex.proto:212:27
+desc_test_complex.proto:212:35
+
+
+ > message_type[8] > field[15] > number:
+desc_test_complex.proto:212:38
+desc_test_complex.proto:212:40
+
+
+ > message_type[8] > field[16]:
+desc_test_complex.proto:213:9
+desc_test_complex.proto:213:41
+
+
+ > message_type[8] > field[16] > label:
+desc_test_complex.proto:213:9
+desc_test_complex.proto:213:17
+
+
+ > message_type[8] > field[16] > type:
+desc_test_complex.proto:213:18
+desc_test_complex.proto:213:26
+
+
+ > message_type[8] > field[16] > name:
+desc_test_complex.proto:213:27
+desc_test_complex.proto:213:35
+
+
+ > message_type[8] > field[16] > number:
+desc_test_complex.proto:213:38
+desc_test_complex.proto:213:40
+
+
+ > message_type[8] > field[17]:
+desc_test_complex.proto:214:9
+desc_test_complex.proto:214:33
+
+
+ > message_type[8] > field[17] > label:
+desc_test_complex.proto:214:9
+desc_test_complex.proto:214:17
+
+
+ > message_type[8] > field[17] > type:
+desc_test_complex.proto:214:18
+desc_test_complex.proto:214:22
+
+
+ > message_type[8] > field[17] > name:
+desc_test_complex.proto:214:23
+desc_test_complex.proto:214:27
+
+
+ > message_type[8] > field[17] > number:
+desc_test_complex.proto:214:30
+desc_test_complex.proto:214:32
+
+
+ > message_type[8] > field[18]:
+desc_test_complex.proto:215:9
+desc_test_complex.proto:215:35
+
+
+ > message_type[8] > field[18] > label:
+desc_test_complex.proto:215:9
+desc_test_complex.proto:215:17
+
+
+ > message_type[8] > field[18] > type:
+desc_test_complex.proto:215:18
+desc_test_complex.proto:215:23
+
+
+ > message_type[8] > field[18] > name:
+desc_test_complex.proto:215:24
+desc_test_complex.proto:215:29
+
+
+ > message_type[8] > field[18] > number:
+desc_test_complex.proto:215:32
+desc_test_complex.proto:215:34
+
+
+ > message_type[8] > field[19]:
+desc_test_complex.proto:216:9
+desc_test_complex.proto:216:37
+
+
+ > message_type[8] > field[19] > label:
+desc_test_complex.proto:216:9
+desc_test_complex.proto:216:17
+
+
+ > message_type[8] > field[19] > type:
+desc_test_complex.proto:216:18
+desc_test_complex.proto:216:24
+
+
+ > message_type[8] > field[19] > name:
+desc_test_complex.proto:216:25
+desc_test_complex.proto:216:31
+
+
+ > message_type[8] > field[19] > number:
+desc_test_complex.proto:216:34
+desc_test_complex.proto:216:36
+
+
+ > message_type[8] > field[20]:
+desc_test_complex.proto:217:9
+desc_test_complex.proto:217:37
+
+
+ > message_type[8] > field[20] > label:
+desc_test_complex.proto:217:9
+desc_test_complex.proto:217:17
+
+
+ > message_type[8] > field[20] > type:
+desc_test_complex.proto:217:18
+desc_test_complex.proto:217:22
+
+
+ > message_type[8] > field[20] > name:
+desc_test_complex.proto:217:23
+desc_test_complex.proto:217:31
+
+
+ > message_type[8] > field[20] > number:
+desc_test_complex.proto:217:34
+desc_test_complex.proto:217:36
+
+
+ > message_type[8] > field[21]:
+desc_test_complex.proto:218:9
+desc_test_complex.proto:218:37
+
+
+ > message_type[8] > field[21] > label:
+desc_test_complex.proto:218:9
+desc_test_complex.proto:218:17
+
+
+ > message_type[8] > field[21] > type:
+desc_test_complex.proto:218:18
+desc_test_complex.proto:218:22
+
+
+ > message_type[8] > field[21] > name:
+desc_test_complex.proto:218:23
+desc_test_complex.proto:218:31
+
+
+ > message_type[8] > field[21] > number:
+desc_test_complex.proto:218:34
+desc_test_complex.proto:218:36
+
+
+ > message_type[8] > field[22]:
+desc_test_complex.proto:219:9
+desc_test_complex.proto:219:37
+
+
+ > message_type[8] > field[22] > label:
+desc_test_complex.proto:219:9
+desc_test_complex.proto:219:17
+
+
+ > message_type[8] > field[22] > type:
+desc_test_complex.proto:219:18
+desc_test_complex.proto:219:22
+
+
+ > message_type[8] > field[22] > name:
+desc_test_complex.proto:219:23
+desc_test_complex.proto:219:31
+
+
+ > message_type[8] > field[22] > number:
+desc_test_complex.proto:219:34
+desc_test_complex.proto:219:36
+
+
+ > message_type[8] > field[23]:
+desc_test_complex.proto:220:9
+desc_test_complex.proto:220:36
+
+
+ > message_type[8] > field[23] > label:
+desc_test_complex.proto:220:9
+desc_test_complex.proto:220:17
+
+
+ > message_type[8] > field[23] > type:
+desc_test_complex.proto:220:18
+desc_test_complex.proto:220:22
+
+
+ > message_type[8] > field[23] > name:
+desc_test_complex.proto:220:23
+desc_test_complex.proto:220:30
+
+
+ > message_type[8] > field[23] > number:
+desc_test_complex.proto:220:33
+desc_test_complex.proto:220:35
+
+
+ > message_type[8] > field[24]:
+desc_test_complex.proto:221:9
+desc_test_complex.proto:221:33
+
+
+ > message_type[8] > field[24] > label:
+desc_test_complex.proto:221:9
+desc_test_complex.proto:221:17
+
+
+ > message_type[8] > field[24] > type:
+desc_test_complex.proto:221:18
+desc_test_complex.proto:221:22
+
+
+ > message_type[8] > field[24] > name:
+desc_test_complex.proto:221:23
+desc_test_complex.proto:221:27
+
+
+ > message_type[8] > field[24] > number:
+desc_test_complex.proto:221:30
+desc_test_complex.proto:221:32
+
+
+ > message_type[8] > field[25]:
+desc_test_complex.proto:222:9
+desc_test_complex.proto:222:36
+
+
+ > message_type[8] > field[25] > label:
+desc_test_complex.proto:222:9
+desc_test_complex.proto:222:17
+
+
+ > message_type[8] > field[25] > type:
+desc_test_complex.proto:222:18
+desc_test_complex.proto:222:22
+
+
+ > message_type[8] > field[25] > name:
+desc_test_complex.proto:222:23
+desc_test_complex.proto:222:30
+
+
+ > message_type[8] > field[25] > number:
+desc_test_complex.proto:222:33
+desc_test_complex.proto:222:35
+
+
+ > message_type[8] > field[26]:
+desc_test_complex.proto:223:9
+desc_test_complex.proto:223:32
+
+
+ > message_type[8] > field[26] > label:
+desc_test_complex.proto:223:9
+desc_test_complex.proto:223:17
+
+
+ > message_type[8] > field[26] > type:
+desc_test_complex.proto:223:18
+desc_test_complex.proto:223:22
+
+
+ > message_type[8] > field[26] > name:
+desc_test_complex.proto:223:23
+desc_test_complex.proto:223:26
+
+
+ > message_type[8] > field[26] > number:
+desc_test_complex.proto:223:29
+desc_test_complex.proto:223:31
+
+
+ > message_type[8] > field[27]:
+desc_test_complex.proto:224:9
+desc_test_complex.proto:224:35
+
+
+ > message_type[8] > field[27] > label:
+desc_test_complex.proto:224:9
+desc_test_complex.proto:224:17
+
+
+ > message_type[8] > field[27] > type:
+desc_test_complex.proto:224:18
+desc_test_complex.proto:224:22
+
+
+ > message_type[8] > field[27] > name:
+desc_test_complex.proto:224:23
+desc_test_complex.proto:224:29
+
+
+ > message_type[8] > field[27] > number:
+desc_test_complex.proto:224:32
+desc_test_complex.proto:224:34
+
+
+ > message_type[8] > field[28]:
+desc_test_complex.proto:225:9
+desc_test_complex.proto:225:35
+
+
+ > message_type[8] > field[28] > label:
+desc_test_complex.proto:225:9
+desc_test_complex.proto:225:17
+
+
+ > message_type[8] > field[28] > type:
+desc_test_complex.proto:225:18
+desc_test_complex.proto:225:22
+
+
+ > message_type[8] > field[28] > name:
+desc_test_complex.proto:225:23
+desc_test_complex.proto:225:29
+
+
+ > message_type[8] > field[28] > number:
+desc_test_complex.proto:225:32
+desc_test_complex.proto:225:34
+
+
+ > message_type[8] > field[29]:
+desc_test_complex.proto:226:9
+desc_test_complex.proto:226:39
+
+
+ > message_type[8] > field[29] > label:
+desc_test_complex.proto:226:9
+desc_test_complex.proto:226:17
+
+
+ > message_type[8] > field[29] > type:
+desc_test_complex.proto:226:18
+desc_test_complex.proto:226:22
+
+
+ > message_type[8] > field[29] > name:
+desc_test_complex.proto:226:23
+desc_test_complex.proto:226:33
+
+
+ > message_type[8] > field[29] > number:
+desc_test_complex.proto:226:36
+desc_test_complex.proto:226:38
+
+
+ > message_type[8] > field[30]:
+desc_test_complex.proto:227:9
+desc_test_complex.proto:227:37
+
+
+ > message_type[8] > field[30] > label:
+desc_test_complex.proto:227:9
+desc_test_complex.proto:227:17
+
+
+ > message_type[8] > field[30] > type:
+desc_test_complex.proto:227:18
+desc_test_complex.proto:227:22
+
+
+ > message_type[8] > field[30] > name:
+desc_test_complex.proto:227:23
+desc_test_complex.proto:227:31
+
+
+ > message_type[8] > field[30] > number:
+desc_test_complex.proto:227:34
+desc_test_complex.proto:227:36
+
+
+ > message_type[8] > field[31]:
+desc_test_complex.proto:228:9
+desc_test_complex.proto:228:31
+
+
+ > message_type[8] > field[31] > label:
+desc_test_complex.proto:228:9
+desc_test_complex.proto:228:17
+
+
+ > message_type[8] > field[31] > type:
+desc_test_complex.proto:228:18
+desc_test_complex.proto:228:22
+
+
+ > message_type[8] > field[31] > name:
+desc_test_complex.proto:228:23
+desc_test_complex.proto:228:25
+
+
+ > message_type[8] > field[31] > number:
+desc_test_complex.proto:228:28
+desc_test_complex.proto:228:30
+
+
+ > message_type[8] > field[32]:
+desc_test_complex.proto:229:9
+desc_test_complex.proto:229:34
+
+
+ > message_type[8] > field[32] > label:
+desc_test_complex.proto:229:9
+desc_test_complex.proto:229:17
+
+
+ > message_type[8] > field[32] > type:
+desc_test_complex.proto:229:18
+desc_test_complex.proto:229:23
+
+
+ > message_type[8] > field[32] > name:
+desc_test_complex.proto:229:24
+desc_test_complex.proto:229:28
+
+
+ > message_type[8] > field[32] > number:
+desc_test_complex.proto:229:31
+desc_test_complex.proto:229:33
+
+
+ > message_type[8] > field[33]:
+desc_test_complex.proto:230:9
+desc_test_complex.proto:230:35
+
+
+ > message_type[8] > field[33] > label:
+desc_test_complex.proto:230:9
+desc_test_complex.proto:230:17
+
+
+ > message_type[8] > field[33] > type:
+desc_test_complex.proto:230:18
+desc_test_complex.proto:230:23
+
+
+ > message_type[8] > field[33] > name:
+desc_test_complex.proto:230:24
+desc_test_complex.proto:230:29
+
+
+ > message_type[8] > field[33] > number:
+desc_test_complex.proto:230:32
+desc_test_complex.proto:230:34
+
+
+ > message_type[8] > field[34]:
+desc_test_complex.proto:231:9
+desc_test_complex.proto:231:37
+
+
+ > message_type[8] > field[34] > label:
+desc_test_complex.proto:231:9
+desc_test_complex.proto:231:17
+
+
+ > message_type[8] > field[34] > type:
+desc_test_complex.proto:231:18
+desc_test_complex.proto:231:23
+
+
+ > message_type[8] > field[34] > name:
+desc_test_complex.proto:231:24
+desc_test_complex.proto:231:31
+
+
+ > message_type[8] > field[34] > number:
+desc_test_complex.proto:231:34
+desc_test_complex.proto:231:36
+
+
+ > extension:
+desc_test_complex.proto:234:1
+desc_test_complex.proto:271:2
+
+
+ > extension[7]:
+desc_test_complex.proto:235:9
+desc_test_complex.proto:235:38
+
+
+ > extension[7] > extendee:
+desc_test_complex.proto:234:8
+desc_test_complex.proto:234:36
+
+
+ > extension[7] > label:
+desc_test_complex.proto:235:9
+desc_test_complex.proto:235:17
+
+
+ > extension[7] > type:
+desc_test_complex.proto:235:18
+desc_test_complex.proto:235:22
+
+
+ > extension[7] > name:
+desc_test_complex.proto:235:23
+desc_test_complex.proto:235:29
+
+
+ > extension[7] > number:
+desc_test_complex.proto:235:32
+desc_test_complex.proto:235:37
+
+
+ > extension[8]:
+desc_test_complex.proto:236:9
+desc_test_complex.proto:236:38
+
+
+ > extension[8] > extendee:
+desc_test_complex.proto:234:8
+desc_test_complex.proto:234:36
+
+
+ > extension[8] > label:
+desc_test_complex.proto:236:9
+desc_test_complex.proto:236:17
+
+
+ > extension[8] > type:
+desc_test_complex.proto:236:18
+desc_test_complex.proto:236:22
+
+
+ > extension[8] > name:
+desc_test_complex.proto:236:23
+desc_test_complex.proto:236:29
+
+
+ > extension[8] > number:
+desc_test_complex.proto:236:32
+desc_test_complex.proto:236:37
+
+
+ > extension[9]:
+desc_test_complex.proto:237:9
+desc_test_complex.proto:237:38
+
+
+ > extension[9] > extendee:
+desc_test_complex.proto:234:8
+desc_test_complex.proto:234:36
+
+
+ > extension[9] > label:
+desc_test_complex.proto:237:9
+desc_test_complex.proto:237:17
+
+
+ > extension[9] > type:
+desc_test_complex.proto:237:18
+desc_test_complex.proto:237:22
+
+
+ > extension[9] > name:
+desc_test_complex.proto:237:23
+desc_test_complex.proto:237:29
+
+
+ > extension[9] > number:
+desc_test_complex.proto:237:32
+desc_test_complex.proto:237:37
+
+
+ > extension[10]:
+desc_test_complex.proto:238:9
+desc_test_complex.proto:238:36
+
+
+ > extension[10] > extendee:
+desc_test_complex.proto:234:8
+desc_test_complex.proto:234:36
+
+
+ > extension[10] > label:
+desc_test_complex.proto:238:9
+desc_test_complex.proto:238:17
+
+
+ > extension[10] > type:
+desc_test_complex.proto:238:18
+desc_test_complex.proto:238:22
+
+
+ > extension[10] > name:
+desc_test_complex.proto:238:23
+desc_test_complex.proto:238:27
+
+
+ > extension[10] > number:
+desc_test_complex.proto:238:30
+desc_test_complex.proto:238:35
+
+
+ > extension[11]:
+desc_test_complex.proto:239:9
+desc_test_complex.proto:239:39
+
+
+ > extension[11] > extendee:
+desc_test_complex.proto:234:8
+desc_test_complex.proto:234:36
+
+
+ > extension[11] > label:
+desc_test_complex.proto:239:9
+desc_test_complex.proto:239:17
+
+
+ > extension[11] > type:
+desc_test_complex.proto:239:18
+desc_test_complex.proto:239:22
+
+
+ > extension[11] > name:
+desc_test_complex.proto:239:23
+desc_test_complex.proto:239:30
+
+
+ > extension[11] > number:
+desc_test_complex.proto:239:33
+desc_test_complex.proto:239:38
+
+
+ > extension[12]:
+desc_test_complex.proto:240:9
+desc_test_complex.proto:240:40
+
+
+ > extension[12] > extendee:
+desc_test_complex.proto:234:8
+desc_test_complex.proto:234:36
+
+
+ > extension[12] > label:
+desc_test_complex.proto:240:9
+desc_test_complex.proto:240:17
+
+
+ > extension[12] > type:
+desc_test_complex.proto:240:18
+desc_test_complex.proto:240:24
+
+
+ > extension[12] > name:
+desc_test_complex.proto:240:25
+desc_test_complex.proto:240:31
+
+
+ > extension[12] > number:
+desc_test_complex.proto:240:34
+desc_test_complex.proto:240:39
+
+
+ > extension[13]:
+desc_test_complex.proto:241:9
+desc_test_complex.proto:241:38
+
+
+ > extension[13] > extendee:
+desc_test_complex.proto:234:8
+desc_test_complex.proto:234:36
+
+
+ > extension[13] > label:
+desc_test_complex.proto:241:9
+desc_test_complex.proto:241:17
+
+
+ > extension[13] > type:
+desc_test_complex.proto:241:18
+desc_test_complex.proto:241:23
+
+
+ > extension[13] > name:
+desc_test_complex.proto:241:24
+desc_test_complex.proto:241:29
+
+
+ > extension[13] > number:
+desc_test_complex.proto:241:32
+desc_test_complex.proto:241:37
+
+
+ > extension[14]:
+desc_test_complex.proto:242:9
+desc_test_complex.proto:242:38
+
+
+ > extension[14] > extendee:
+desc_test_complex.proto:234:8
+desc_test_complex.proto:234:36
+
+
+ > extension[14] > label:
+desc_test_complex.proto:242:9
+desc_test_complex.proto:242:17
+
+
+ > extension[14] > type:
+desc_test_complex.proto:242:18
+desc_test_complex.proto:242:23
+
+
+ > extension[14] > name:
+desc_test_complex.proto:242:24
+desc_test_complex.proto:242:29
+
+
+ > extension[14] > number:
+desc_test_complex.proto:242:32
+desc_test_complex.proto:242:37
+
+
+ > extension[15]:
+desc_test_complex.proto:243:9
+desc_test_complex.proto:243:38
+
+
+ > extension[15] > extendee:
+desc_test_complex.proto:234:8
+desc_test_complex.proto:234:36
+
+
+ > extension[15] > label:
+desc_test_complex.proto:243:9
+desc_test_complex.proto:243:17
+
+
+ > extension[15] > type:
+desc_test_complex.proto:243:18
+desc_test_complex.proto:243:23
+
+
+ > extension[15] > name:
+desc_test_complex.proto:243:24
+desc_test_complex.proto:243:29
+
+
+ > extension[15] > number:
+desc_test_complex.proto:243:32
+desc_test_complex.proto:243:37
+
+
+ > extension[16]:
+desc_test_complex.proto:244:9
+desc_test_complex.proto:244:40
+
+
+ > extension[16] > extendee:
+desc_test_complex.proto:234:8
+desc_test_complex.proto:234:36
+
+
+ > extension[16] > label:
+desc_test_complex.proto:244:9
+desc_test_complex.proto:244:17
+
+
+ > extension[16] > type:
+desc_test_complex.proto:244:18
+desc_test_complex.proto:244:24
+
+
+ > extension[16] > name:
+desc_test_complex.proto:244:25
+desc_test_complex.proto:244:31
+
+
+ > extension[16] > number:
+desc_test_complex.proto:244:34
+desc_test_complex.proto:244:39
+
+
+ > extension[17]:
+desc_test_complex.proto:245:9
+desc_test_complex.proto:245:40
+
+
+ > extension[17] > extendee:
+desc_test_complex.proto:234:8
+desc_test_complex.proto:234:36
+
+
+ > extension[17] > label:
+desc_test_complex.proto:245:9
+desc_test_complex.proto:245:17
+
+
+ > extension[17] > type:
+desc_test_complex.proto:245:18
+desc_test_complex.proto:245:24
+
+
+ > extension[17] > name:
+desc_test_complex.proto:245:25
+desc_test_complex.proto:245:31
+
+
+ > extension[17] > number:
+desc_test_complex.proto:245:34
+desc_test_complex.proto:245:39
+
+
+ > extension[18]:
+desc_test_complex.proto:246:9
+desc_test_complex.proto:246:40
+
+
+ > extension[18] > extendee:
+desc_test_complex.proto:234:8
+desc_test_complex.proto:234:36
+
+
+ > extension[18] > label:
+desc_test_complex.proto:246:9
+desc_test_complex.proto:246:17
+
+
+ > extension[18] > type:
+desc_test_complex.proto:246:18
+desc_test_complex.proto:246:24
+
+
+ > extension[18] > name:
+desc_test_complex.proto:246:25
+desc_test_complex.proto:246:31
+
+
+ > extension[18] > number:
+desc_test_complex.proto:246:34
+desc_test_complex.proto:246:39
+
+
+ > extension[19]:
+desc_test_complex.proto:247:9
+desc_test_complex.proto:247:40
+
+
+ > extension[19] > extendee:
+desc_test_complex.proto:234:8
+desc_test_complex.proto:234:36
+
+
+ > extension[19] > label:
+desc_test_complex.proto:247:9
+desc_test_complex.proto:247:17
+
+
+ > extension[19] > type:
+desc_test_complex.proto:247:18
+desc_test_complex.proto:247:24
+
+
+ > extension[19] > name:
+desc_test_complex.proto:247:25
+desc_test_complex.proto:247:31
+
+
+ > extension[19] > number:
+desc_test_complex.proto:247:34
+desc_test_complex.proto:247:39
+
+
+ > extension[20]:
+desc_test_complex.proto:248:9
+desc_test_complex.proto:248:42
+
+
+ > extension[20] > extendee:
+desc_test_complex.proto:234:8
+desc_test_complex.proto:234:36
+
+
+ > extension[20] > label:
+desc_test_complex.proto:248:9
+desc_test_complex.proto:248:17
+
+
+ > extension[20] > type:
+desc_test_complex.proto:248:18
+desc_test_complex.proto:248:25
+
+
+ > extension[20] > name:
+desc_test_complex.proto:248:26
+desc_test_complex.proto:248:33
+
+
+ > extension[20] > number:
+desc_test_complex.proto:248:36
+desc_test_complex.proto:248:41
+
+
+ > extension[21]:
+desc_test_complex.proto:249:9
+desc_test_complex.proto:249:42
+
+
+ > extension[21] > extendee:
+desc_test_complex.proto:234:8
+desc_test_complex.proto:234:36
+
+
+ > extension[21] > label:
+desc_test_complex.proto:249:9
+desc_test_complex.proto:249:17
+
+
+ > extension[21] > type:
+desc_test_complex.proto:249:18
+desc_test_complex.proto:249:25
+
+
+ > extension[21] > name:
+desc_test_complex.proto:249:26
+desc_test_complex.proto:249:33
+
+
+ > extension[21] > number:
+desc_test_complex.proto:249:36
+desc_test_complex.proto:249:41
+
+
+ > extension[22]:
+desc_test_complex.proto:250:9
+desc_test_complex.proto:250:44
+
+
+ > extension[22] > extendee:
+desc_test_complex.proto:234:8
+desc_test_complex.proto:234:36
+
+
+ > extension[22] > label:
+desc_test_complex.proto:250:9
+desc_test_complex.proto:250:17
+
+
+ > extension[22] > type:
+desc_test_complex.proto:250:18
+desc_test_complex.proto:250:26
+
+
+ > extension[22] > name:
+desc_test_complex.proto:250:27
+desc_test_complex.proto:250:35
+
+
+ > extension[22] > number:
+desc_test_complex.proto:250:38
+desc_test_complex.proto:250:43
+
+
+ > extension[23]:
+desc_test_complex.proto:251:9
+desc_test_complex.proto:251:44
+
+
+ > extension[23] > extendee:
+desc_test_complex.proto:234:8
+desc_test_complex.proto:234:36
+
+
+ > extension[23] > label:
+desc_test_complex.proto:251:9
+desc_test_complex.proto:251:17
+
+
+ > extension[23] > type:
+desc_test_complex.proto:251:18
+desc_test_complex.proto:251:26
+
+
+ > extension[23] > name:
+desc_test_complex.proto:251:27
+desc_test_complex.proto:251:35
+
+
+ > extension[23] > number:
+desc_test_complex.proto:251:38
+desc_test_complex.proto:251:43
+
+
+ > extension[24]:
+desc_test_complex.proto:252:9
+desc_test_complex.proto:252:36
+
+
+ > extension[24] > extendee:
+desc_test_complex.proto:234:8
+desc_test_complex.proto:234:36
+
+
+ > extension[24] > label:
+desc_test_complex.proto:252:9
+desc_test_complex.proto:252:17
+
+
+ > extension[24] > type:
+desc_test_complex.proto:252:18
+desc_test_complex.proto:252:22
+
+
+ > extension[24] > name:
+desc_test_complex.proto:252:23
+desc_test_complex.proto:252:27
+
+
+ > extension[24] > number:
+desc_test_complex.proto:252:30
+desc_test_complex.proto:252:35
+
+
+ > extension[25]:
+desc_test_complex.proto:253:9
+desc_test_complex.proto:253:38
+
+
+ > extension[25] > extendee:
+desc_test_complex.proto:234:8
+desc_test_complex.proto:234:36
+
+
+ > extension[25] > label:
+desc_test_complex.proto:253:9
+desc_test_complex.proto:253:17
+
+
+ > extension[25] > type:
+desc_test_complex.proto:253:18
+desc_test_complex.proto:253:23
+
+
+ > extension[25] > name:
+desc_test_complex.proto:253:24
+desc_test_complex.proto:253:29
+
+
+ > extension[25] > number:
+desc_test_complex.proto:253:32
+desc_test_complex.proto:253:37
+
+
+ > extension[26]:
+desc_test_complex.proto:254:9
+desc_test_complex.proto:254:40
+
+
+ > extension[26] > extendee:
+desc_test_complex.proto:234:8
+desc_test_complex.proto:234:36
+
+
+ > extension[26] > label:
+desc_test_complex.proto:254:9
+desc_test_complex.proto:254:17
+
+
+ > extension[26] > type:
+desc_test_complex.proto:254:18
+desc_test_complex.proto:254:24
+
+
+ > extension[26] > name:
+desc_test_complex.proto:254:25
+desc_test_complex.proto:254:31
+
+
+ > extension[26] > number:
+desc_test_complex.proto:254:34
+desc_test_complex.proto:254:39
+
+
+ > extension[27]:
+desc_test_complex.proto:255:9
+desc_test_complex.proto:255:40
+
+
+ > extension[27] > extendee:
+desc_test_complex.proto:234:8
+desc_test_complex.proto:234:36
+
+
+ > extension[27] > label:
+desc_test_complex.proto:255:9
+desc_test_complex.proto:255:17
+
+
+ > extension[27] > type:
+desc_test_complex.proto:255:18
+desc_test_complex.proto:255:22
+
+
+ > extension[27] > name:
+desc_test_complex.proto:255:23
+desc_test_complex.proto:255:31
+
+
+ > extension[27] > number:
+desc_test_complex.proto:255:34
+desc_test_complex.proto:255:39
+
+
+ > extension[28]:
+desc_test_complex.proto:256:9
+desc_test_complex.proto:256:40
+
+
+ > extension[28] > extendee:
+desc_test_complex.proto:234:8
+desc_test_complex.proto:234:36
+
+
+ > extension[28] > label:
+desc_test_complex.proto:256:9
+desc_test_complex.proto:256:17
+
+
+ > extension[28] > type:
+desc_test_complex.proto:256:18
+desc_test_complex.proto:256:22
+
+
+ > extension[28] > name:
+desc_test_complex.proto:256:23
+desc_test_complex.proto:256:31
+
+
+ > extension[28] > number:
+desc_test_complex.proto:256:34
+desc_test_complex.proto:256:39
+
+
+ > extension[29]:
+desc_test_complex.proto:257:9
+desc_test_complex.proto:257:40
+
+
+ > extension[29] > extendee:
+desc_test_complex.proto:234:8
+desc_test_complex.proto:234:36
+
+
+ > extension[29] > label:
+desc_test_complex.proto:257:9
+desc_test_complex.proto:257:17
+
+
+ > extension[29] > type:
+desc_test_complex.proto:257:18
+desc_test_complex.proto:257:22
+
+
+ > extension[29] > name:
+desc_test_complex.proto:257:23
+desc_test_complex.proto:257:31
+
+
+ > extension[29] > number:
+desc_test_complex.proto:257:34
+desc_test_complex.proto:257:39
+
+
+ > extension[30]:
+desc_test_complex.proto:258:9
+desc_test_complex.proto:258:39
+
+
+ > extension[30] > extendee:
+desc_test_complex.proto:234:8
+desc_test_complex.proto:234:36
+
+
+ > extension[30] > label:
+desc_test_complex.proto:258:9
+desc_test_complex.proto:258:17
+
+
+ > extension[30] > type:
+desc_test_complex.proto:258:18
+desc_test_complex.proto:258:22
+
+
+ > extension[30] > name:
+desc_test_complex.proto:258:23
+desc_test_complex.proto:258:30
+
+
+ > extension[30] > number:
+desc_test_complex.proto:258:33
+desc_test_complex.proto:258:38
+
+
+ > extension[31]:
+desc_test_complex.proto:259:9
+desc_test_complex.proto:259:36
+
+
+ > extension[31] > extendee:
+desc_test_complex.proto:234:8
+desc_test_complex.proto:234:36
+
+
+ > extension[31] > label:
+desc_test_complex.proto:259:9
+desc_test_complex.proto:259:17
+
+
+ > extension[31] > type:
+desc_test_complex.proto:259:18
+desc_test_complex.proto:259:22
+
+
+ > extension[31] > name:
+desc_test_complex.proto:259:23
+desc_test_complex.proto:259:27
+
+
+ > extension[31] > number:
+desc_test_complex.proto:259:30
+desc_test_complex.proto:259:35
+
+
+ > extension[32]:
+desc_test_complex.proto:260:9
+desc_test_complex.proto:260:39
+
+
+ > extension[32] > extendee:
+desc_test_complex.proto:234:8
+desc_test_complex.proto:234:36
+
+
+ > extension[32] > label:
+desc_test_complex.proto:260:9
+desc_test_complex.proto:260:17
+
+
+ > extension[32] > type:
+desc_test_complex.proto:260:18
+desc_test_complex.proto:260:22
+
+
+ > extension[32] > name:
+desc_test_complex.proto:260:23
+desc_test_complex.proto:260:30
+
+
+ > extension[32] > number:
+desc_test_complex.proto:260:33
+desc_test_complex.proto:260:38
+
+
+ > extension[33]:
+desc_test_complex.proto:261:9
+desc_test_complex.proto:261:35
+
+
+ > extension[33] > extendee:
+desc_test_complex.proto:234:8
+desc_test_complex.proto:234:36
+
+
+ > extension[33] > label:
+desc_test_complex.proto:261:9
+desc_test_complex.proto:261:17
+
+
+ > extension[33] > type:
+desc_test_complex.proto:261:18
+desc_test_complex.proto:261:22
+
+
+ > extension[33] > name:
+desc_test_complex.proto:261:23
+desc_test_complex.proto:261:26
+
+
+ > extension[33] > number:
+desc_test_complex.proto:261:29
+desc_test_complex.proto:261:34
+
+
+ > extension[34]:
+desc_test_complex.proto:262:9
+desc_test_complex.proto:262:38
+
+
+ > extension[34] > extendee:
+desc_test_complex.proto:234:8
+desc_test_complex.proto:234:36
+
+
+ > extension[34] > label:
+desc_test_complex.proto:262:9
+desc_test_complex.proto:262:17
+
+
+ > extension[34] > type:
+desc_test_complex.proto:262:18
+desc_test_complex.proto:262:22
+
+
+ > extension[34] > name:
+desc_test_complex.proto:262:23
+desc_test_complex.proto:262:29
+
+
+ > extension[34] > number:
+desc_test_complex.proto:262:32
+desc_test_complex.proto:262:37
+
+
+ > extension[35]:
+desc_test_complex.proto:263:9
+desc_test_complex.proto:263:38
+
+
+ > extension[35] > extendee:
+desc_test_complex.proto:234:8
+desc_test_complex.proto:234:36
+
+
+ > extension[35] > label:
+desc_test_complex.proto:263:9
+desc_test_complex.proto:263:17
+
+
+ > extension[35] > type:
+desc_test_complex.proto:263:18
+desc_test_complex.proto:263:22
+
+
+ > extension[35] > name:
+desc_test_complex.proto:263:23
+desc_test_complex.proto:263:29
+
+
+ > extension[35] > number:
+desc_test_complex.proto:263:32
+desc_test_complex.proto:263:37
+
+
+ > extension[36]:
+desc_test_complex.proto:264:9
+desc_test_complex.proto:264:42
+
+
+ > extension[36] > extendee:
+desc_test_complex.proto:234:8
+desc_test_complex.proto:234:36
+
+
+ > extension[36] > label:
+desc_test_complex.proto:264:9
+desc_test_complex.proto:264:17
+
+
+ > extension[36] > type:
+desc_test_complex.proto:264:18
+desc_test_complex.proto:264:22
+
+
+ > extension[36] > name:
+desc_test_complex.proto:264:23
+desc_test_complex.proto:264:33
+
+
+ > extension[36] > number:
+desc_test_complex.proto:264:36
+desc_test_complex.proto:264:41
+
+
+ > extension[37]:
+desc_test_complex.proto:265:9
+desc_test_complex.proto:265:40
+
+
+ > extension[37] > extendee:
+desc_test_complex.proto:234:8
+desc_test_complex.proto:234:36
+
+
+ > extension[37] > label:
+desc_test_complex.proto:265:9
+desc_test_complex.proto:265:17
+
+
+ > extension[37] > type:
+desc_test_complex.proto:265:18
+desc_test_complex.proto:265:22
+
+
+ > extension[37] > name:
+desc_test_complex.proto:265:23
+desc_test_complex.proto:265:31
+
+
+ > extension[37] > number:
+desc_test_complex.proto:265:34
+desc_test_complex.proto:265:39
+
+
+ > extension[38]:
+desc_test_complex.proto:266:9
+desc_test_complex.proto:266:34
+
+
+ > extension[38] > extendee:
+desc_test_complex.proto:234:8
+desc_test_complex.proto:234:36
+
+
+ > extension[38] > label:
+desc_test_complex.proto:266:9
+desc_test_complex.proto:266:17
+
+
+ > extension[38] > type:
+desc_test_complex.proto:266:18
+desc_test_complex.proto:266:22
+
+
+ > extension[38] > name:
+desc_test_complex.proto:266:23
+desc_test_complex.proto:266:25
+
+
+ > extension[38] > number:
+desc_test_complex.proto:266:28
+desc_test_complex.proto:266:33
+
+
+ > extension[39]:
+desc_test_complex.proto:267:9
+desc_test_complex.proto:267:37
+
+
+ > extension[39] > extendee:
+desc_test_complex.proto:234:8
+desc_test_complex.proto:234:36
+
+
+ > extension[39] > label:
+desc_test_complex.proto:267:9
+desc_test_complex.proto:267:17
+
+
+ > extension[39] > type:
+desc_test_complex.proto:267:18
+desc_test_complex.proto:267:23
+
+
+ > extension[39] > name:
+desc_test_complex.proto:267:24
+desc_test_complex.proto:267:28
+
+
+ > extension[39] > number:
+desc_test_complex.proto:267:31
+desc_test_complex.proto:267:36
+
+
+ > extension[40]:
+desc_test_complex.proto:268:9
+desc_test_complex.proto:268:38
+
+
+ > extension[40] > extendee:
+desc_test_complex.proto:234:8
+desc_test_complex.proto:234:36
+
+
+ > extension[40] > label:
+desc_test_complex.proto:268:9
+desc_test_complex.proto:268:17
+
+
+ > extension[40] > type:
+desc_test_complex.proto:268:18
+desc_test_complex.proto:268:23
+
+
+ > extension[40] > name:
+desc_test_complex.proto:268:24
+desc_test_complex.proto:268:29
+
+
+ > extension[40] > number:
+desc_test_complex.proto:268:32
+desc_test_complex.proto:268:37
+
+
+ > extension[41]:
+desc_test_complex.proto:269:9
+desc_test_complex.proto:269:40
+
+
+ > extension[41] > extendee:
+desc_test_complex.proto:234:8
+desc_test_complex.proto:234:36
+
+
+ > extension[41] > label:
+desc_test_complex.proto:269:9
+desc_test_complex.proto:269:17
+
+
+ > extension[41] > type:
+desc_test_complex.proto:269:18
+desc_test_complex.proto:269:23
+
+
+ > extension[41] > name:
+desc_test_complex.proto:269:24
+desc_test_complex.proto:269:31
+
+
+ > extension[41] > number:
+desc_test_complex.proto:269:34
+desc_test_complex.proto:269:39
+
+
+ > extension[42]:
+desc_test_complex.proto:270:9
+desc_test_complex.proto:270:49
+
+
+ > extension[42] > extendee:
+desc_test_complex.proto:234:8
+desc_test_complex.proto:234:36
+
+
+ > extension[42] > label:
+desc_test_complex.proto:270:9
+desc_test_complex.proto:270:17
+
+
+ > extension[42] > type_name:
+desc_test_complex.proto:270:18
+desc_test_complex.proto:270:35
+
+
+ > extension[42] > name:
+desc_test_complex.proto:270:36
+desc_test_complex.proto:270:40
+
+
+ > extension[42] > number:
+desc_test_complex.proto:270:43
+desc_test_complex.proto:270:48
+
+
+ > message_type[9]:
+desc_test_complex.proto:273:1
+desc_test_complex.proto:298:2
+
+
+ > message_type[9] > name:
+desc_test_complex.proto:273:9
+desc_test_complex.proto:273:32
+
+
+ > message_type[9] > field[0]:
+desc_test_complex.proto:274:9
+desc_test_complex.proto:284:11
+
+
+ > message_type[9] > field[0] > label:
+desc_test_complex.proto:274:9
+desc_test_complex.proto:274:17
+
+
+ > message_type[9] > field[0] > type:
+desc_test_complex.proto:274:18
+desc_test_complex.proto:274:24
+
+
+ > message_type[9] > field[0] > name:
+desc_test_complex.proto:274:25
+desc_test_complex.proto:274:27
+
+
+ > message_type[9] > field[0] > number:
+desc_test_complex.proto:274:30
+desc_test_complex.proto:274:31
+
+
+ > message_type[9] > field[0] > options:
+desc_test_complex.proto:274:32
+desc_test_complex.proto:284:10
+
+
+ > message_type[9] > field[0] > options > (foo.bar.syntax):
+desc_test_complex.proto:275:17
+desc_test_complex.proto:275:32
+
+
+ > message_type[9] > field[0] > options > (foo.bar.import):
+desc_test_complex.proto:275:34
+desc_test_complex.proto:275:49
+
+
+ > message_type[9] > field[0] > options > (foo.bar.public):
+desc_test_complex.proto:275:51
+desc_test_complex.proto:275:66
+
+
+ > message_type[9] > field[0] > options > (foo.bar.weak):
+desc_test_complex.proto:275:68
+desc_test_complex.proto:275:81
+
+
+ > message_type[9] > field[0] > options > (foo.bar.package):
+desc_test_complex.proto:275:83
+desc_test_complex.proto:275:99
+
+
+ > message_type[9] > field[0] > options > (foo.bar.string):
+desc_test_complex.proto:276:17
+desc_test_complex.proto:276:78
+
+
+ > message_type[9] > field[0] > options > (foo.bar.bytes):
+desc_test_complex.proto:276:80
+desc_test_complex.proto:276:139
+
+
+ > message_type[9] > field[0] > options > (foo.bar.bool):
+desc_test_complex.proto:276:141
+desc_test_complex.proto:276:154
+
+
+ > message_type[9] > field[0] > options > (foo.bar.float):
+desc_test_complex.proto:277:17
+desc_test_complex.proto:277:31
+
+
+ > message_type[9] > field[0] > options > (foo.bar.double):
+desc_test_complex.proto:277:33
+desc_test_complex.proto:277:51
+
+
+ > message_type[9] > field[0] > options > (foo.bar.int32):
+desc_test_complex.proto:278:17
+desc_test_complex.proto:278:29
+
+
+ > message_type[9] > field[0] > options > (foo.bar.int64):
+desc_test_complex.proto:278:31
+desc_test_complex.proto:278:43
+
+
+ > message_type[9] > field[0] > options > (foo.bar.uint32):
+desc_test_complex.proto:278:45
+desc_test_complex.proto:278:60
+
+
+ > message_type[9] > field[0] > options > (foo.bar.uint64):
+desc_test_complex.proto:278:62
+desc_test_complex.proto:278:77
+
+
+ > message_type[9] > field[0] > options > (foo.bar.sint32):
+desc_test_complex.proto:278:79
+desc_test_complex.proto:278:93
+
+
+ > message_type[9] > field[0] > options > (foo.bar.sint64):
+desc_test_complex.proto:278:95
+desc_test_complex.proto:278:109
+
+
+ > message_type[9] > field[0] > options > (foo.bar.fixed32):
+desc_test_complex.proto:279:17
+desc_test_complex.proto:279:33
+
+
+ > message_type[9] > field[0] > options > (foo.bar.fixed64):
+desc_test_complex.proto:279:35
+desc_test_complex.proto:279:51
+
+
+ > message_type[9] > field[0] > options > (foo.bar.sfixed32):
+desc_test_complex.proto:279:53
+desc_test_complex.proto:279:71
+
+
+ > message_type[9] > field[0] > options > (foo.bar.sfixed64):
+desc_test_complex.proto:279:73
+desc_test_complex.proto:279:91
+
+
+ > message_type[9] > field[0] > options > (foo.bar.optional):
+desc_test_complex.proto:280:17
+desc_test_complex.proto:280:34
+
+
+ > message_type[9] > field[0] > options > (foo.bar.repeated):
+desc_test_complex.proto:280:36
+desc_test_complex.proto:280:53
+
+
+ > message_type[9] > field[0] > options > (foo.bar.required):
+desc_test_complex.proto:280:55
+desc_test_complex.proto:280:72
+
+
+ > message_type[9] > field[0] > options > (foo.bar.message):
+desc_test_complex.proto:281:17
+desc_test_complex.proto:281:33
+
+
+ > message_type[9] > field[0] > options > (foo.bar.enum):
+desc_test_complex.proto:281:35
+desc_test_complex.proto:281:48
+
+
+ > message_type[9] > field[0] > options > (foo.bar.service):
+desc_test_complex.proto:281:50
+desc_test_complex.proto:281:66
+
+
+ > message_type[9] > field[0] > options > (foo.bar.rpc):
+desc_test_complex.proto:281:68
+desc_test_complex.proto:281:80
+
+
+ > message_type[9] > field[0] > options > (foo.bar.option):
+desc_test_complex.proto:282:17
+desc_test_complex.proto:282:32
+
+
+ > message_type[9] > field[0] > options > (foo.bar.extend):
+desc_test_complex.proto:282:34
+desc_test_complex.proto:282:49
+
+
+ > message_type[9] > field[0] > options > (foo.bar.extensions):
+desc_test_complex.proto:282:51
+desc_test_complex.proto:282:70
+
+
+ > message_type[9] > field[0] > options > (foo.bar.reserved):
+desc_test_complex.proto:282:72
+desc_test_complex.proto:282:89
+
+
+ > message_type[9] > field[0] > options > (foo.bar.to):
+desc_test_complex.proto:283:17
+desc_test_complex.proto:283:28
+
+
+ > message_type[9] > field[0] > options > (foo.bar.true):
+desc_test_complex.proto:283:30
+desc_test_complex.proto:283:42
+
+
+ > message_type[9] > field[0] > options > (foo.bar.false):
+desc_test_complex.proto:283:44
+desc_test_complex.proto:283:58
+
+
+ > message_type[9] > field[0] > options > (foo.bar.default):
+desc_test_complex.proto:283:60
+desc_test_complex.proto:283:75
+
+
+ > message_type[9] > field[1]:
+desc_test_complex.proto:285:9
+desc_test_complex.proto:297:11
+
+
+ > message_type[9] > field[1] > label:
+desc_test_complex.proto:285:9
+desc_test_complex.proto:285:17
+
+
+ > message_type[9] > field[1] > type:
+desc_test_complex.proto:285:18
+desc_test_complex.proto:285:24
+
+
+ > message_type[9] > field[1] > name:
+desc_test_complex.proto:285:25
+desc_test_complex.proto:285:29
+
+
+ > message_type[9] > field[1] > number:
+desc_test_complex.proto:285:32
+desc_test_complex.proto:285:33
+
+
+ > message_type[9] > field[1] > options:
+desc_test_complex.proto:285:34
+desc_test_complex.proto:297:10
+
+
+ > message_type[9] > field[1] > options > (foo.bar.boom):
+desc_test_complex.proto:286:17
+desc_test_complex.proto:296:18
+---- desc_test_options.proto ----
+
+
+:
+desc_test_options.proto:1:1
+desc_test_options.proto:63:2
+
+
+ > syntax:
+desc_test_options.proto:1:1
+desc_test_options.proto:1:19
+
+
+ > options:
+desc_test_options.proto:3:1
+desc_test_options.proto:3:73
+
+
+ > options > go_package:
+desc_test_options.proto:3:1
+desc_test_options.proto:3:73
+
+
+ > package:
+desc_test_options.proto:5:1
+desc_test_options.proto:5:20
+
+
+ > dependency[0]:
+desc_test_options.proto:7:1
+desc_test_options.proto:7:43
+
+
+ > extension:
+desc_test_options.proto:9:1
+desc_test_options.proto:11:2
+
+
+ > extension[0]:
+desc_test_options.proto:10:9
+desc_test_options.proto:10:38
+
+
+ > extension[0] > extendee:
+desc_test_options.proto:9:8
+desc_test_options.proto:9:38
+
+
+ > extension[0] > label:
+desc_test_options.proto:10:9
+desc_test_options.proto:10:17
+
+
+ > extension[0] > type:
+desc_test_options.proto:10:18
+desc_test_options.proto:10:22
+
+
+ > extension[0] > name:
+desc_test_options.proto:10:23
+desc_test_options.proto:10:29
+
+
+ > extension[0] > number:
+desc_test_options.proto:10:32
+desc_test_options.proto:10:37
+
+
+ > extension:
+desc_test_options.proto:13:1
+desc_test_options.proto:16:2
+
+
+ > extension[1]:
+desc_test_options.proto:14:9
+desc_test_options.proto:14:40
+
+
+ > extension[1] > extendee:
+desc_test_options.proto:13:8
+desc_test_options.proto:13:36
+
+
+ > extension[1] > label:
+desc_test_options.proto:14:9
+desc_test_options.proto:14:17
+
+
+ > extension[1] > type:
+desc_test_options.proto:14:18
+desc_test_options.proto:14:24
+
+
+ > extension[1] > name:
+desc_test_options.proto:14:25
+desc_test_options.proto:14:31
+
+
+ > extension[1] > number:
+desc_test_options.proto:14:34
+desc_test_options.proto:14:39
+
+
+ > extension[2]:
+desc_test_options.proto:15:9
+desc_test_options.proto:15:40
+
+
+ > extension[2] > extendee:
+desc_test_options.proto:13:8
+desc_test_options.proto:13:36
+
+
+ > extension[2] > label:
+desc_test_options.proto:15:9
+desc_test_options.proto:15:17
+
+
+ > extension[2] > type:
+desc_test_options.proto:15:18
+desc_test_options.proto:15:23
+
+
+ > extension[2] > name:
+desc_test_options.proto:15:24
+desc_test_options.proto:15:31
+
+
+ > extension[2] > number:
+desc_test_options.proto:15:34
+desc_test_options.proto:15:39
+
+
+ > extension:
+desc_test_options.proto:18:1
+desc_test_options.proto:24:2
+
+
+ > extension[3]:
+desc_test_options.proto:19:9
+desc_test_options.proto:19:39
+
+
+ > extension[3] > extendee:
+desc_test_options.proto:18:8
+desc_test_options.proto:18:35
+
+
+ > extension[3] > label:
+desc_test_options.proto:19:9
+desc_test_options.proto:19:17
+
+
+ > extension[3] > type:
+desc_test_options.proto:19:18
+desc_test_options.proto:19:23
+
+
+ > extension[3] > name:
+desc_test_options.proto:19:24
+desc_test_options.proto:19:30
+
+
+ > extension[3] > number:
+desc_test_options.proto:19:33
+desc_test_options.proto:19:38
+
+
+ > extension[4]:
+desc_test_options.proto:20:9
+desc_test_options.proto:20:41
+
+
+ > extension[4] > extendee:
+desc_test_options.proto:18:8
+desc_test_options.proto:18:35
+
+
+ > extension[4] > label:
+desc_test_options.proto:20:9
+desc_test_options.proto:20:17
+
+
+ > extension[4] > type:
+desc_test_options.proto:20:18
+desc_test_options.proto:20:24
+
+
+ > extension[4] > name:
+desc_test_options.proto:20:25
+desc_test_options.proto:20:32
+
+
+ > extension[4] > number:
+desc_test_options.proto:20:35
+desc_test_options.proto:20:40
+
+
+ > extension[5]:
+desc_test_options.proto:21:9
+desc_test_options.proto:21:44
+
+
+ > extension[5] > extendee:
+desc_test_options.proto:18:8
+desc_test_options.proto:18:35
+
+
+ > extension[5] > label:
+desc_test_options.proto:21:9
+desc_test_options.proto:21:17
+
+
+ > extension[5] > type:
+desc_test_options.proto:21:18
+desc_test_options.proto:21:26
+
+
+ > extension[5] > name:
+desc_test_options.proto:21:27
+desc_test_options.proto:21:35
+
+
+ > extension[5] > number:
+desc_test_options.proto:21:38
+desc_test_options.proto:21:43
+
+
+ > extension[6]:
+desc_test_options.proto:22:9
+desc_test_options.proto:22:41
+
+
+ > extension[6] > extendee:
+desc_test_options.proto:18:8
+desc_test_options.proto:18:35
+
+
+ > extension[6] > label:
+desc_test_options.proto:22:9
+desc_test_options.proto:22:17
+
+
+ > extension[6] > type:
+desc_test_options.proto:22:18
+desc_test_options.proto:22:24
+
+
+ > extension[6] > name:
+desc_test_options.proto:22:25
+desc_test_options.proto:22:32
+
+
+ > extension[6] > number:
+desc_test_options.proto:22:35
+desc_test_options.proto:22:40
+
+
+ > extension[7]:
+desc_test_options.proto:23:9
+desc_test_options.proto:23:43
+
+
+ > extension[7] > extendee:
+desc_test_options.proto:18:8
+desc_test_options.proto:18:35
+
+
+ > extension[7] > label:
+desc_test_options.proto:23:9
+desc_test_options.proto:23:17
+
+
+ > extension[7] > type:
+desc_test_options.proto:23:18
+desc_test_options.proto:23:25
+
+
+ > extension[7] > name:
+desc_test_options.proto:23:26
+desc_test_options.proto:23:34
+
+
+ > extension[7] > number:
+desc_test_options.proto:23:37
+desc_test_options.proto:23:42
+
+
+ > extension:
+desc_test_options.proto:26:1
+desc_test_options.proto:32:2
+
+
+ > extension[8]:
+desc_test_options.proto:27:9
+desc_test_options.proto:27:40
+
+
+ > extension[8] > extendee:
+desc_test_options.proto:26:8
+desc_test_options.proto:26:40
+
+
+ > extension[8] > label:
+desc_test_options.proto:27:9
+desc_test_options.proto:27:17
+
+
+ > extension[8] > type:
+desc_test_options.proto:27:18
+desc_test_options.proto:27:23
+
+
+ > extension[8] > name:
+desc_test_options.proto:27:24
+desc_test_options.proto:27:31
+
+
+ > extension[8] > number:
+desc_test_options.proto:27:34
+desc_test_options.proto:27:39
+
+
+ > extension[9]:
+desc_test_options.proto:28:9
+desc_test_options.proto:28:42
+
+
+ > extension[9] > extendee:
+desc_test_options.proto:26:8
+desc_test_options.proto:26:40
+
+
+ > extension[9] > label:
+desc_test_options.proto:28:9
+desc_test_options.proto:28:17
+
+
+ > extension[9] > type:
+desc_test_options.proto:28:18
+desc_test_options.proto:28:24
+
+
+ > extension[9] > name:
+desc_test_options.proto:28:25
+desc_test_options.proto:28:33
+
+
+ > extension[9] > number:
+desc_test_options.proto:28:36
+desc_test_options.proto:28:41
+
+
+ > extension[10]:
+desc_test_options.proto:29:9
+desc_test_options.proto:29:45
+
+
+ > extension[10] > extendee:
+desc_test_options.proto:26:8
+desc_test_options.proto:26:40
+
+
+ > extension[10] > label:
+desc_test_options.proto:29:9
+desc_test_options.proto:29:17
+
+
+ > extension[10] > type:
+desc_test_options.proto:29:18
+desc_test_options.proto:29:26
+
+
+ > extension[10] > name:
+desc_test_options.proto:29:27
+desc_test_options.proto:29:36
+
+
+ > extension[10] > number:
+desc_test_options.proto:29:39
+desc_test_options.proto:29:44
+
+
+ > extension[11]:
+desc_test_options.proto:30:9
+desc_test_options.proto:30:42
+
+
+ > extension[11] > extendee:
+desc_test_options.proto:26:8
+desc_test_options.proto:26:40
+
+
+ > extension[11] > label:
+desc_test_options.proto:30:9
+desc_test_options.proto:30:17
+
+
+ > extension[11] > type:
+desc_test_options.proto:30:18
+desc_test_options.proto:30:24
+
+
+ > extension[11] > name:
+desc_test_options.proto:30:25
+desc_test_options.proto:30:33
+
+
+ > extension[11] > number:
+desc_test_options.proto:30:36
+desc_test_options.proto:30:41
+
+
+ > extension[12]:
+desc_test_options.proto:31:9
+desc_test_options.proto:31:44
+
+
+ > extension[12] > extendee:
+desc_test_options.proto:26:8
+desc_test_options.proto:26:40
+
+
+ > extension[12] > label:
+desc_test_options.proto:31:9
+desc_test_options.proto:31:17
+
+
+ > extension[12] > type:
+desc_test_options.proto:31:18
+desc_test_options.proto:31:25
+
+
+ > extension[12] > name:
+desc_test_options.proto:31:26
+desc_test_options.proto:31:35
+
+
+ > extension[12] > number:
+desc_test_options.proto:31:38
+desc_test_options.proto:31:43
+
+
+ > extension:
+desc_test_options.proto:34:1
+desc_test_options.proto:37:2
+
+
+ > extension[13]:
+desc_test_options.proto:35:9
+desc_test_options.proto:35:53
+
+
+ > extension[13] > extendee:
+desc_test_options.proto:34:8
+desc_test_options.proto:34:38
+
+
+ > extension[13] > label:
+desc_test_options.proto:35:9
+desc_test_options.proto:35:17
+
+
+ > extension[13] > type_name:
+desc_test_options.proto:35:18
+desc_test_options.proto:35:37
+
+
+ > extension[13] > name:
+desc_test_options.proto:35:38
+desc_test_options.proto:35:44
+
+
+ > extension[13] > number:
+desc_test_options.proto:35:47
+desc_test_options.proto:35:52
+
+
+ > extension[14]:
+desc_test_options.proto:36:9
+desc_test_options.proto:36:51
+
+
+ > extension[14] > extendee:
+desc_test_options.proto:34:8
+desc_test_options.proto:34:38
+
+
+ > extension[14] > label:
+desc_test_options.proto:36:9
+desc_test_options.proto:36:17
+
+
+ > extension[14] > type_name:
+desc_test_options.proto:36:18
+desc_test_options.proto:36:34
+
+
+ > extension[14] > name:
+desc_test_options.proto:36:35
+desc_test_options.proto:36:42
+
+
+ > extension[14] > number:
+desc_test_options.proto:36:45
+desc_test_options.proto:36:50
+
+
+ > extension:
+desc_test_options.proto:39:1
+desc_test_options.proto:42:2
+
+
+ > extension[15]:
+desc_test_options.proto:40:9
+desc_test_options.proto:40:40
+
+
+ > extension[15] > extendee:
+desc_test_options.proto:39:8
+desc_test_options.proto:39:37
+
+
+ > extension[15] > label:
+desc_test_options.proto:40:9
+desc_test_options.proto:40:17
+
+
+ > extension[15] > type:
+desc_test_options.proto:40:18
+desc_test_options.proto:40:23
+
+
+ > extension[15] > name:
+desc_test_options.proto:40:24
+desc_test_options.proto:40:31
+
+
+ > extension[15] > number:
+desc_test_options.proto:40:34
+desc_test_options.proto:40:39
+
+
+ > extension[16]:
+desc_test_options.proto:41:9
+desc_test_options.proto:41:42
+
+
+ > extension[16] > extendee:
+desc_test_options.proto:39:8
+desc_test_options.proto:39:37
+
+
+ > extension[16] > label:
+desc_test_options.proto:41:9
+desc_test_options.proto:41:17
+
+
+ > extension[16] > type:
+desc_test_options.proto:41:18
+desc_test_options.proto:41:24
+
+
+ > extension[16] > name:
+desc_test_options.proto:41:25
+desc_test_options.proto:41:33
+
+
+ > extension[16] > number:
+desc_test_options.proto:41:36
+desc_test_options.proto:41:41
+
+
+ > message_type[0]:
+desc_test_options.proto:45:1
+desc_test_options.proto:48:2
+ Leading comments:
+ Test message used by custom options
+
+
+
+ > message_type[0] > name:
+desc_test_options.proto:45:9
+desc_test_options.proto:45:28
+
+
+ > message_type[0] > field[0]:
+desc_test_options.proto:46:9
+desc_test_options.proto:46:32
+
+
+ > message_type[0] > field[0] > label:
+desc_test_options.proto:46:9
+desc_test_options.proto:46:17
+
+
+ > message_type[0] > field[0] > type:
+desc_test_options.proto:46:18
+desc_test_options.proto:46:24
+
+
+ > message_type[0] > field[0] > name:
+desc_test_options.proto:46:25
+desc_test_options.proto:46:27
+
+
+ > message_type[0] > field[0] > number:
+desc_test_options.proto:46:30
+desc_test_options.proto:46:31
+
+
+ > message_type[0] > field[1]:
+desc_test_options.proto:47:9
+desc_test_options.proto:47:34
+
+
+ > message_type[0] > field[1] > label:
+desc_test_options.proto:47:9
+desc_test_options.proto:47:17
+
+
+ > message_type[0] > field[1] > type:
+desc_test_options.proto:47:18
+desc_test_options.proto:47:24
+
+
+ > message_type[0] > field[1] > name:
+desc_test_options.proto:47:25
+desc_test_options.proto:47:29
+
+
+ > message_type[0] > field[1] > number:
+desc_test_options.proto:47:32
+desc_test_options.proto:47:33
+
+
+ > enum_type[0]:
+desc_test_options.proto:51:1
+desc_test_options.proto:53:2
+ Leading comments:
+ Test enum used by custom options
+
+
+
+ > enum_type[0] > name:
+desc_test_options.proto:51:6
+desc_test_options.proto:51:22
+
+
+ > enum_type[0] > value[0]:
+desc_test_options.proto:52:9
+desc_test_options.proto:52:19
+
+
+ > enum_type[0] > value[0] > name:
+desc_test_options.proto:52:9
+desc_test_options.proto:52:14
+
+
+ > enum_type[0] > value[0] > number:
+desc_test_options.proto:52:17
+desc_test_options.proto:52:18
+
+
+ > extension:
+desc_test_options.proto:55:1
+desc_test_options.proto:58:2
+
+
+ > extension[17]:
+desc_test_options.proto:56:9
+desc_test_options.proto:56:41
+
+
+ > extension[17] > extendee:
+desc_test_options.proto:55:8
+desc_test_options.proto:55:45
+
+
+ > extension[17] > label:
+desc_test_options.proto:56:9
+desc_test_options.proto:56:17
+
+
+ > extension[17] > type:
+desc_test_options.proto:56:18
+desc_test_options.proto:56:24
+
+
+ > extension[17] > name:
+desc_test_options.proto:56:25
+desc_test_options.proto:56:32
+
+
+ > extension[17] > number:
+desc_test_options.proto:56:35
+desc_test_options.proto:56:40
+
+
+ > extension[18]:
+desc_test_options.proto:57:9
+desc_test_options.proto:57:41
+
+
+ > extension[18] > extendee:
+desc_test_options.proto:55:8
+desc_test_options.proto:55:45
+
+
+ > extension[18] > label:
+desc_test_options.proto:57:9
+desc_test_options.proto:57:17
+
+
+ > extension[18] > type:
+desc_test_options.proto:57:18
+desc_test_options.proto:57:23
+
+
+ > extension[18] > name:
+desc_test_options.proto:57:24
+desc_test_options.proto:57:32
+
+
+ > extension[18] > number:
+desc_test_options.proto:57:35
+desc_test_options.proto:57:40
+
+
+ > extension:
+desc_test_options.proto:60:1
+desc_test_options.proto:63:2
+
+
+ > extension[19]:
+desc_test_options.proto:61:9
+desc_test_options.proto:61:41
+
+
+ > extension[19] > extendee:
+desc_test_options.proto:60:8
+desc_test_options.proto:60:36
+
+
+ > extension[19] > label:
+desc_test_options.proto:61:9
+desc_test_options.proto:61:17
+
+
+ > extension[19] > type:
+desc_test_options.proto:61:18
+desc_test_options.proto:61:24
+
+
+ > extension[19] > name:
+desc_test_options.proto:61:25
+desc_test_options.proto:61:32
+
+
+ > extension[19] > number:
+desc_test_options.proto:61:35
+desc_test_options.proto:61:40
+
+
+ > extension[20]:
+desc_test_options.proto:62:9
+desc_test_options.proto:62:41
+
+
+ > extension[20] > extendee:
+desc_test_options.proto:60:8
+desc_test_options.proto:60:36
+
+
+ > extension[20] > label:
+desc_test_options.proto:62:9
+desc_test_options.proto:62:17
+
+
+ > extension[20] > type:
+desc_test_options.proto:62:18
+desc_test_options.proto:62:23
+
+
+ > extension[20] > name:
+desc_test_options.proto:62:24
+desc_test_options.proto:62:32
+
+
+ > extension[20] > number:
+desc_test_options.proto:62:35
+desc_test_options.proto:62:40
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoprint/doc.go b/vendor/github.com/jhump/protoreflect/desc/protoprint/doc.go
new file mode 100644
index 0000000..b56e8ac
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoprint/doc.go
@@ -0,0 +1,7 @@
+// Package protoprint provides a mechanism to generate protobuf source code
+// from descriptors.
+//
+// This can be useful to turn file descriptor sets (produced by protoc) back
+// into proto IDL code. Combined with the protoreflect/builder package, it can
+// also be used to perform code generation of proto source code.
+package protoprint
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoprint/message_literal.go b/vendor/github.com/jhump/protoreflect/desc/protoprint/message_literal.go
new file mode 100644
index 0000000..4bc0d1a
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoprint/message_literal.go
@@ -0,0 +1,315 @@
+package protoprint
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "strings"
+
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/types/dynamicpb"
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// printMessageLiteralCompact renders msg as a single-line message literal.
+// Passing threshold 0 and indent -1 selects the compact (no-newline) form.
+func (p *Printer) printMessageLiteralCompact(msg protoreflect.Message, res *protoregistry.Types, pkg, scope string) string {
+	var buf bytes.Buffer
+	p.printMessageLiteralToBuffer(&buf, msg, res, pkg, scope, 0, -1)
+	return buf.String()
+}
+
+// printMessageLiteral renders msg as a message literal string. The threshold
+// and indent parameters are forwarded to printMessageLiteralToBuffer and
+// control when nested values are expanded across multiple lines.
+func (p *Printer) printMessageLiteral(msg protoreflect.Message, res *protoregistry.Types, pkg, scope string, threshold, indent int) string {
+	var buf bytes.Buffer
+	p.printMessageLiteralToBuffer(&buf, msg, res, pkg, scope, threshold, indent)
+	return buf.String()
+}
+
+var (
+	// anyTypeName is the full name of google.protobuf.Any; it is used to
+	// detect Any messages so they can be rendered in the expanded
+	// "[type_url]: { ... }" form.
+	anyTypeName = (&anypb.Any{}).ProtoReflect().Descriptor().FullName()
+)
+
+const (
+	// Field numbers of google.protobuf.Any's type_url and value fields.
+	anyTypeUrlTag = 1
+	anyValueTag   = 2
+)
+
+// printMessageLiteralToBuffer writes msg as a message literal into buf.
+// Populated fields are emitted in field-number order. An indent < 0 selects
+// the single-line compact form; otherwise each field goes on its own line at
+// the given indent level. Any-typed messages are special-cased via
+// maybePrintAnyMessageToBuffer.
+func (p *Printer) printMessageLiteralToBuffer(buf *bytes.Buffer, msg protoreflect.Message, res *protoregistry.Types, pkg, scope string, threshold, indent int) {
+	if p.maybePrintAnyMessageToBuffer(buf, msg, res, pkg, scope, threshold, indent) {
+		return
+	}
+
+	buf.WriteRune('{')
+	if indent >= 0 {
+		indent++
+	}
+
+	type fieldVal struct {
+		fld protoreflect.FieldDescriptor
+		val protoreflect.Value
+	}
+	// Collect populated fields, then sort by field number so the output is
+	// deterministic (Range iterates in unspecified order).
+	var fields []fieldVal
+	msg.Range(func(fld protoreflect.FieldDescriptor, val protoreflect.Value) bool {
+		fields = append(fields, fieldVal{fld: fld, val: val})
+		return true
+	})
+	sort.Slice(fields, func(i, j int) bool {
+		return fields[i].fld.Number() < fields[j].fld.Number()
+	})
+
+	for i, fldVal := range fields {
+		fld, val := fldVal.fld, fldVal.val
+		if i > 0 {
+			buf.WriteRune(',')
+		}
+		p.maybeNewline(buf, indent)
+		// Extension names are bracketed and qualified relative to the
+		// current package/scope; regular fields use their short name.
+		if fld.IsExtension() {
+			buf.WriteRune('[')
+			buf.WriteString(p.qualifyExtensionLiteralName(pkg, scope, string(fld.FullName())))
+			buf.WriteRune(']')
+		} else {
+			buf.WriteString(string(fld.Name()))
+		}
+		buf.WriteString(": ")
+		// Dispatch on the field's shape: repeated, map, nested message/group,
+		// or scalar.
+		switch {
+		case fld.IsList():
+			p.printArrayLiteralToBufferMaybeCompact(buf, fld, val.List(), res, pkg, scope, threshold, indent)
+		case fld.IsMap():
+			p.printMapLiteralToBufferMaybeCompact(buf, fld, val.Map(), res, pkg, scope, threshold, indent)
+		case fld.Kind() == protoreflect.MessageKind || fld.Kind() == protoreflect.GroupKind:
+			p.printMessageLiteralToBufferMaybeCompact(buf, val.Message(), res, pkg, scope, threshold, indent)
+		default:
+			p.printValueLiteralToBuffer(buf, fld, val.Interface())
+		}
+	}
+
+	if indent >= 0 {
+		indent--
+	}
+	p.maybeNewline(buf, indent)
+	buf.WriteRune('}')
+}
+
+// printMessageLiteralToBufferMaybeCompact writes msg to buf, preferring the
+// single-line compact form. The compact form is kept when the literal is
+// trivially small (at most one comma, i.e. two fields, and no nested
+// message braces) or when it fits within threshold characters; otherwise
+// the multi-line form is used. When indent < 0 the caller already wants
+// compact output, so the check is skipped.
+func (p *Printer) printMessageLiteralToBufferMaybeCompact(buf *bytes.Buffer, msg protoreflect.Message, res *protoregistry.Types, pkg, scope string, threshold, indent int) {
+	if indent >= 0 {
+		// first see if the message is compact enough to fit on one line
+		str := p.printMessageLiteralCompact(msg, res, pkg, scope)
+		fieldCount := strings.Count(str, ",")
+		nestedCount := strings.Count(str, "{") - 1
+		if fieldCount <= 1 && nestedCount == 0 {
+			// can't expand
+			buf.WriteString(str)
+			return
+		}
+		if len(str) <= threshold {
+			// no need to expand
+			buf.WriteString(str)
+			return
+		}
+	}
+	p.printMessageLiteralToBuffer(buf, msg, res, pkg, scope, threshold, indent)
+}
+
+// maybePrintAnyMessageToBuffer checks whether msg is a google.protobuf.Any
+// whose packed payload can be resolved and decoded via res. If so, it prints
+// the message in the expanded "{ [type_url]: <payload literal> }" form and
+// returns true. It returns false — writing nothing — when msg is not an Any,
+// its type_url/value fields do not have the expected kinds, the type URL is
+// empty or unresolvable, or the payload fails to unmarshal; the caller then
+// falls back to regular field-by-field rendering.
+func (p *Printer) maybePrintAnyMessageToBuffer(buf *bytes.Buffer, msg protoreflect.Message, res *protoregistry.Types, pkg, scope string, threshold, indent int) bool {
+	md := msg.Descriptor()
+	if md.FullName() != anyTypeName {
+		return false
+	}
+	// Sanity-check the descriptor shape: type_url must be a singular string
+	// and value a singular bytes field.
+	typeUrlFld := md.Fields().ByNumber(anyTypeUrlTag)
+	if typeUrlFld == nil || typeUrlFld.Kind() != protoreflect.StringKind || typeUrlFld.Cardinality() == protoreflect.Repeated {
+		return false
+	}
+	valueFld := md.Fields().ByNumber(anyValueTag)
+	if valueFld == nil || valueFld.Kind() != protoreflect.BytesKind || valueFld.Cardinality() == protoreflect.Repeated {
+		return false
+	}
+	typeUrl := msg.Get(typeUrlFld).String()
+	if typeUrl == "" {
+		return false
+	}
+	mt, err := res.FindMessageByURL(typeUrl)
+	if err != nil {
+		return false
+	}
+	// Decode the packed bytes into a message of the resolved type, using res
+	// to resolve any nested extensions/Anys.
+	valueMsg := mt.New()
+	valueBytes := msg.Get(valueFld).Bytes()
+	if err := (proto.UnmarshalOptions{Resolver: res}).Unmarshal(valueBytes, valueMsg.Interface()); err != nil {
+		return false
+	}
+
+	buf.WriteRune('{')
+	if indent >= 0 {
+		indent++
+	}
+	p.maybeNewline(buf, indent)
+
+	buf.WriteRune('[')
+	buf.WriteString(typeUrl)
+	buf.WriteString("]: ")
+	p.printMessageLiteralToBufferMaybeCompact(buf, valueMsg, res, pkg, scope, threshold, indent)
+
+	if indent >= 0 {
+		indent--
+	}
+	p.maybeNewline(buf, indent)
+	buf.WriteRune('}')
+
+	return true
+}
+
+// printValueLiteralToBuffer writes a scalar field value to buf. Enum values
+// are written by name when the number maps to a known value (falling back to
+// the numeric form), strings and bytes are quoted and escaped, integers use
+// %d, and floats use %f; anything else falls through to %v.
+func (p *Printer) printValueLiteralToBuffer(buf *bytes.Buffer, fld protoreflect.FieldDescriptor, value interface{}) {
+	switch val := value.(type) {
+	case protoreflect.EnumNumber:
+		ev := fld.Enum().Values().ByNumber(val)
+		if ev == nil {
+			// Unknown enum number: print it numerically.
+			_, _ = fmt.Fprintf(buf, "%v", value)
+		} else {
+			buf.WriteString(string(ev.Name()))
+		}
+	case string:
+		buf.WriteString(quotedString(val))
+	case []byte:
+		buf.WriteString(quotedBytes(string(val)))
+	case int32, uint32, int64, uint64:
+		_, _ = fmt.Fprintf(buf, "%d", val)
+	case float32, float64:
+		_, _ = fmt.Fprintf(buf, "%f", val)
+	default:
+		_, _ = fmt.Fprintf(buf, "%v", val)
+	}
+}
+
+// maybeNewline writes the element separator appropriate to the current form:
+// a single space in compact form (indent < 0), otherwise a newline followed
+// by indentation at the given level.
+func (p *Printer) maybeNewline(buf *bytes.Buffer, indent int) {
+	if indent < 0 {
+		// compact form
+		buf.WriteRune(' ')
+		return
+	}
+	buf.WriteRune('\n')
+	p.indent(buf, indent)
+}
+
+// printArrayLiteralToBufferMaybeCompact writes a repeated field's values to
+// buf, preferring the single-line compact form. As with messages, the
+// compact form is kept when the rendered array is trivially small (at most
+// one comma and no nested message braces) or fits within threshold
+// characters; otherwise the multi-line form is used.
+func (p *Printer) printArrayLiteralToBufferMaybeCompact(buf *bytes.Buffer, fld protoreflect.FieldDescriptor, val protoreflect.List, res *protoregistry.Types, pkg, scope string, threshold, indent int) {
+	if indent >= 0 {
+		// first see if the array is compact enough to fit on one line
+		str := p.printArrayLiteralCompact(fld, val, res, pkg, scope)
+		elementCount := strings.Count(str, ",")
+		nestedCount := strings.Count(str, "{") - 1
+		if elementCount <= 1 && nestedCount == 0 {
+			// can't expand
+			buf.WriteString(str)
+			return
+		}
+		if len(str) <= threshold {
+			// no need to expand
+			buf.WriteString(str)
+			return
+		}
+	}
+	p.printArrayLiteralToBuffer(buf, fld, val, res, pkg, scope, threshold, indent)
+}
+
+// printArrayLiteralCompact renders a repeated field's values as a
+// single-line array literal (threshold 0, indent -1 selects the compact
+// form).
+func (p *Printer) printArrayLiteralCompact(fld protoreflect.FieldDescriptor, val protoreflect.List, res *protoregistry.Types, pkg, scope string) string {
+	var buf bytes.Buffer
+	p.printArrayLiteralToBuffer(&buf, fld, val, res, pkg, scope, 0, -1)
+	return buf.String()
+}
+
+// printArrayLiteralToBuffer writes the elements of a repeated field as a
+// bracketed, comma-separated array literal. Message/group elements recurse
+// through printMessageLiteralToBufferMaybeCompact; scalars go through
+// printValueLiteralToBuffer. An indent < 0 keeps everything on one line.
+func (p *Printer) printArrayLiteralToBuffer(buf *bytes.Buffer, fld protoreflect.FieldDescriptor, val protoreflect.List, res *protoregistry.Types, pkg, scope string, threshold, indent int) {
+	buf.WriteRune('[')
+	if indent >= 0 {
+		indent++
+	}
+
+	for i := 0; i < val.Len(); i++ {
+		if i > 0 {
+			buf.WriteRune(',')
+		}
+		p.maybeNewline(buf, indent)
+		if fld.Kind() == protoreflect.MessageKind || fld.Kind() == protoreflect.GroupKind {
+			p.printMessageLiteralToBufferMaybeCompact(buf, val.Get(i).Message(), res, pkg, scope, threshold, indent)
+		} else {
+			p.printValueLiteralToBuffer(buf, fld, val.Get(i).Interface())
+		}
+	}
+
+	if indent >= 0 {
+		indent--
+	}
+	p.maybeNewline(buf, indent)
+	buf.WriteRune(']')
+}
+
+// printMapLiteralToBufferMaybeCompact writes a map field's entries to buf,
+// using the single-line compact form when it fits within threshold
+// characters and the multi-line form otherwise. When indent < 0 the caller
+// already wants compact output, so the length check is skipped.
+func (p *Printer) printMapLiteralToBufferMaybeCompact(buf *bytes.Buffer, fld protoreflect.FieldDescriptor, val protoreflect.Map, res *protoregistry.Types, pkg, scope string, threshold, indent int) {
+	if indent >= 0 {
+		// first see if the map is compact enough to fit on one line
+		str := p.printMapLiteralCompact(fld, val, res, pkg, scope)
+		if len(str) <= threshold {
+			buf.WriteString(str)
+			return
+		}
+	}
+	p.printMapLiteralToBuffer(buf, fld, val, res, pkg, scope, threshold, indent)
+}
+
+// printMapLiteralCompact renders a map field's entries as a single-line
+// literal (threshold 0, indent -1 selects the compact form).
+func (p *Printer) printMapLiteralCompact(fld protoreflect.FieldDescriptor, val protoreflect.Map, res *protoregistry.Types, pkg, scope string) string {
+	var buf bytes.Buffer
+	p.printMapLiteralToBuffer(&buf, fld, val, res, pkg, scope, 0, -1)
+	return buf.String()
+}
+
+// printMapLiteralToBuffer writes a map field by adapting it to a list of
+// synthesized map-entry messages (via mapAsList) with keys sorted for
+// deterministic output, then reusing the array-literal printing path.
+func (p *Printer) printMapLiteralToBuffer(buf *bytes.Buffer, fld protoreflect.FieldDescriptor, val protoreflect.Map, res *protoregistry.Types, pkg, scope string, threshold, indent int) {
+	keys := sortKeys(val)
+	l := &mapAsList{
+		m:      val,
+		entry:  dynamicpb.NewMessageType(fld.Message()),
+		keyFld: fld.MapKey(),
+		valFld: fld.MapValue(),
+		keys:   keys,
+	}
+	p.printArrayLiteralToBuffer(buf, fld, l, res, pkg, scope, threshold, indent)
+}
+
+// mapAsList adapts a protoreflect.Map to the protoreflect.List interface so
+// map fields can be printed through the same code path as repeated fields.
+// Each list element is a freshly synthesized dynamic map-entry message. The
+// keys slice is pre-sorted by the caller, fixing iteration order. Only the
+// read side of the List interface is implemented; the printer never mutates
+// the list, so all mutating methods panic.
+type mapAsList struct {
+	m              protoreflect.Map
+	entry          protoreflect.MessageType
+	keyFld, valFld protoreflect.FieldDescriptor
+	keys           []protoreflect.MapKey
+}
+
+// Len returns the number of map entries.
+func (m *mapAsList) Len() int {
+	return len(m.keys)
+}
+
+// Get builds and returns a new map-entry message for the i-th (sorted) key.
+func (m *mapAsList) Get(i int) protoreflect.Value {
+	msg := m.entry.New()
+	key := m.keys[i]
+	msg.Set(m.keyFld, key.Value())
+	val := m.m.Get(key)
+	msg.Set(m.valFld, val)
+	return protoreflect.ValueOfMessage(msg)
+}
+
+// Set is unsupported; mapAsList is read-only.
+func (m *mapAsList) Set(_i int, _ protoreflect.Value) {
+	panic("Set is not implemented")
+}
+
+// Append is unsupported; mapAsList is read-only.
+func (m *mapAsList) Append(_ protoreflect.Value) {
+	panic("Append is not implemented")
+}
+
+// AppendMutable is unsupported; mapAsList is read-only.
+func (m *mapAsList) AppendMutable() protoreflect.Value {
+	panic("AppendMutable is not implemented")
+}
+
+// Truncate is unsupported; mapAsList is read-only.
+func (m *mapAsList) Truncate(_ int) {
+	panic("Truncate is not implemented")
+}
+
+// NewElement is unsupported; mapAsList is read-only.
+func (m *mapAsList) NewElement() protoreflect.Value {
+	panic("NewElement is not implemented")
+}
+
+// IsValid reports that the adapter is always usable.
+func (m *mapAsList) IsValid() bool {
+	return true
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoprint/print.go b/vendor/github.com/jhump/protoreflect/desc/protoprint/print.go
new file mode 100644
index 0000000..0197964
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoprint/print.go
@@ -0,0 +1,2744 @@
+package protoprint
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "os"
+ "path/filepath"
+ "reflect"
+ "sort"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ protov1 "github.com/golang/protobuf/proto"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/types/descriptorpb"
+ "google.golang.org/protobuf/types/dynamicpb"
+
+ "github.com/jhump/protoreflect/desc"
+ "github.com/jhump/protoreflect/desc/internal"
+)
+
// Printer knows how to format file descriptors as proto source code. Its fields
// provide some control over how the resulting source file is constructed and
// formatted.
//
// The zero value is usable and prints with default settings. Note that
// printing normalizes some fields in place (Indent is canonicalized and
// OmitDetachedComments is folded into OmitComments), so a single Printer
// value should not be shared by concurrent printing operations.
type Printer struct {
	// If true, comments are rendered using "/*" style comments. Otherwise, they
	// are printed using "//" style line comments.
	PreferMultiLineStyleComments bool

	// If true, elements are sorted into a canonical order.
	//
	// The canonical order for elements in a file follows:
	//  1. Syntax
	//  2. Package
	//  3. Imports (sorted lexically)
	//  4. Options (sorted by name, standard options before custom options)
	//  5. Messages (sorted by name)
	//  6. Enums (sorted by name)
	//  7. Services (sorted by name)
	//  8. Extensions (grouped by extendee, sorted by extendee+tag)
	//
	// The canonical order of elements in a message follows:
	//  1. Options (sorted by name, standard options before custom options)
	//  2. Fields and One-Ofs (sorted by tag; one-ofs interleaved based on the
	//     minimum tag therein)
	//  3. Nested Messages (sorted by name)
	//  4. Nested Enums (sorted by name)
	//  5. Extension ranges (sorted by starting tag number)
	//  6. Nested Extensions (grouped by extendee, sorted by extendee+tag)
	//  7. Reserved ranges (sorted by starting tag number)
	//  8. Reserved names (sorted lexically)
	//
	// Methods are sorted within a service by name and appear after any service
	// options (which are sorted by name, standard options before custom ones).
	// Enum values are sorted within an enum, first by numeric value then by
	// name, and also appear after any enum options.
	//
	// Options for fields, enum values, and extension ranges are sorted by name,
	// standard options before custom ones.
	SortElements bool

	// The "less" function used to sort elements when printing. It is given two
	// elements, a and b, and should return true if a is "less than" b. In this
	// case, "less than" means that element a should appear earlier in the file
	// than element b.
	//
	// If this field is nil, no custom sorting is done and the SortElements
	// field is consulted to decide how to order the output. If this field is
	// non-nil, the SortElements field is ignored and this function is called to
	// order elements.
	CustomSortFunction func(a, b Element) bool

	// The indentation used. Any characters other than spaces or tabs will be
	// replaced with spaces. If unset/empty, two spaces will be used.
	Indent string

	// If true, detached comments (between elements) will be ignored.
	//
	// Deprecated: Use OmitComments bitmask instead.
	OmitDetachedComments bool

	// A bitmask of comment types to omit. If unset, all comments will be
	// included. Use CommentsAll to not print any comments.
	OmitComments CommentType

	// If true, trailing comments that typically appear on the same line as an
	// element (option, field, enum value, method) will be printed on a separate
	// line instead.
	//
	// So, with this set, you'll get output like so:
	//
	//	// leading comment for field
	//	repeated string names = 1;
	//	// trailing comment
	//
	// If left false, the printer will try to emit trailing comments on the same
	// line instead:
	//
	//	// leading comment for field
	//	repeated string names = 1; // trailing comment
	//
	// If the trailing comment has more than one line, it will automatically be
	// forced to the next line.
	TrailingCommentsOnSeparateLine bool

	// If true, the printed output will eschew any blank lines, which otherwise
	// appear between descriptor elements and comment blocks. Note that if
	// detached comments are being printed, this will cause them to be merged
	// into the subsequent leading comments. Similarly, any element trailing
	// comments will be merged into the subsequent leading comments.
	Compact bool

	// If true, all references to messages, extensions, and enums (such as in
	// options, field types, and method request and response types) will be
	// fully-qualified. When left unset, the referenced elements will contain
	// only as much qualifier as is required.
	//
	// For example, if a message is in the same package as the reference, the
	// simple name can be used. If a message shares some context with the
	// reference, only the unshared context needs to be included. For example:
	//
	//	message Foo {
	//	  message Bar {
	//	    enum Baz {
	//	      ZERO = 0;
	//	      ONE = 1;
	//	    }
	//	  }
	//
	//	  // This field shares some context as the enum it references: they are
	//	  // both inside of the namespace Foo:
	//	  //    field is "Foo.my_baz"
	//	  //    enum is "Foo.Bar.Baz"
	//	  // So we only need to qualify the reference with the context that they
	//	  // do NOT have in common:
	//	  Bar.Baz my_baz = 1;
	//	}
	//
	// When printing fully-qualified names, they will be preceded by a dot, to
	// avoid any ambiguity that they might be relative vs. fully-qualified.
	ForceFullyQualifiedNames bool

	// The number of options that trigger short options expressions to be
	// rendered using multiple lines. Short options expressions are those
	// found on fields and enum values, that use brackets ("[" and "]") and
	// comma-separated options. If more options than this are present, they
	// will be expanded to multiple lines (one option per line).
	//
	// If unset (e.g. if zero), a default threshold of 3 is used.
	ShortOptionsExpansionThresholdCount int

	// The length of printed options that trigger short options expressions to
	// be rendered using multiple lines. If the short options contain more than
	// one option and their printed length is longer than this threshold, they
	// will be expanded to multiple lines (one option per line).
	//
	// If unset (e.g. if zero), a default threshold of 50 is used.
	ShortOptionsExpansionThresholdLength int

	// The length of a printed option value message literal that triggers the
	// message literal to be rendered using multiple lines instead of using a
	// compact single-line form. The message must include at least two fields
	// or contain a field that is a nested message to be expanded.
	//
	// This value is further used to decide when to expand individual field
	// values that are nested message literals or array literals (for repeated
	// fields).
	//
	// If unset (e.g. if zero), a default threshold of 50 is used.
	MessageLiteralExpansionThresholdLength int
}
+
// CommentType is a kind of comments in a proto source file. This can be used
// as a bitmask.
type CommentType int

const (
	// CommentsDetached refers to comments that are not "attached" to any
	// source element. They are attributed to the subsequent element in the
	// file as "detached" comments.
	CommentsDetached CommentType = 1 << iota
	// CommentsTrailing refers to a comment block immediately following an
	// element in the source file. If another element immediately follows
	// the trailing comment, it is instead considered a leading comment for
	// that subsequent element.
	CommentsTrailing
	// CommentsLeading refers to a comment block immediately preceding an
	// element in the source file. For high-level elements (those that have
	// their own descriptor), these are used as doc comments for that element.
	CommentsLeading
	// CommentsTokens refers to any comments (leading, trailing, or detached)
	// on low-level elements in the file. "High-level" elements have their own
	// descriptors, e.g. messages, enums, fields, services, and methods. But
	// comments can appear anywhere (such as around identifiers and keywords,
	// sprinkled inside the declarations of a high-level element). This class
	// of comments are for those extra comments sprinkled into the file.
	CommentsTokens

	// CommentsNonDoc refers to comments that are *not* doc comments. This is a
	// bitwise union of everything other than CommentsLeading. If you configure
	// a printer to omit this, only doc comments on descriptor elements will be
	// included in the printed output.
	CommentsNonDoc = CommentsDetached | CommentsTrailing | CommentsTokens
	// CommentsAll indicates all kinds of comments. If you configure a printer
	// to omit this, no comments will appear in the printed output, even if the
	// input descriptors had source info and comments. (The value -1 has all
	// bits set, so it matches every comment type, including any added later.)
	CommentsAll = -1
)
+
+// PrintProtoFiles prints all of the given file descriptors. The given open
+// function is given a file name and is responsible for creating the outputs and
+// returning the corresponding writer.
+func (p *Printer) PrintProtoFiles(fds []*desc.FileDescriptor, open func(name string) (io.WriteCloser, error)) error {
+ for _, fd := range fds {
+ w, err := open(fd.GetName())
+ if err != nil {
+ return fmt.Errorf("failed to open %s: %v", fd.GetName(), err)
+ }
+ err = func() error {
+ defer w.Close()
+ return p.PrintProtoFile(fd, w)
+ }()
+ if err != nil {
+ return fmt.Errorf("failed to write %s: %v", fd.GetName(), err)
+ }
+ }
+ return nil
+}
+
+// PrintProtosToFileSystem prints all of the given file descriptors to files in
+// the given directory. If file names in the given descriptors include path
+// information, they will be relative to the given root.
+func (p *Printer) PrintProtosToFileSystem(fds []*desc.FileDescriptor, rootDir string) error {
+ return p.PrintProtoFiles(fds, func(name string) (io.WriteCloser, error) {
+ fullPath := filepath.Join(rootDir, name)
+ dir := filepath.Dir(fullPath)
+ if err := os.MkdirAll(dir, os.ModePerm); err != nil {
+ return nil, err
+ }
+ return os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
+ })
+}
+
// pkg represents a package name, as found in a file's "package" statement.
type pkg string

// imp represents an imported file name, as found in an "import" statement.
type imp string

// ident represents an identifier.
type ident string

// messageVal represents a message value for an option, along with the
// package and scope where it appears (used when qualifying names that
// occur inside the printed message literal).
type messageVal struct {
	// the package and scope in which the option value is defined
	pkg, scope string
	// the option value
	msg proto.Message
}

// option represents a resolved descriptor option: its name and its value.
type option struct {
	name string
	val  interface{}
}

// reservedRange represents a reserved range from a message or enum.
type reservedRange struct {
	start, end int32
}
+
// PrintProtoFile prints the given single file descriptor to the given writer.
// The descriptor is rendered as complete proto source code.
func (p *Printer) PrintProtoFile(fd *desc.FileDescriptor, out io.Writer) error {
	return p.printProto(fd, out)
}
+
+// PrintProtoToString prints the given descriptor and returns the resulting
+// string. This can be used to print proto files, but it can also be used to get
+// the proto "source form" for any kind of descriptor, which can be a more
+// user-friendly way to present descriptors that are intended for human
+// consumption.
+func (p *Printer) PrintProtoToString(dsc desc.Descriptor) (string, error) {
+ var buf bytes.Buffer
+ if err := p.printProto(dsc, &buf); err != nil {
+ return "", err
+ }
+ return buf.String(), nil
+}
+
// printProto renders any kind of descriptor to out in proto source form.
// A file descriptor is printed in full; any other descriptor kind is printed
// as a stand-alone declaration (an extension field is wrapped in an "extend"
// block). Before printing, it normalizes the printer's settings in place,
// builds a source-info map for comments, registers all types visible to the
// descriptor's file, and re-parses unknown option fields against them.
// Returns the first write error encountered, if any.
func (p *Printer) printProto(dsc desc.Descriptor, out io.Writer) error {
	w := newWriter(out)

	if p.Indent == "" {
		// default indent to two spaces
		p.Indent = "  "
	} else {
		// indent must be all spaces or tabs, so convert other chars to spaces
		ind := make([]rune, 0, len(p.Indent))
		for _, r := range p.Indent {
			if r == '\t' {
				ind = append(ind, r)
			} else {
				ind = append(ind, ' ')
			}
		}
		p.Indent = string(ind)
	}
	if p.OmitDetachedComments {
		// honor the deprecated flag by folding it into the bitmask
		p.OmitComments |= CommentsDetached
	}

	fdp := dsc.GetFile().AsFileDescriptorProto()
	sourceInfo := internal.CreateSourceInfoMap(fdp)
	extendOptionLocations(sourceInfo, fdp.GetSourceCodeInfo().GetLocation())

	var reg protoregistry.Types
	internal.RegisterTypesVisibleToFile(&reg, dsc.GetFile().UnwrapFile())
	reparseUnknown(&reg, fdp.ProtoReflect())

	// path locates dsc within its file's source info
	path := findElement(dsc)
	switch d := dsc.(type) {
	case *desc.FileDescriptor:
		p.printFile(d, &reg, w, sourceInfo)
	case *desc.MessageDescriptor:
		p.printMessage(d, &reg, w, sourceInfo, path, 0)
	case *desc.FieldDescriptor:
		var scope string
		if md, ok := d.GetParent().(*desc.MessageDescriptor); ok {
			scope = md.GetFullyQualifiedName()
		} else {
			scope = d.GetFile().GetPackage()
		}
		if d.IsExtension() {
			// an extension is presented inside an enclosing "extend" block
			_, _ = fmt.Fprint(w, "extend ")
			extNameSi := sourceInfo.Get(append(path, internal.Field_extendeeTag))
			p.printElementString(extNameSi, w, 0, p.qualifyName(d.GetFile().GetPackage(), scope, d.GetOwner().GetFullyQualifiedName()))
			_, _ = fmt.Fprintln(w, "{")

			p.printField(d, &reg, w, sourceInfo, path, scope, 1)

			_, _ = fmt.Fprintln(w, "}")
		} else {
			p.printField(d, &reg, w, sourceInfo, path, scope, 0)
		}
	case *desc.OneOfDescriptor:
		md := d.GetOwner()
		elements := elementAddrs{dsc: md}
		for i := range md.GetFields() {
			elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Message_fieldsTag, elementIndex: i})
		}
		// the last path element is the one-of's index within its parent message
		p.printOneOf(d, elements, 0, &reg, w, sourceInfo, path[:len(path)-1], 0, path[len(path)-1])
	case *desc.EnumDescriptor:
		p.printEnum(d, &reg, w, sourceInfo, path, 0)
	case *desc.EnumValueDescriptor:
		p.printEnumValue(d, &reg, w, sourceInfo, path, 0)
	case *desc.ServiceDescriptor:
		p.printService(d, &reg, w, sourceInfo, path, 0)
	case *desc.MethodDescriptor:
		p.printMethod(d, &reg, w, sourceInfo, path, 0)
	}

	return w.err
}
+
// findElement computes the source-info path (as used by SourceCodeInfo) of
// the given descriptor within its file. It recursively resolves the parent's
// path, then appends the field tag and index identifying dsc within that
// parent. The file descriptor itself (no parent) yields an empty path.
// Panics for descriptor types it does not recognize or elements it cannot
// locate in their parent.
func findElement(dsc desc.Descriptor) []int32 {
	if dsc.GetParent() == nil {
		// reached the file descriptor: the path is rooted here
		return nil
	}
	path := findElement(dsc.GetParent())
	switch d := dsc.(type) {
	case *desc.MessageDescriptor:
		if pm, ok := d.GetParent().(*desc.MessageDescriptor); ok {
			return append(path, internal.Message_nestedMessagesTag, getMessageIndex(d, pm.GetNestedMessageTypes()))
		}
		return append(path, internal.File_messagesTag, getMessageIndex(d, d.GetFile().GetMessageTypes()))

	case *desc.FieldDescriptor:
		if d.IsExtension() {
			// extensions may be declared at file level or nested in a message
			if pm, ok := d.GetParent().(*desc.MessageDescriptor); ok {
				return append(path, internal.Message_extensionsTag, getFieldIndex(d, pm.GetNestedExtensions()))
			}
			return append(path, internal.File_extensionsTag, getFieldIndex(d, d.GetFile().GetExtensions()))
		}
		return append(path, internal.Message_fieldsTag, getFieldIndex(d, d.GetOwner().GetFields()))

	case *desc.OneOfDescriptor:
		return append(path, internal.Message_oneOfsTag, getOneOfIndex(d, d.GetOwner().GetOneOfs()))

	case *desc.EnumDescriptor:
		if pm, ok := d.GetParent().(*desc.MessageDescriptor); ok {
			return append(path, internal.Message_enumsTag, getEnumIndex(d, pm.GetNestedEnumTypes()))
		}
		return append(path, internal.File_enumsTag, getEnumIndex(d, d.GetFile().GetEnumTypes()))

	case *desc.EnumValueDescriptor:
		return append(path, internal.Enum_valuesTag, getEnumValueIndex(d, d.GetEnum().GetValues()))

	case *desc.ServiceDescriptor:
		return append(path, internal.File_servicesTag, getServiceIndex(d, d.GetFile().GetServices()))

	case *desc.MethodDescriptor:
		return append(path, internal.Service_methodsTag, getMethodIndex(d, d.GetService().GetMethods()))

	default:
		panic(fmt.Sprintf("unexpected descriptor type: %T", dsc))
	}
}
+
+func getMessageIndex(md *desc.MessageDescriptor, list []*desc.MessageDescriptor) int32 {
+ for i := range list {
+ if md == list[i] {
+ return int32(i)
+ }
+ }
+ panic(fmt.Sprintf("unable to determine index of message %s", md.GetFullyQualifiedName()))
+}
+
+func getFieldIndex(fd *desc.FieldDescriptor, list []*desc.FieldDescriptor) int32 {
+ for i := range list {
+ if fd == list[i] {
+ return int32(i)
+ }
+ }
+ panic(fmt.Sprintf("unable to determine index of field %s", fd.GetFullyQualifiedName()))
+}
+
+func getOneOfIndex(ood *desc.OneOfDescriptor, list []*desc.OneOfDescriptor) int32 {
+ for i := range list {
+ if ood == list[i] {
+ return int32(i)
+ }
+ }
+ panic(fmt.Sprintf("unable to determine index of oneof %s", ood.GetFullyQualifiedName()))
+}
+
+func getEnumIndex(ed *desc.EnumDescriptor, list []*desc.EnumDescriptor) int32 {
+ for i := range list {
+ if ed == list[i] {
+ return int32(i)
+ }
+ }
+ panic(fmt.Sprintf("unable to determine index of enum %s", ed.GetFullyQualifiedName()))
+}
+
+func getEnumValueIndex(evd *desc.EnumValueDescriptor, list []*desc.EnumValueDescriptor) int32 {
+ for i := range list {
+ if evd == list[i] {
+ return int32(i)
+ }
+ }
+ panic(fmt.Sprintf("unable to determine index of enum value %s", evd.GetFullyQualifiedName()))
+}
+
+func getServiceIndex(sd *desc.ServiceDescriptor, list []*desc.ServiceDescriptor) int32 {
+ for i := range list {
+ if sd == list[i] {
+ return int32(i)
+ }
+ }
+ panic(fmt.Sprintf("unable to determine index of service %s", sd.GetFullyQualifiedName()))
+}
+
+func getMethodIndex(mtd *desc.MethodDescriptor, list []*desc.MethodDescriptor) int32 {
+ for i := range list {
+ if mtd == list[i] {
+ return int32(i)
+ }
+ }
+ panic(fmt.Sprintf("unable to determine index of method %s", mtd.GetFullyQualifiedName()))
+}
+
+func (p *Printer) newLine(w io.Writer) {
+ if !p.Compact {
+ _, _ = fmt.Fprintln(w)
+ }
+}
+
// reparseUnknown recursively re-parses the unknown fields of msg (and of all
// message values reachable from it, including list and map entries) using the
// types in reg. This lets option values that were unrecognized when the
// descriptor was loaded be rendered with their proper fields. Unknown bytes
// are consumed only when they re-parse successfully; otherwise they are left
// untouched.
func reparseUnknown(reg *protoregistry.Types, msg protoreflect.Message) {
	msg.Range(func(fld protoreflect.FieldDescriptor, val protoreflect.Value) bool {
		if fld.Kind() != protoreflect.MessageKind && fld.Kind() != protoreflect.GroupKind {
			// scalar fields cannot carry nested unknown fields
			return true
		}
		if fld.IsList() {
			l := val.List()
			for i := 0; i < l.Len(); i++ {
				reparseUnknown(reg, l.Get(i).Message())
			}
		} else if fld.IsMap() {
			mapVal := fld.MapValue()
			if mapVal.Kind() != protoreflect.MessageKind && mapVal.Kind() != protoreflect.GroupKind {
				// maps with scalar values need no recursion
				return true
			}
			m := val.Map()
			m.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
				reparseUnknown(reg, v.Message())
				return true
			})
		} else {
			reparseUnknown(reg, val.Message())
		}
		return true
	})

	unk := msg.GetUnknown()
	if len(unk) > 0 {
		// attempt to reinterpret the unknown bytes against the richer
		// registry; merge them into msg only if parsing succeeds
		other := msg.New().Interface()
		if err := (proto.UnmarshalOptions{Resolver: reg}).Unmarshal(unk, other); err == nil {
			msg.SetUnknown(nil)
			proto.Merge(msg.Interface(), other)
		}
	}
}
+
+func (p *Printer) printFile(fd *desc.FileDescriptor, reg *protoregistry.Types, w *writer, sourceInfo internal.SourceInfoMap) {
+ opts, err := p.extractOptions(fd, protov1.MessageV2(fd.GetOptions()))
+ if err != nil {
+ return
+ }
+
+ fdp := fd.AsFileDescriptorProto()
+ path := make([]int32, 1)
+
+ path[0] = internal.File_packageTag
+ sourceInfo.PutIfAbsent(append(path, 0), sourceInfo.Get(path))
+
+ path[0] = internal.File_syntaxTag
+ si := sourceInfo.Get(path)
+ p.printElement(false, si, w, 0, func(w *writer) {
+ syn := fdp.GetSyntax()
+ if syn == "editions" {
+ _, _ = fmt.Fprintf(w, "edition = %q;", strings.TrimPrefix(fdp.GetEdition().String(), "EDITION_"))
+ return
+ }
+ if syn == "" {
+ syn = "proto2"
+ }
+ _, _ = fmt.Fprintf(w, "syntax = %q;", syn)
+ })
+ p.newLine(w)
+
+ skip := map[interface{}]bool{}
+
+ elements := elementAddrs{dsc: fd, opts: opts}
+ if fdp.Package != nil {
+ elements.addrs = append(elements.addrs, elementAddr{elementType: internal.File_packageTag, elementIndex: 0, order: -3})
+ }
+ for i := range fdp.GetDependency() {
+ elements.addrs = append(elements.addrs, elementAddr{elementType: internal.File_dependencyTag, elementIndex: i, order: -2})
+ }
+ elements.addrs = append(elements.addrs, optionsAsElementAddrs(internal.File_optionsTag, -1, opts)...)
+ for i := range fd.GetMessageTypes() {
+ elements.addrs = append(elements.addrs, elementAddr{elementType: internal.File_messagesTag, elementIndex: i})
+ }
+ for i := range fd.GetEnumTypes() {
+ elements.addrs = append(elements.addrs, elementAddr{elementType: internal.File_enumsTag, elementIndex: i})
+ }
+ for i := range fd.GetServices() {
+ elements.addrs = append(elements.addrs, elementAddr{elementType: internal.File_servicesTag, elementIndex: i})
+ }
+ exts := p.computeExtensions(sourceInfo, fd.GetExtensions(), []int32{internal.File_extensionsTag})
+ for i, extd := range fd.GetExtensions() {
+ if extd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP {
+ // we don't emit nested messages for groups since
+ // they get special treatment
+ skip[extd.GetMessageType()] = true
+ }
+ elements.addrs = append(elements.addrs, elementAddr{elementType: internal.File_extensionsTag, elementIndex: i})
+ }
+
+ p.sort(elements, sourceInfo, nil)
+
+ pkgName := fd.GetPackage()
+
+ for i, el := range elements.addrs {
+ d := elements.at(el)
+
+ // skip[d] will panic if d is a slice (which it could be for []option),
+ // so just ignore it since we don't try to skip options
+ if reflect.TypeOf(d).Kind() != reflect.Slice && skip[d] {
+ // skip this element
+ continue
+ }
+
+ if i > 0 {
+ p.newLine(w)
+ }
+
+ path = []int32{el.elementType, int32(el.elementIndex)}
+
+ switch d := d.(type) {
+ case pkg:
+ si := sourceInfo.Get(path)
+ p.printElement(false, si, w, 0, func(w *writer) {
+ _, _ = fmt.Fprintf(w, "package %s;", d)
+ })
+ case imp:
+ si := sourceInfo.Get(path)
+ var modifier string
+ for _, idx := range fdp.PublicDependency {
+ if fdp.Dependency[idx] == string(d) {
+ modifier = "public "
+ break
+ }
+ }
+ if modifier == "" {
+ for _, idx := range fdp.WeakDependency {
+ if fdp.Dependency[idx] == string(d) {
+ modifier = "weak "
+ break
+ }
+ }
+ }
+ p.printElement(false, si, w, 0, func(w *writer) {
+ _, _ = fmt.Fprintf(w, "import %s%q;", modifier, d)
+ })
+ case []option:
+ p.printOptionsLong(d, reg, w, sourceInfo, path, 0)
+ case *desc.MessageDescriptor:
+ p.printMessage(d, reg, w, sourceInfo, path, 0)
+ case *desc.EnumDescriptor:
+ p.printEnum(d, reg, w, sourceInfo, path, 0)
+ case *desc.ServiceDescriptor:
+ p.printService(d, reg, w, sourceInfo, path, 0)
+ case *desc.FieldDescriptor:
+ extDecl := exts[d]
+ p.printExtensions(extDecl, exts, elements, i, reg, w, sourceInfo, nil, internal.File_extensionsTag, pkgName, pkgName, 0)
+ // we printed all extensions in the group, so we can skip the others
+ for _, fld := range extDecl.fields {
+ skip[fld] = true
+ }
+ }
+ }
+}
+
+func findExtSi(fieldSi *descriptorpb.SourceCodeInfo_Location, extSis []*descriptorpb.SourceCodeInfo_Location) *descriptorpb.SourceCodeInfo_Location {
+ if len(fieldSi.GetSpan()) == 0 {
+ return nil
+ }
+ for _, extSi := range extSis {
+ if isSpanWithin(fieldSi.Span, extSi.Span) {
+ return extSi
+ }
+ }
+ return nil
+}
+
// isSpanWithin reports whether the start position of span falls inside
// enclosing. Spans follow the SourceCodeInfo convention: three elements
// [line, startCol, endCol] for a single-line span, four elements
// [startLine, startCol, endLine, endCol] otherwise.
func isSpanWithin(span, enclosing []int32) bool {
	first := enclosing[0]
	last := enclosing[2]
	if len(enclosing) == 3 {
		// a three-element span starts and ends on the same line
		last = enclosing[0]
	}
	switch {
	case span[0] < first || span[0] > last:
		return false
	case span[0] == first:
		// on the first line: must begin at or after the start column
		return span[1] >= enclosing[1]
	case span[0] == last:
		// on the last line: must begin at or before the end column
		return span[1] <= enclosing[len(enclosing)-1]
	default:
		return true
	}
}
+
// extensionDecl represents a single "extend" block: the fully-qualified name
// of the extended message, the block's source location (nil when source info
// is unavailable), and the extension fields declared inside it.
type extensionDecl struct {
	extendee   string
	sourceInfo *descriptorpb.SourceCodeInfo_Location
	fields     []*desc.FieldDescriptor
}

// extensions maps each extension field to the "extend" block that declares it.
type extensions map[*desc.FieldDescriptor]*extensionDecl
+
// computeExtensions groups the given extension fields into "extend" block
// declarations. Fields are grouped first by extended message name and then,
// when source info is available, by the enclosing "extend" statement they
// were declared in — so distinct blocks in the original source remain
// distinct when reprinted. The returned map lets a caller find the group for
// any individual extension field.
func (p *Printer) computeExtensions(sourceInfo internal.SourceInfoMap, exts []*desc.FieldDescriptor, path []int32) extensions {
	extsMap := map[string]map[*descriptorpb.SourceCodeInfo_Location]*extensionDecl{}
	extSis := sourceInfo.GetAll(path)
	for _, extd := range exts {
		name := extd.GetOwner().GetFullyQualifiedName()
		extSi := findExtSi(extd.GetSourceInfo(), extSis)
		extsBySi := extsMap[name]
		if extsBySi == nil {
			extsBySi = map[*descriptorpb.SourceCodeInfo_Location]*extensionDecl{}
			extsMap[name] = extsBySi
		}
		extDecl := extsBySi[extSi]
		if extDecl == nil {
			extDecl = &extensionDecl{
				sourceInfo: extSi,
				extendee:   name,
			}
			extsBySi[extSi] = extDecl
		}
		extDecl.fields = append(extDecl.fields, extd)
	}

	// flatten the two-level grouping into a per-field index
	ret := extensions{}
	for _, extsBySi := range extsMap {
		for _, extDecl := range extsBySi {
			for _, extd := range extDecl.fields {
				ret[extd] = extDecl
			}
		}
	}
	return ret
}
+
+func (p *Printer) sort(elements elementAddrs, sourceInfo internal.SourceInfoMap, path []int32) {
+ if p.CustomSortFunction != nil {
+ sort.Stable(customSortOrder{elementAddrs: elements, less: p.CustomSortFunction})
+ } else if p.SortElements {
+ // canonical sorted order
+ sort.Stable(elements)
+ } else {
+ // use source order (per location information in SourceCodeInfo); or
+ // if that isn't present use declaration order, but grouped by type
+ sort.Stable(elementSrcOrder{
+ elementAddrs: elements,
+ sourceInfo: sourceInfo,
+ prefix: path,
+ })
+ }
+}
+
// qualifyMessageOptionName computes the printed name for a message option.
// Message options must at least include the message scope, even if the option
// is inside that message. We do that by requiring we have at least one
// enclosing skip in the qualified name.
func (p *Printer) qualifyMessageOptionName(pkg, scope string, fqn string) string {
	return p.qualifyElementName(pkg, scope, fqn, 1)
}
+
// qualifyExtensionLiteralName computes the printed name for an extension
// inside a message literal. In message literals, extensions can have package
// name omitted but may not have any other scopes omitted. We signal that via
// negative arg.
func (p *Printer) qualifyExtensionLiteralName(pkg, scope string, fqn string) string {
	return p.qualifyElementName(pkg, scope, fqn, -1)
}
+
// qualifyName computes the printed name for an ordinary reference (field
// type, extendee, etc.), allowing any shared scope to be omitted.
func (p *Printer) qualifyName(pkg, scope string, fqn string) string {
	return p.qualifyElementName(pkg, scope, fqn, 0)
}
+
// qualifyElementName computes the name to print for an element whose
// fully-qualified name is fqn, relative to the given package and scope. If
// ForceFullyQualifiedNames is set, the result is always fqn with a leading
// dot. Otherwise the longest matching prefix of scope is stripped from fqn.
// The required argument is the minimum number of enclosing scope components
// that must remain in the result (so, e.g., message option names keep at
// least one qualifier); a negative value restricts stripping to the package
// prefix only.
func (p *Printer) qualifyElementName(pkg, scope string, fqn string, required int) string {
	if p.ForceFullyQualifiedNames {
		// forcing fully-qualified names; make sure to include preceding dot
		if fqn[0] == '.' {
			return fqn
		}
		return fmt.Sprintf(".%s", fqn)
	}

	// compute relative name (so no leading dot)
	if fqn[0] == '.' {
		fqn = fqn[1:]
	}
	if required < 0 {
		// only the package prefix may be omitted
		scope = pkg + "."
	} else if len(scope) > 0 && scope[len(scope)-1] != '.' {
		scope = scope + "."
	}
	count := 0
	for scope != "" {
		if strings.HasPrefix(fqn, scope) && count >= required {
			return fqn[len(scope):]
		}
		if scope == pkg+"." {
			// never strip beyond the package prefix
			break
		}
		// peel off the innermost scope component and try again
		pos := strings.LastIndex(scope[:len(scope)-1], ".")
		scope = scope[:pos+1]
		count++
	}
	return fqn
}
+
+func (p *Printer) typeString(fld *desc.FieldDescriptor, scope string) string {
+ if fld.IsMap() {
+ return fmt.Sprintf("map<%s, %s>", p.typeString(fld.GetMapKeyType(), scope), p.typeString(fld.GetMapValueType(), scope))
+ }
+ fldProto := fld.AsFieldDescriptorProto()
+ if fldProto.Type == nil && fldProto.TypeName != nil {
+ // In an unlinked proto, the type may be absent because it is not known
+ // whether the symbol is a message or an enum. In that case, just return
+ // the type name.
+ return fldProto.GetTypeName()
+ }
+ switch fld.GetType() {
+ case descriptorpb.FieldDescriptorProto_TYPE_INT32:
+ return "int32"
+ case descriptorpb.FieldDescriptorProto_TYPE_INT64:
+ return "int64"
+ case descriptorpb.FieldDescriptorProto_TYPE_UINT32:
+ return "uint32"
+ case descriptorpb.FieldDescriptorProto_TYPE_UINT64:
+ return "uint64"
+ case descriptorpb.FieldDescriptorProto_TYPE_SINT32:
+ return "sint32"
+ case descriptorpb.FieldDescriptorProto_TYPE_SINT64:
+ return "sint64"
+ case descriptorpb.FieldDescriptorProto_TYPE_FIXED32:
+ return "fixed32"
+ case descriptorpb.FieldDescriptorProto_TYPE_FIXED64:
+ return "fixed64"
+ case descriptorpb.FieldDescriptorProto_TYPE_SFIXED32:
+ return "sfixed32"
+ case descriptorpb.FieldDescriptorProto_TYPE_SFIXED64:
+ return "sfixed64"
+ case descriptorpb.FieldDescriptorProto_TYPE_FLOAT:
+ return "float"
+ case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE:
+ return "double"
+ case descriptorpb.FieldDescriptorProto_TYPE_BOOL:
+ return "bool"
+ case descriptorpb.FieldDescriptorProto_TYPE_STRING:
+ return "string"
+ case descriptorpb.FieldDescriptorProto_TYPE_BYTES:
+ return "bytes"
+ case descriptorpb.FieldDescriptorProto_TYPE_ENUM:
+ return p.qualifyName(fld.GetFile().GetPackage(), scope, fld.GetEnumType().GetFullyQualifiedName())
+ case descriptorpb.FieldDescriptorProto_TYPE_MESSAGE:
+ return p.qualifyName(fld.GetFile().GetPackage(), scope, fld.GetMessageType().GetFullyQualifiedName())
+ case descriptorpb.FieldDescriptorProto_TYPE_GROUP:
+ return fld.GetMessageType().GetName()
+ }
+ panic(fmt.Sprintf("invalid type: %v", fld.GetType()))
+}
+
// printMessage prints the declaration of the given message (with any
// associated comments from source info), then delegates its contents to
// printMessageBody at one deeper indent level.
func (p *Printer) printMessage(md *desc.MessageDescriptor, reg *protoregistry.Types, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) {
	si := sourceInfo.Get(path)
	p.printBlockElement(true, si, w, indent, func(w *writer, trailer func(int, bool)) {
		p.indent(w, indent)

		_, _ = fmt.Fprint(w, "message ")
		nameSi := sourceInfo.Get(append(path, internal.Message_nameTag))
		p.printElementString(nameSi, w, indent, md.GetName())
		_, _ = fmt.Fprintln(w, "{")
		// trailer emits any trailing comment attached to the declaration
		trailer(indent+1, true)

		p.printMessageBody(md, reg, w, sourceInfo, path, indent+1)
		p.indent(w, indent)
		_, _ = fmt.Fprintln(w, "}")
	})
}
+
+func (p *Printer) printMessageBody(md *desc.MessageDescriptor, reg *protoregistry.Types, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) {
+ opts, err := p.extractOptions(md, protov1.MessageV2(md.GetOptions()))
+ if err != nil {
+ if w.err == nil {
+ w.err = err
+ }
+ return
+ }
+
+ skip := map[interface{}]bool{}
+ maxTag := internal.GetMaxTag(md.GetMessageOptions().GetMessageSetWireFormat())
+
+ elements := elementAddrs{dsc: md, opts: opts}
+ elements.addrs = append(elements.addrs, optionsAsElementAddrs(internal.Message_optionsTag, -1, opts)...)
+ for i := range md.AsDescriptorProto().GetReservedRange() {
+ elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Message_reservedRangeTag, elementIndex: i})
+ }
+ for i := range md.AsDescriptorProto().GetReservedName() {
+ elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Message_reservedNameTag, elementIndex: i})
+ }
+ for i := range md.AsDescriptorProto().GetExtensionRange() {
+ elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Message_extensionRangeTag, elementIndex: i})
+ }
+ for i, fld := range md.GetFields() {
+ if fld.IsMap() || fld.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP {
+ // we don't emit nested messages for map types or groups since
+ // they get special treatment
+ skip[fld.GetMessageType()] = true
+ }
+ elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Message_fieldsTag, elementIndex: i})
+ }
+ for i := range md.GetNestedMessageTypes() {
+ elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Message_nestedMessagesTag, elementIndex: i})
+ }
+ for i := range md.GetNestedEnumTypes() {
+ elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Message_enumsTag, elementIndex: i})
+ }
+ exts := p.computeExtensions(sourceInfo, md.GetNestedExtensions(), append(path, internal.Message_extensionsTag))
+ for i, extd := range md.GetNestedExtensions() {
+ if extd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP {
+ // we don't emit nested messages for groups since
+ // they get special treatment
+ skip[extd.GetMessageType()] = true
+ }
+ elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Message_extensionsTag, elementIndex: i})
+ }
+
+ p.sort(elements, sourceInfo, path)
+
+ pkg := md.GetFile().GetPackage()
+ scope := md.GetFullyQualifiedName()
+
+ for i, el := range elements.addrs {
+ d := elements.at(el)
+
+ // skip[d] will panic if d is a slice (which it could be for []option),
+ // so just ignore it since we don't try to skip options
+ if reflect.TypeOf(d).Kind() != reflect.Slice && skip[d] {
+ // skip this element
+ continue
+ }
+
+ if i > 0 {
+ p.newLine(w)
+ }
+
+ childPath := append(path, el.elementType, int32(el.elementIndex))
+
+ switch d := d.(type) {
+ case []option:
+ p.printOptionsLong(d, reg, w, sourceInfo, childPath, indent)
+ case *desc.FieldDescriptor:
+ if d.IsExtension() {
+ extDecl := exts[d]
+ p.printExtensions(extDecl, exts, elements, i, reg, w, sourceInfo, path, internal.Message_extensionsTag, pkg, scope, indent)
+ // we printed all extensions in the group, so we can skip the others
+ for _, fld := range extDecl.fields {
+ skip[fld] = true
+ }
+ } else {
+ ood := d.GetOneOf()
+ if ood == nil || ood.IsSynthetic() {
+ p.printField(d, reg, w, sourceInfo, childPath, scope, indent)
+ } else {
+ // print the one-of, including all of its fields
+ p.printOneOf(ood, elements, i, reg, w, sourceInfo, path, indent, d.AsFieldDescriptorProto().GetOneofIndex())
+ for _, fld := range ood.GetChoices() {
+ skip[fld] = true
+ }
+ }
+ }
+ case *desc.MessageDescriptor:
+ p.printMessage(d, reg, w, sourceInfo, childPath, indent)
+ case *desc.EnumDescriptor:
+ p.printEnum(d, reg, w, sourceInfo, childPath, indent)
+ case *descriptorpb.DescriptorProto_ExtensionRange:
+ // collapse ranges into a single "extensions" block
+ ranges := []*descriptorpb.DescriptorProto_ExtensionRange{d}
+ addrs := []elementAddr{el}
+ for idx := i + 1; idx < len(elements.addrs); idx++ {
+ elnext := elements.addrs[idx]
+ if elnext.elementType != el.elementType {
+ break
+ }
+ extr := elements.at(elnext).(*descriptorpb.DescriptorProto_ExtensionRange)
+ if !proto.Equal(d.Options, extr.Options) {
+ break
+ }
+ ranges = append(ranges, extr)
+ addrs = append(addrs, elnext)
+ skip[extr] = true
+ }
+ p.printExtensionRanges(md, ranges, maxTag, addrs, reg, w, sourceInfo, path, indent)
+ case reservedRange:
+ // collapse reserved ranges into a single "reserved" block
+ ranges := []reservedRange{d}
+ addrs := []elementAddr{el}
+ for idx := i + 1; idx < len(elements.addrs); idx++ {
+ elnext := elements.addrs[idx]
+ if elnext.elementType != el.elementType {
+ break
+ }
+ rr := elements.at(elnext).(reservedRange)
+ ranges = append(ranges, rr)
+ addrs = append(addrs, elnext)
+ skip[rr] = true
+ }
+ p.printReservedRanges(ranges, maxTag, addrs, w, sourceInfo, path, indent)
+ case string: // reserved name
+ // collapse reserved names into a single "reserved" block
+ names := []string{d}
+ addrs := []elementAddr{el}
+ for idx := i + 1; idx < len(elements.addrs); idx++ {
+ elnext := elements.addrs[idx]
+ if elnext.elementType != el.elementType {
+ break
+ }
+ rn := elements.at(elnext).(string)
+ names = append(names, rn)
+ addrs = append(addrs, elnext)
+ skip[rn] = true
+ }
+ p.printReservedNames(names, addrs, w, sourceInfo, path, indent, useQuotedReserved(md.GetFile()))
+ }
+ }
+}
+
// printField prints a single field declaration, which may be a regular
// message field, an extension, or a (proto2) group. The path locates the
// field in sourceInfo; scope is the fully-qualified context name used to
// relativize type references when printing.
func (p *Printer) printField(fld *desc.FieldDescriptor, reg *protoregistry.Types, w *writer, sourceInfo internal.SourceInfoMap, path []int32, scope string, indent int) {
	var groupPath []int32
	var si *descriptorpb.SourceCodeInfo_Location

	group := isGroup(fld)

	if group {
		// compute path to group message type
		groupPath = make([]int32, len(path)-2)
		copy(groupPath, path)

		var candidates []*desc.MessageDescriptor
		var parentTag int32
		switch parent := fld.GetParent().(type) {
		case *desc.MessageDescriptor:
			// group in a message
			candidates = parent.GetNestedMessageTypes()
			parentTag = internal.Message_nestedMessagesTag
		case *desc.FileDescriptor:
			// group that is a top-level extension
			candidates = parent.GetMessageTypes()
			parentTag = internal.File_messagesTag
		}

		// find the index of the group's message type among its siblings so we
		// can build the source-info path to it
		var groupMsgIndex int32
		for i, nmd := range candidates {
			if nmd == fld.GetMessageType() {
				// found it
				groupMsgIndex = int32(i)
				break
			}
		}
		groupPath = append(groupPath, parentTag, groupMsgIndex)

		// the group message is where the field's comments and position are stored
		si = sourceInfo.Get(groupPath)
	} else {
		si = sourceInfo.Get(path)
	}

	p.printBlockElement(true, si, w, indent, func(w *writer, trailer func(int, bool)) {
		p.indent(w, indent)
		if shouldEmitLabel(fld) {
			locSi := sourceInfo.Get(append(path, internal.Field_labelTag))
			p.printElementString(locSi, w, indent, labelString(fld.GetLabel()))
		}

		if group {
			_, _ = fmt.Fprint(w, "group ")
		}

		typeSi := sourceInfo.Get(append(path, internal.Field_typeTag))
		p.printElementString(typeSi, w, indent, p.typeString(fld, scope))

		if !group {
			// for groups, the type name doubles as the field name, so only
			// non-groups print a separate name
			nameSi := sourceInfo.Get(append(path, internal.Field_nameTag))
			p.printElementString(nameSi, w, indent, fld.GetName())
		}

		_, _ = fmt.Fprint(w, "= ")
		numSi := sourceInfo.Get(append(path, internal.Field_numberTag))
		p.printElementString(numSi, w, indent, fmt.Sprintf("%d", fld.GetNumber()))

		opts, err := p.extractOptions(fld, protov1.MessageV2(fld.GetOptions()))
		if err != nil {
			if w.err == nil {
				w.err = err
			}
			return
		}

		// we use negative values for "extras" keys so they can't collide
		// with legit option tags

		if fld.UnwrapField().HasPresence() && fld.AsFieldDescriptorProto().DefaultValue != nil {
			defVal := fld.GetDefaultValue()
			if fld.GetEnumType() != nil {
				// enum defaults are printed as the value name, not the number
				defVal = ident(fld.GetEnumType().FindValueByNumber(defVal.(int32)).GetName())
			}
			opts[-internal.Field_defaultTag] = []option{{name: "default", val: defVal}}
		}

		// only print json_name when it differs from the auto-computed name
		jsn := fld.AsFieldDescriptorProto().GetJsonName()
		if jsn != "" && jsn != internal.JsonName(fld.GetName()) {
			opts[-internal.Field_jsonNameTag] = []option{{name: "json_name", val: jsn}}
		}

		p.printOptionsShort(fld, opts, internal.Field_optionsTag, reg, w, sourceInfo, path, indent)

		if group {
			// groups carry a message body instead of a terminating semicolon
			_, _ = fmt.Fprintln(w, "{")
			trailer(indent+1, true)

			p.printMessageBody(fld.GetMessageType(), reg, w, sourceInfo, groupPath, indent+1)

			p.indent(w, indent)
			_, _ = fmt.Fprintln(w, "}")

		} else {
			_, _ = fmt.Fprint(w, ";")
			trailer(indent, false)
		}
	})
}
+
+func shouldEmitLabel(fld *desc.FieldDescriptor) bool {
+ return fld.IsProto3Optional() ||
+ (!fld.IsMap() && fld.GetOneOf() == nil &&
+ (fld.GetLabel() != descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL ||
+ fld.GetFile().UnwrapFile().Syntax() == protoreflect.Proto2))
+}
+
+func labelString(lbl descriptorpb.FieldDescriptorProto_Label) string {
+ switch lbl {
+ case descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL:
+ return "optional"
+ case descriptorpb.FieldDescriptorProto_LABEL_REQUIRED:
+ return "required"
+ case descriptorpb.FieldDescriptorProto_LABEL_REPEATED:
+ return "repeated"
+ }
+ panic(fmt.Sprintf("invalid label: %v", lbl))
+}
+
+func isGroup(fld *desc.FieldDescriptor) bool {
+ return fld.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP
+}
+
// printOneOf prints a oneof declaration along with all of its member fields.
// The member fields live in parentElements (the enclosing message's element
// list); startFieldIndex is where to begin scanning for them, and ooIndex is
// the oneof's index within the parent message.
func (p *Printer) printOneOf(ood *desc.OneOfDescriptor, parentElements elementAddrs, startFieldIndex int, reg *protoregistry.Types, w *writer, sourceInfo internal.SourceInfoMap, parentPath []int32, indent int, ooIndex int32) {
	oopath := append(parentPath, internal.Message_oneOfsTag, ooIndex)
	oosi := sourceInfo.Get(oopath)
	p.printBlockElement(true, oosi, w, indent, func(w *writer, trailer func(int, bool)) {
		p.indent(w, indent)
		_, _ = fmt.Fprint(w, "oneof ")
		extNameSi := sourceInfo.Get(append(oopath, internal.OneOf_nameTag))
		p.printElementString(extNameSi, w, indent, ood.GetName())
		_, _ = fmt.Fprintln(w, "{")
		indent++
		trailer(indent, true)

		opts, err := p.extractOptions(ood, protov1.MessageV2(ood.GetOptions()))
		if err != nil {
			if w.err == nil {
				w.err = err
			}
			return
		}

		elements := elementAddrs{dsc: ood, opts: opts}
		elements.addrs = append(elements.addrs, optionsAsElementAddrs(internal.OneOf_optionsTag, -1, opts)...)

		// gather the oneof's member fields from the parent's element list;
		// count lets us stop early once all choices are found
		count := len(ood.GetChoices())
		for idx := startFieldIndex; count > 0 && idx < len(parentElements.addrs); idx++ {
			el := parentElements.addrs[idx]
			if el.elementType != internal.Message_fieldsTag {
				continue
			}
			if parentElements.at(el).(*desc.FieldDescriptor).GetOneOf() == ood {
				// negative tag indicates that this element is actually a sibling, not a child
				elements.addrs = append(elements.addrs, elementAddr{elementType: -internal.Message_fieldsTag, elementIndex: el.elementIndex})
				count--
			}
		}

		// the fields are already sorted, but we have to re-sort in order to
		// interleave the options (in the event that we are using file location
		// order and the option locations are interleaved with the fields)
		p.sort(elements, sourceInfo, oopath)
		scope := ood.GetOwner().GetFullyQualifiedName()

		for i, el := range elements.addrs {
			if i > 0 {
				p.newLine(w)
			}

			switch d := elements.at(el).(type) {
			case []option:
				childPath := append(oopath, el.elementType, int32(el.elementIndex))
				p.printOptionsLong(d, reg, w, sourceInfo, childPath, indent)
			case *desc.FieldDescriptor:
				// member fields are addressed relative to the parent message
				// (note the negated element type set above)
				childPath := append(parentPath, -el.elementType, int32(el.elementIndex))
				p.printField(d, reg, w, sourceInfo, childPath, scope, indent)
			}
		}

		p.indent(w, indent-1)
		_, _ = fmt.Fprintln(w, "}")
	})
}
+
// printExtensions prints a single "extend" block containing every extension
// field that shares the same extendee and source location group (exts).
// The candidate fields are scanned from parentElements starting at
// startFieldIndex; allExts maps each field to its group so non-members can
// be skipped.
func (p *Printer) printExtensions(exts *extensionDecl, allExts extensions, parentElements elementAddrs, startFieldIndex int, reg *protoregistry.Types, w *writer, sourceInfo internal.SourceInfoMap, parentPath []int32, extTag int32, pkg, scope string, indent int) {
	path := append(parentPath, extTag)
	p.printLeadingComments(exts.sourceInfo, w, indent)
	p.indent(w, indent)
	_, _ = fmt.Fprint(w, "extend ")
	extNameSi := sourceInfo.Get(append(path, 0, internal.Field_extendeeTag))
	p.printElementString(extNameSi, w, indent, p.qualifyName(pkg, scope, exts.extendee))
	_, _ = fmt.Fprintln(w, "{")

	if p.printTrailingComments(exts.sourceInfo, w, indent+1) && !p.Compact {
		// separator line between trailing comment and next element
		_, _ = fmt.Fprintln(w)
	}

	// print every field belonging to this extend group, in element order;
	// count lets us stop once all of them have been emitted
	count := len(exts.fields)
	first := true
	for idx := startFieldIndex; count > 0 && idx < len(parentElements.addrs); idx++ {
		el := parentElements.addrs[idx]
		if el.elementType != extTag {
			continue
		}
		fld := parentElements.at(el).(*desc.FieldDescriptor)
		if allExts[fld] == exts {
			if first {
				first = false
			} else {
				p.newLine(w)
			}
			childPath := append(path, int32(el.elementIndex))
			p.printField(fld, reg, w, sourceInfo, childPath, scope, indent+1)
			count--
		}
	}

	p.indent(w, indent)
	_, _ = fmt.Fprintln(w, "}")
}
+
// printExtensionRanges prints a single "extensions" statement covering the
// given ranges (which the caller has already grouped by equal options).
// maxTag is the largest legal tag for the parent message, used to render
// "to max". The ranges' shared options are printed in short form at the end.
func (p *Printer) printExtensionRanges(parent *desc.MessageDescriptor, ranges []*descriptorpb.DescriptorProto_ExtensionRange, maxTag int32, addrs []elementAddr, reg *protoregistry.Types, w *writer, sourceInfo internal.SourceInfoMap, parentPath []int32, indent int) {
	p.indent(w, indent)
	_, _ = fmt.Fprint(w, "extensions ")

	var opts *descriptorpb.ExtensionRangeOptions
	var elPath []int32
	first := true
	for i, extr := range ranges {
		if first {
			first = false
		} else {
			_, _ = fmt.Fprint(w, ", ")
		}
		opts = extr.Options
		el := addrs[i]
		elPath = append(parentPath, el.elementType, int32(el.elementIndex))
		si := sourceInfo.Get(elPath)
		p.printElement(true, si, w, inline(indent), func(w *writer) {
			// descriptor ranges are half-open [start, end), but proto source
			// uses inclusive bounds, hence the end-1 adjustments
			if extr.GetStart() == extr.GetEnd()-1 {
				_, _ = fmt.Fprintf(w, "%d ", extr.GetStart())
			} else if extr.GetEnd()-1 == maxTag {
				_, _ = fmt.Fprintf(w, "%d to max ", extr.GetStart())
			} else {
				_, _ = fmt.Fprintf(w, "%d to %d ", extr.GetStart(), extr.GetEnd()-1)
			}
		})
	}
	dsc := extensionRange{owner: parent, extRange: ranges[0]}
	p.extractAndPrintOptionsShort(dsc, opts, reg, internal.ExtensionRange_optionsTag, w, sourceInfo, elPath, indent)

	_, _ = fmt.Fprintln(w, ";")
}
+
// printReservedRanges prints a single "reserved" statement for the given
// numeric ranges. maxVal is the largest legal value (message tag max or
// math.MaxInt32 for enums), used to render "to max". Unlike extension
// ranges, reservedRange bounds here are inclusive on both ends.
func (p *Printer) printReservedRanges(ranges []reservedRange, maxVal int32, addrs []elementAddr, w *writer, sourceInfo internal.SourceInfoMap, parentPath []int32, indent int) {
	p.indent(w, indent)
	_, _ = fmt.Fprint(w, "reserved ")

	first := true
	for i, rr := range ranges {
		if first {
			first = false
		} else {
			_, _ = fmt.Fprint(w, ", ")
		}
		el := addrs[i]
		si := sourceInfo.Get(append(parentPath, el.elementType, int32(el.elementIndex)))
		p.printElement(false, si, w, inline(indent), func(w *writer) {
			if rr.start == rr.end {
				_, _ = fmt.Fprintf(w, "%d ", rr.start)
			} else if rr.end == maxVal {
				_, _ = fmt.Fprintf(w, "%d to max ", rr.start)
			} else {
				_, _ = fmt.Fprintf(w, "%d to %d ", rr.start, rr.end)
			}
		})
	}

	_, _ = fmt.Fprintln(w, ";")
}
+
+func useQuotedReserved(fd *desc.FileDescriptor) bool {
+ return fd.AsFileDescriptorProto().GetEdition() < descriptorpb.Edition_EDITION_2023
+}
+
// printReservedNames prints a single "reserved" statement listing the given
// field/value names. useQuotes selects between quoted string literals
// (proto2/proto3) and bare identifiers (editions 2023+).
func (p *Printer) printReservedNames(names []string, addrs []elementAddr, w *writer, sourceInfo internal.SourceInfoMap, parentPath []int32, indent int, useQuotes bool) {
	p.indent(w, indent)
	_, _ = fmt.Fprint(w, "reserved ")

	first := true
	for i, name := range names {
		if first {
			first = false
		} else {
			_, _ = fmt.Fprint(w, ", ")
		}
		el := addrs[i]
		si := sourceInfo.Get(append(parentPath, el.elementType, int32(el.elementIndex)))
		if useQuotes {
			p.printElementString(si, w, indent, quotedString(name))
		} else {
			p.printElementString(si, w, indent, name)
		}
	}

	_, _ = fmt.Fprintln(w, ";")
}
+
// printEnum prints a full enum declaration: options, values, and reserved
// ranges/names, ordered per the printer's configured sort. Consecutive
// reserved entries of the same kind are collapsed into single "reserved"
// statements.
func (p *Printer) printEnum(ed *desc.EnumDescriptor, reg *protoregistry.Types, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) {
	si := sourceInfo.Get(path)
	p.printBlockElement(true, si, w, indent, func(w *writer, trailer func(int, bool)) {
		p.indent(w, indent)

		_, _ = fmt.Fprint(w, "enum ")
		nameSi := sourceInfo.Get(append(path, internal.Enum_nameTag))
		p.printElementString(nameSi, w, indent, ed.GetName())
		_, _ = fmt.Fprintln(w, "{")
		indent++
		trailer(indent, true)

		opts, err := p.extractOptions(ed, protov1.MessageV2(ed.GetOptions()))
		if err != nil {
			if w.err == nil {
				w.err = err
			}
			return
		}

		// elements already emitted as part of a collapsed "reserved" block
		// get recorded here so the main loop can skip them
		skip := map[interface{}]bool{}

		elements := elementAddrs{dsc: ed, opts: opts}
		elements.addrs = append(elements.addrs, optionsAsElementAddrs(internal.Enum_optionsTag, -1, opts)...)
		for i := range ed.GetValues() {
			elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Enum_valuesTag, elementIndex: i})
		}
		for i := range ed.AsEnumDescriptorProto().GetReservedRange() {
			elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Enum_reservedRangeTag, elementIndex: i})
		}
		for i := range ed.AsEnumDescriptorProto().GetReservedName() {
			elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Enum_reservedNameTag, elementIndex: i})
		}

		p.sort(elements, sourceInfo, path)

		for i, el := range elements.addrs {
			d := elements.at(el)

			// skip[d] will panic if d is a slice (which it could be for []option),
			// so just ignore it since we don't try to skip options
			if reflect.TypeOf(d).Kind() != reflect.Slice && skip[d] {
				// skip this element
				continue
			}

			if i > 0 {
				p.newLine(w)
			}

			childPath := append(path, el.elementType, int32(el.elementIndex))

			switch d := d.(type) {
			case []option:
				p.printOptionsLong(d, reg, w, sourceInfo, childPath, indent)
			case *desc.EnumValueDescriptor:
				p.printEnumValue(d, reg, w, sourceInfo, childPath, indent)
			case reservedRange:
				// collapse reserved ranges into a single "reserved" block
				ranges := []reservedRange{d}
				addrs := []elementAddr{el}
				for idx := i + 1; idx < len(elements.addrs); idx++ {
					elnext := elements.addrs[idx]
					if elnext.elementType != el.elementType {
						break
					}
					rr := elements.at(elnext).(reservedRange)
					ranges = append(ranges, rr)
					addrs = append(addrs, elnext)
					skip[rr] = true
				}
				// enum reserved ranges may go up to the full int32 space
				p.printReservedRanges(ranges, math.MaxInt32, addrs, w, sourceInfo, path, indent)
			case string: // reserved name
				// collapse reserved names into a single "reserved" block
				names := []string{d}
				addrs := []elementAddr{el}
				for idx := i + 1; idx < len(elements.addrs); idx++ {
					elnext := elements.addrs[idx]
					if elnext.elementType != el.elementType {
						break
					}
					rn := elements.at(elnext).(string)
					names = append(names, rn)
					addrs = append(addrs, elnext)
					skip[rn] = true
				}
				p.printReservedNames(names, addrs, w, sourceInfo, path, indent, useQuotedReserved(ed.GetFile()))
			}
		}

		p.indent(w, indent-1)
		_, _ = fmt.Fprintln(w, "}")
	})
}
+
// printEnumValue prints a single enum value declaration, including its
// number and any options in short (bracketed) form.
func (p *Printer) printEnumValue(evd *desc.EnumValueDescriptor, reg *protoregistry.Types, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) {
	si := sourceInfo.Get(path)
	p.printElement(true, si, w, indent, func(w *writer) {
		p.indent(w, indent)

		nameSi := sourceInfo.Get(append(path, internal.EnumVal_nameTag))
		p.printElementString(nameSi, w, indent, evd.GetName())
		_, _ = fmt.Fprint(w, "= ")

		numSi := sourceInfo.Get(append(path, internal.EnumVal_numberTag))
		p.printElementString(numSi, w, indent, fmt.Sprintf("%d", evd.GetNumber()))

		p.extractAndPrintOptionsShort(evd, protov1.MessageV2(evd.GetOptions()), reg, internal.EnumVal_optionsTag, w, sourceInfo, path, indent)

		_, _ = fmt.Fprint(w, ";")
	})
}
+
// printService prints a service declaration, interleaving its options and
// methods in the printer's configured order.
func (p *Printer) printService(sd *desc.ServiceDescriptor, reg *protoregistry.Types, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) {
	si := sourceInfo.Get(path)
	p.printBlockElement(true, si, w, indent, func(w *writer, trailer func(int, bool)) {
		p.indent(w, indent)

		_, _ = fmt.Fprint(w, "service ")
		nameSi := sourceInfo.Get(append(path, internal.Service_nameTag))
		p.printElementString(nameSi, w, indent, sd.GetName())
		_, _ = fmt.Fprintln(w, "{")
		indent++
		trailer(indent, true)

		opts, err := p.extractOptions(sd, protov1.MessageV2(sd.GetOptions()))
		if err != nil {
			if w.err == nil {
				w.err = err
			}
			return
		}

		elements := elementAddrs{dsc: sd, opts: opts}
		elements.addrs = append(elements.addrs, optionsAsElementAddrs(internal.Service_optionsTag, -1, opts)...)
		for i := range sd.GetMethods() {
			elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Service_methodsTag, elementIndex: i})
		}

		p.sort(elements, sourceInfo, path)

		for i, el := range elements.addrs {
			if i > 0 {
				p.newLine(w)
			}

			childPath := append(path, el.elementType, int32(el.elementIndex))

			switch d := elements.at(el).(type) {
			case []option:
				p.printOptionsLong(d, reg, w, sourceInfo, childPath, indent)
			case *desc.MethodDescriptor:
				p.printMethod(d, reg, w, sourceInfo, childPath, indent)
			}
		}

		p.indent(w, indent-1)
		_, _ = fmt.Fprintln(w, "}")
	})
}
+
// printMethod prints an rpc declaration. Request/response type names are
// qualified relative to the file's package, with "stream " prefixed for
// streaming directions. If the method has options they are printed in a
// braced body; otherwise the rpc ends with a semicolon.
func (p *Printer) printMethod(mtd *desc.MethodDescriptor, reg *protoregistry.Types, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) {
	si := sourceInfo.Get(path)
	pkg := mtd.GetFile().GetPackage()
	p.printBlockElement(true, si, w, indent, func(w *writer, trailer func(int, bool)) {
		p.indent(w, indent)

		_, _ = fmt.Fprint(w, "rpc ")
		nameSi := sourceInfo.Get(append(path, internal.Method_nameTag))
		p.printElementString(nameSi, w, indent, mtd.GetName())

		_, _ = fmt.Fprint(w, "( ")
		inSi := sourceInfo.Get(append(path, internal.Method_inputTag))
		inName := p.qualifyName(pkg, pkg, mtd.GetInputType().GetFullyQualifiedName())
		if mtd.IsClientStreaming() {
			inName = "stream " + inName
		}
		p.printElementString(inSi, w, indent, inName)

		_, _ = fmt.Fprint(w, ") returns ( ")

		outSi := sourceInfo.Get(append(path, internal.Method_outputTag))
		outName := p.qualifyName(pkg, pkg, mtd.GetOutputType().GetFullyQualifiedName())
		if mtd.IsServerStreaming() {
			outName = "stream " + outName
		}
		p.printElementString(outSi, w, indent, outName)
		_, _ = fmt.Fprint(w, ") ")

		opts, err := p.extractOptions(mtd, protov1.MessageV2(mtd.GetOptions()))
		if err != nil {
			if w.err == nil {
				w.err = err
			}
			return
		}

		if len(opts) > 0 {
			// method options are printed in long form inside a braced body
			_, _ = fmt.Fprintln(w, "{")
			indent++
			trailer(indent, true)

			elements := elementAddrs{dsc: mtd, opts: opts}
			elements.addrs = optionsAsElementAddrs(internal.Method_optionsTag, 0, opts)
			p.sort(elements, sourceInfo, path)

			for i, el := range elements.addrs {
				if i > 0 {
					p.newLine(w)
				}
				o := elements.at(el).([]option)
				childPath := append(path, el.elementType, int32(el.elementIndex))
				p.printOptionsLong(o, reg, w, sourceInfo, childPath, indent)
			}

			p.indent(w, indent-1)
			_, _ = fmt.Fprintln(w, "}")
		} else {
			_, _ = fmt.Fprint(w, ";")
			trailer(indent, false)
		}
	})
}
+
// printOptionsLong prints each option as a standalone "option name = value;"
// statement, one per line, looking up source info for the i-th option at
// path plus its index.
func (p *Printer) printOptionsLong(opts []option, reg *protoregistry.Types, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) {
	p.printOptions(opts, w, indent,
		func(i int32) *descriptorpb.SourceCodeInfo_Location {
			return sourceInfo.Get(append(path, i))
		},
		func(w *writer, indent int, opt option, _ bool) {
			p.indent(w, indent)
			_, _ = fmt.Fprint(w, "option ")
			p.printOption(reg, opt.name, opt.val, w, indent)
			_, _ = fmt.Fprint(w, ";")
		},
		false)
}
+
+func (p *Printer) extractAndPrintOptionsShort(dsc interface{}, optsMsg proto.Message, reg *protoregistry.Types, optsTag int32, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) {
+ d, ok := dsc.(desc.Descriptor)
+ if !ok {
+ d = dsc.(extensionRange).owner
+ }
+ opts, err := p.extractOptions(d, protov1.MessageV2(optsMsg))
+ if err != nil {
+ if w.err == nil {
+ w.err = err
+ }
+ return
+ }
+ p.printOptionsShort(dsc, opts, optsTag, reg, w, sourceInfo, path, indent)
+}
+
// printOptionsShort prints options in short form ("[a = 1, b = 2]"). If
// there are more options than ShortOptionsExpansionThresholdCount, or if
// the compact rendering exceeds ShortOptionsExpansionThresholdLength, the
// bracketed list is expanded onto multiple lines instead.
func (p *Printer) printOptionsShort(dsc interface{}, opts map[int32][]option, optsTag int32, reg *protoregistry.Types, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) {
	elements := elementAddrs{dsc: dsc, opts: opts}
	elements.addrs = optionsAsElementAddrs(optsTag, 0, opts)
	if len(elements.addrs) == 0 {
		return
	}
	p.sort(elements, sourceInfo, path)

	// we render expanded form if there are many options
	count := 0
	for _, addr := range elements.addrs {
		opts := elements.at(addr).([]option)
		count += len(opts)
	}
	threshold := p.ShortOptionsExpansionThresholdCount
	if threshold <= 0 {
		threshold = 3
	}

	if count > threshold {
		p.printOptionElementsShort(elements, reg, w, sourceInfo, path, indent, true)
	} else {
		// render to a scratch buffer first so we can measure the compact form
		var tmp bytes.Buffer
		tmpW := *w
		tmpW.Writer = &tmp
		p.printOptionElementsShort(elements, reg, &tmpW, sourceInfo, path, indent, false)
		threshold := p.ShortOptionsExpansionThresholdLength
		if threshold <= 0 {
			threshold = 50
		}
		// we subtract 3 so we don't consider the leading " [" and trailing "]"
		if tmp.Len()-3 > threshold {
			p.printOptionElementsShort(elements, reg, w, sourceInfo, path, indent, true)
		} else {
			// not too long: commit what we rendered
			b := tmp.Bytes()
			if w.space && len(b) > 0 && b[0] == ' ' {
				// don't write extra space
				b = b[1:]
			}
			_, _ = w.Write(b)
			w.newline = tmpW.newline
			w.space = tmpW.space
		}
	}
}
+
// printOptionElementsShort prints a bracketed option list. When expand is
// true each option goes on its own line with increased indentation; when
// false everything is printed inline, separated by commas.
func (p *Printer) printOptionElementsShort(addrs elementAddrs, reg *protoregistry.Types, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int, expand bool) {
	if expand {
		_, _ = fmt.Fprintln(w, "[")
		indent++
	} else {
		_, _ = fmt.Fprint(w, "[")
	}
	for i, addr := range addrs.addrs {
		opts := addrs.at(addr).([]option)
		var childPath []int32
		if addr.elementIndex < 0 {
			// pseudo-option (e.g. default, json_name): negative index encodes
			// the pseudo-option's tag directly
			childPath = append(path, int32(-addr.elementIndex))
		} else {
			childPath = append(path, addr.elementType, int32(addr.elementIndex))
		}
		optIndent := indent
		if !expand {
			optIndent = inline(indent)
		}
		p.printOptions(opts, w, optIndent,
			func(i int32) *descriptorpb.SourceCodeInfo_Location {
				p := childPath
				if addr.elementIndex >= 0 {
					// real options get the per-value index appended
					p = append(p, i)
				}
				return sourceInfo.Get(p)
			},
			func(w *writer, indent int, opt option, more bool) {
				if expand {
					p.indent(w, indent)
				}
				p.printOption(reg, opt.name, opt.val, w, indent)
				if more {
					if expand {
						_, _ = fmt.Fprintln(w, ",")
					} else {
						_, _ = fmt.Fprint(w, ", ")
					}
				}
			},
			i < len(addrs.addrs)-1)
	}
	if expand {
		p.indent(w, indent-1)
	}
	_, _ = fmt.Fprint(w, "] ")
}
+
+func (p *Printer) printOptions(opts []option, w *writer, indent int, siFetch func(i int32) *descriptorpb.SourceCodeInfo_Location, fn func(w *writer, indent int, opt option, more bool), haveMore bool) {
+ for i, opt := range opts {
+ more := haveMore
+ if !more {
+ more = i < len(opts)-1
+ }
+ si := siFetch(int32(i))
+ p.printElement(false, si, w, indent, func(w *writer) {
+ fn(w, indent, opt, more)
+ })
+ }
+}
+
// inline converts a block indent level to the negative encoding that tells
// printElement to render on the current line. Already-negative (inline)
// values pass through unchanged.
func inline(indent int) int {
	if indent >= 0 {
		// negative indent means inline; offset by 2 so a wrapped value still
		// indents further than its parent
		return -(indent + 2)
	}
	// already inlined
	return indent
}
+
// sortKeys returns m's keys in ascending order so map-valued options print
// deterministically. All keys of a given map share one scalar type, so the
// sort comparator switches on the first key's dynamic type and coerces the
// second to match. Panics on a non-scalar key type (programmer bug).
func sortKeys(m protoreflect.Map) []protoreflect.MapKey {
	res := make([]protoreflect.MapKey, m.Len())
	i := 0
	m.Range(func(k protoreflect.MapKey, _ protoreflect.Value) bool {
		res[i] = k
		i++
		return true
	})
	sort.Slice(res, func(i, j int) bool {
		switch i := res[i].Interface().(type) {
		case int32:
			return i < int32(res[j].Int())
		case uint32:
			return i < uint32(res[j].Uint())
		case int64:
			return i < res[j].Int()
		case uint64:
			return i < res[j].Uint()
		case string:
			return i < res[j].String()
		case bool:
			// false sorts before true
			return !i && res[j].Bool()
		default:
			panic(fmt.Sprintf("invalid type for map key: %T", i))
		}
	})
	return res
}
+
+func (p *Printer) printOption(reg *protoregistry.Types, name string, optVal interface{}, w *writer, indent int) {
+ _, _ = fmt.Fprintf(w, "%s = ", name)
+
+ switch optVal := optVal.(type) {
+ case int32, uint32, int64, uint64:
+ _, _ = fmt.Fprintf(w, "%d", optVal)
+ case float32, float64:
+ _, _ = fmt.Fprintf(w, "%f", optVal)
+ case string:
+ _, _ = fmt.Fprintf(w, "%s", quotedString(optVal))
+ case []byte:
+ _, _ = fmt.Fprintf(w, "%s", quotedBytes(string(optVal)))
+ case bool:
+ _, _ = fmt.Fprintf(w, "%v", optVal)
+ case ident:
+ _, _ = fmt.Fprintf(w, "%s", optVal)
+ case messageVal:
+ threshold := p.MessageLiteralExpansionThresholdLength
+ if threshold == 0 {
+ threshold = 50
+ }
+ var buf bytes.Buffer
+ p.printMessageLiteralToBufferMaybeCompact(&buf, optVal.msg.ProtoReflect(), reg, optVal.pkg, optVal.scope, threshold, indent)
+ _, _ = w.Write(buf.Bytes())
+
+ default:
+ panic(fmt.Sprintf("unknown type of value %T for field %s", optVal, name))
+ }
+}
+
// edgeKind identifies the kind of descriptor element reached while walking
// a source-info path; it is the state in the small state machine encoded by
// the edges map below.
type edgeKind int

const (
	edgeKindOption edgeKind = iota
	edgeKindFile
	edgeKindMessage
	edgeKindField
	edgeKindOneOf
	edgeKindExtensionRange
	edgeKindReservedRange
	edgeKindReservedName
	edgeKindEnum
	edgeKindEnumVal
	edgeKindService
	edgeKindMethod
)
+
// edges in simple state machine for matching options paths
// whose prefix should be included in source info to handle
// the way options are printed (which cannot always include
// the full path from original source)
//
// Each entry maps a current state to the descriptor-proto field tags that
// are valid next steps and the state each tag transitions to. Walking a
// source-info path through this table (starting at edgeKindFile) either
// reaches edgeKindOption or dead-ends.
var edges = map[edgeKind]map[int32]edgeKind{
	edgeKindFile: {
		internal.File_optionsTag:    edgeKindOption,
		internal.File_messagesTag:   edgeKindMessage,
		internal.File_enumsTag:      edgeKindEnum,
		internal.File_extensionsTag: edgeKindField,
		internal.File_servicesTag:   edgeKindService,
	},
	edgeKindMessage: {
		internal.Message_optionsTag:        edgeKindOption,
		internal.Message_fieldsTag:         edgeKindField,
		internal.Message_oneOfsTag:         edgeKindOneOf,
		internal.Message_nestedMessagesTag: edgeKindMessage,
		internal.Message_enumsTag:          edgeKindEnum,
		internal.Message_extensionsTag:     edgeKindField,
		internal.Message_extensionRangeTag: edgeKindExtensionRange,
		internal.Message_reservedRangeTag:  edgeKindReservedRange,
		internal.Message_reservedNameTag:   edgeKindReservedName,
	},
	edgeKindField: {
		internal.Field_optionsTag: edgeKindOption,
	},
	edgeKindOneOf: {
		internal.OneOf_optionsTag: edgeKindOption,
	},
	edgeKindExtensionRange: {
		internal.ExtensionRange_optionsTag: edgeKindOption,
	},
	edgeKindEnum: {
		internal.Enum_optionsTag:       edgeKindOption,
		internal.Enum_valuesTag:        edgeKindEnumVal,
		internal.Enum_reservedRangeTag: edgeKindReservedRange,
		internal.Enum_reservedNameTag:  edgeKindReservedName,
	},
	edgeKindEnumVal: {
		internal.EnumVal_optionsTag: edgeKindOption,
	},
	edgeKindService: {
		internal.Service_optionsTag: edgeKindOption,
		internal.Service_methodsTag: edgeKindMethod,
	},
	edgeKindMethod: {
		internal.Method_optionsTag: edgeKindOption,
	},
}
+
// extendOptionLocations registers extra, shortened source-info paths for
// option locations in sc. The printer queries options using paths that are
// truncated relative to the original source (option tag plus at most one
// index), so for each location whose path walks the edges state machine to
// an option, this inserts entries at those truncated paths.
func extendOptionLocations(sc internal.SourceInfoMap, locs []*descriptorpb.SourceCodeInfo_Location) {
	// we iterate in the order that locations appear in descriptor
	// for determinism (if we ranged over the map, order and thus
	// potentially results are non-deterministic)
	for _, loc := range locs {
		allowed := edges[edgeKindFile]
		// paths alternate (tag, index) pairs, so step by two
		for i := 0; i+1 < len(loc.Path); i += 2 {
			nextKind, ok := allowed[loc.Path[i]]
			if !ok {
				break
			}
			if nextKind == edgeKindOption {
				// We've found an option entry. This could be arbitrarily deep
				// (for options that are nested messages) or it could end
				// abruptly (for non-repeated fields). But we need a path that
				// is exactly the path-so-far plus two: the option tag and an
				// optional index for repeated option fields (zero for
				// non-repeated option fields). This is used for querying source
				// info when printing options.
				newPath := make([]int32, i+3)
				copy(newPath, loc.Path)
				sc.PutIfAbsent(newPath, loc)
				// we do another path of path-so-far plus two, but with
				// explicit zero index -- just in case this actual path has
				// an extra path element, but it's not an index (e.g the
				// option field is not repeated, but the source info we are
				// looking at indicates a tag of a nested field)
				newPath[len(newPath)-1] = 0
				sc.PutIfAbsent(newPath, loc)
				// finally, we need the path-so-far plus one, just the option
				// tag, for sorting option groups
				newPath = newPath[:len(newPath)-1]
				sc.PutIfAbsent(newPath, loc)

				break
			} else {
				allowed = edges[nextKind]
			}
		}
	}
}
+
// extractOptions converts the populated fields of an options message into a
// map from field number to printable option entries. Extension option names
// are parenthesized and qualified relative to the descriptor's package and
// scope; message-valued options are wrapped in messageVal so they can be
// printed as message literals later.
func (p *Printer) extractOptions(dsc desc.Descriptor, opts proto.Message) (map[int32][]option, error) {
	pkg := dsc.GetFile().GetPackage()
	var scope string
	isMessage := false
	if _, ok := dsc.(*desc.FileDescriptor); ok {
		// file options are scoped to the package itself
		scope = pkg
	} else {
		_, isMessage = dsc.(*desc.MessageDescriptor)
		scope = dsc.GetFullyQualifiedName()
	}

	ref := opts.ProtoReflect()

	options := map[int32][]option{}
	ref.Range(func(fld protoreflect.FieldDescriptor, val protoreflect.Value) bool {
		var name string
		if fld.IsExtension() {
			var n string
			if isMessage {
				// message scopes need special qualification rules to avoid
				// colliding with nested element names
				n = p.qualifyMessageOptionName(pkg, scope, string(fld.FullName()))
			} else {
				n = p.qualifyName(pkg, scope, string(fld.FullName()))
			}
			name = fmt.Sprintf("(%s)", n)
		} else {
			name = string(fld.Name())
		}
		opts := valueToOptions(fld, name, val.Interface())
		if len(opts) > 0 {
			for i := range opts {
				if msg, ok := opts[i].val.(proto.Message); ok {
					opts[i].val = messageVal{pkg: pkg, scope: scope, msg: msg}
				}
			}
			options[int32(fld.Number())] = opts
		}
		return true
	})
	return options, nil
}
+
// valueToOptions converts one option field's value into zero or more
// printable option entries: repeated fields yield one entry per element,
// map fields yield one synthesized map-entry message per key (sorted for
// determinism), and scalar/message fields yield a single entry. The
// uninterpreted_options field gets dedicated handling.
func valueToOptions(fld protoreflect.FieldDescriptor, name string, val interface{}) []option {
	switch val := val.(type) {
	case protoreflect.List:
		if fld.Number() == internal.UninterpretedOptionsTag {
			// we handle uninterpreted options differently
			uninterp := make([]*descriptorpb.UninterpretedOption, 0, val.Len())
			for i := 0; i < val.Len(); i++ {
				uo := toUninterpretedOption(val.Get(i).Message().Interface())
				if uo != nil {
					uninterp = append(uninterp, uo)
				}
			}
			return uninterpretedToOptions(uninterp)
		}
		opts := make([]option, 0, val.Len())
		for i := 0; i < val.Len(); i++ {
			elem := valueForOption(fld, val.Get(i).Interface())
			if elem != nil {
				opts = append(opts, option{name: name, val: elem})
			}
		}
		return opts
	case protoreflect.Map:
		opts := make([]option, 0, val.Len())
		for _, k := range sortKeys(val) {
			v := val.Get(k)
			vf := fld.MapValue()
			if vf.Kind() == protoreflect.EnumKind {
				if vf.Enum().Values().ByNumber(v.Enum()) == nil {
					// have to skip unknown enum values :(
					continue
				}
			}
			// build a synthetic map-entry message (field 1 = key, field 2 =
			// value, per the map-entry convention) so it prints as a literal
			entry := dynamicpb.NewMessage(fld.Message())
			entry.Set(fld.Message().Fields().ByNumber(1), k.Value())
			entry.Set(fld.Message().Fields().ByNumber(2), v)
			opts = append(opts, option{name: name, val: entry})
		}
		return opts
	default:
		v := valueForOption(fld, val)
		if v == nil {
			return nil
		}
		return []option{{name: name, val: v}}
	}
}
+
// valueForOption converts a scalar option value into the representation the
// printer renders: enum numbers become identifiers (the value's name),
// message reflections are unwrapped to proto.Message, and everything else
// passes through unchanged. A nil return means the value cannot be rendered
// (an unknown enum number) and must be skipped by the caller.
func valueForOption(fld protoreflect.FieldDescriptor, val interface{}) interface{} {
	switch val := val.(type) {
	case protoreflect.EnumNumber:
		ev := fld.Enum().Values().ByNumber(val)
		if ev == nil {
			// if enum val is unknown, we'll return nil and have to skip it :(
			return nil
		}
		return ident(ev.Name())
	case protoreflect.Message:
		return val.Interface()
	default:
		return val
	}
}
+
+func toUninterpretedOption(message proto.Message) *descriptorpb.UninterpretedOption {
+ if uo, ok := message.(*descriptorpb.UninterpretedOption); ok {
+ return uo
+ }
+ // marshal and unmarshal to convert; if we fail to convert, skip it
+ var uo descriptorpb.UninterpretedOption
+ data, err := proto.Marshal(message)
+ if err != nil {
+ return nil
+ }
+ if proto.Unmarshal(data, &uo) != nil {
+ return nil
+ }
+ return &uo
+}
+
// uninterpretedToOptions converts raw uninterpreted options into printable
// option entries. Each option's name parts are joined with dots (extension
// parts wrapped in parentheses), and whichever single value field is set
// (identifier, string, double, positive/negative int, or aggregate) becomes
// the printed value.
func uninterpretedToOptions(uninterp []*descriptorpb.UninterpretedOption) []option {
	opts := make([]option, len(uninterp))
	for i, unint := range uninterp {
		var buf bytes.Buffer
		for ni, n := range unint.Name {
			if ni > 0 {
				buf.WriteByte('.')
			}
			if n.GetIsExtension() {
				_, _ = fmt.Fprintf(&buf, "(%s)", n.GetNamePart())
			} else {
				buf.WriteString(n.GetNamePart())
			}
		}

		var v interface{}
		switch {
		case unint.IdentifierValue != nil:
			v = ident(unint.GetIdentifierValue())
		case unint.StringValue != nil:
			v = string(unint.GetStringValue())
		case unint.DoubleValue != nil:
			v = unint.GetDoubleValue()
		case unint.PositiveIntValue != nil:
			v = unint.GetPositiveIntValue()
		case unint.NegativeIntValue != nil:
			v = unint.GetNegativeIntValue()
		case unint.AggregateValue != nil:
			// aggregate values are emitted verbatim inside braces
			v = ident("{ " + unint.GetAggregateValue() + " }")
		}

		opts[i] = option{name: buf.String(), val: v}
	}
	return opts
}
+
+func optionsAsElementAddrs(optionsTag int32, order int, opts map[int32][]option) []elementAddr {
+ optAddrs := make([]elementAddr, 0, len(opts))
+ for tag := range opts {
+ optAddrs = append(optAddrs, elementAddr{elementType: optionsTag, elementIndex: int(tag), order: order})
+ }
+ // We want stable output. So, if the printer can't sort these a better way,
+ // they'll at least be in a deterministic order (by name).
+ sort.Sort(optionsByName{addrs: optAddrs, opts: opts})
+ return optAddrs
+}
+
// quotedBytes renders s as a protocol buffer text-format string literal,
// treating the contents as raw bytes: every byte outside the printable
// 7-bit ASCII range is emitted as a three-digit octal escape. To keep
// unicode text readable instead of byte-escaped, use quotedString.
func quotedBytes(s string) string {
	var sb strings.Builder
	sb.WriteByte('"')
	// Walk byte-by-byte; runes are deliberately irrelevant here.
	for i := 0; i < len(s); i++ {
		c := s[i]
		switch {
		case c == '\n':
			sb.WriteString(`\n`)
		case c == '\r':
			sb.WriteString(`\r`)
		case c == '\t':
			sb.WriteString(`\t`)
		case c == '"':
			sb.WriteString(`\"`)
		case c == '\\':
			sb.WriteString(`\\`)
		case c >= 0x20 && c < 0x7f:
			// Printable ASCII passes through verbatim. Unlike the C++
			// implementation we leave apostrophes unescaped; its parser
			// copes with a naked apostrophe.
			sb.WriteByte(c)
		default:
			_, _ = fmt.Fprintf(&sb, "\\%03o", c)
		}
	}
	sb.WriteByte('"')
	return sb.String()
}
+
// quotedString renders s as a protocol buffer text-format string literal,
// preserving printable unicode characters as-is. Non-printable runes use
// \u/\U unicode escapes (octal for ASCII control bytes), and bytes that are
// not valid UTF-8 fall back to octal byte escapes. This form is also
// accepted for string literals in option values by protoc.
func quotedString(s string) string {
	var sb strings.Builder
	sb.WriteByte('"')
	for len(s) > 0 {
		r, n := utf8.DecodeRuneInString(s)
		if r == utf8.RuneError && n == 1 {
			// Invalid UTF-8: escape the single bad byte as octal.
			_, _ = fmt.Fprintf(&sb, "\\%03o", s[0])
			s = s[1:]
			continue
		}

		// As in quotedBytes, apostrophes are deliberately left unescaped.
		switch r {
		case '\n':
			sb.WriteString(`\n`)
		case '\r':
			sb.WriteString(`\r`)
		case '\t':
			sb.WriteString(`\t`)
		case '"':
			sb.WriteString(`\"`)
		case '\\':
			sb.WriteString(`\\`)
		default:
			switch {
			case unicode.IsPrint(r):
				sb.WriteRune(r)
			case r > 0xffff:
				// non-printable outside the BMP: long unicode escape
				_, _ = fmt.Fprintf(&sb, "\\U%08X", r)
			case r > 0x7f:
				// non-printable BMP rune: short unicode escape
				_, _ = fmt.Fprintf(&sb, "\\u%04X", r)
			default:
				// non-printable ASCII: octal escape
				_, _ = fmt.Fprintf(&sb, "\\%03o", byte(r))
			}
		}

		s = s[n:]
	}
	sb.WriteByte('"')
	return sb.String()
}
+
// elementAddr identifies one element within a descriptor: elementType is the
// descriptor-proto field tag for the element's kind, elementIndex is the
// index (or option field tag) within that kind, and order is an explicit
// weight used to group kinds when sorting.
type elementAddr struct {
	elementType  int32
	elementIndex int
	order        int
}

// elementAddrs is a sortable collection of element addresses, all belonging
// to a single descriptor (dsc), along with that descriptor's options keyed
// by option field tag. It implements sort.Interface with a canonical
// ordering (see Less).
type elementAddrs struct {
	addrs []elementAddr
	dsc   interface{}
	opts  map[int32][]option
}

func (a elementAddrs) Len() int {
	return len(a.addrs)
}

// Less implements the canonical ordering: explicit order weight, then
// element type tag, then a per-kind rule (tag number for fields, number/name
// for enum values, range start for ranges, lexical order for names).
func (a elementAddrs) Less(i, j int) bool {
	// explicit order is considered first
	if a.addrs[i].order < a.addrs[j].order {
		return true
	} else if a.addrs[i].order > a.addrs[j].order {
		return false
	}
	// if order is equal, sort by element type
	if a.addrs[i].elementType < a.addrs[j].elementType {
		return true
	} else if a.addrs[i].elementType > a.addrs[j].elementType {
		return false
	}

	di := a.at(a.addrs[i])
	dj := a.at(a.addrs[j])

	switch vi := di.(type) {
	case *desc.FieldDescriptor:
		// fields are ordered by tag number
		vj := dj.(*desc.FieldDescriptor)
		// regular fields before extensions; extensions grouped by extendee
		if !vi.IsExtension() && vj.IsExtension() {
			return true
		} else if vi.IsExtension() && !vj.IsExtension() {
			return false
		} else if vi.IsExtension() && vj.IsExtension() {
			if vi.GetOwner() != vj.GetOwner() {
				return vi.GetOwner().GetFullyQualifiedName() < vj.GetOwner().GetFullyQualifiedName()
			}
		}
		return vi.GetNumber() < vj.GetNumber()

	case *desc.EnumValueDescriptor:
		// enum values ordered by number then name,
		// but first value number must be 0 for open enums
		vj := dj.(*desc.EnumValueDescriptor)
		if vi.GetNumber() == vj.GetNumber() {
			return vi.GetName() < vj.GetName()
		}
		if !vi.GetEnum().UnwrapEnum().IsClosed() {
			if vj.GetNumber() == 0 {
				return false
			}
			if vi.GetNumber() == 0 {
				return true
			}
		}
		return vi.GetNumber() < vj.GetNumber()

	case *descriptorpb.DescriptorProto_ExtensionRange:
		// extension ranges ordered by tag
		return vi.GetStart() < dj.(*descriptorpb.DescriptorProto_ExtensionRange).GetStart()

	case reservedRange:
		// reserved ranges ordered by tag, too
		return vi.start < dj.(reservedRange).start

	case string:
		// reserved names lexically sorted
		return vi < dj.(string)

	case pkg:
		// package names lexically sorted
		return vi < dj.(pkg)

	case imp:
		// import paths lexically sorted
		return vi < dj.(imp)

	case []option:
		// options sorted by name, extensions last
		return optionLess(vi, dj.([]option))

	default:
		// all other descriptors ordered by name
		return di.(desc.Descriptor).GetName() < dj.(desc.Descriptor).GetName()
	}
}

func (a elementAddrs) Swap(i, j int) {
	a.addrs[i], a.addrs[j] = a.addrs[j], a.addrs[i]
}

// at dereferences an element address against the underlying descriptor,
// returning the concrete item (descriptor, option group, range, or name)
// that the address points to. It panics on an address that is invalid for
// the descriptor's type, which would indicate a bug in the caller.
func (a elementAddrs) at(addr elementAddr) interface{} {
	switch dsc := a.dsc.(type) {
	case *desc.FileDescriptor:
		switch addr.elementType {
		case internal.File_packageTag:
			return pkg(dsc.GetPackage())
		case internal.File_dependencyTag:
			return imp(dsc.AsFileDescriptorProto().GetDependency()[addr.elementIndex])
		case internal.File_optionsTag:
			// for options, elementIndex is the option's field tag, not a slice index
			return a.opts[int32(addr.elementIndex)]
		case internal.File_messagesTag:
			return dsc.GetMessageTypes()[addr.elementIndex]
		case internal.File_enumsTag:
			return dsc.GetEnumTypes()[addr.elementIndex]
		case internal.File_servicesTag:
			return dsc.GetServices()[addr.elementIndex]
		case internal.File_extensionsTag:
			return dsc.GetExtensions()[addr.elementIndex]
		}
	case *desc.MessageDescriptor:
		switch addr.elementType {
		case internal.Message_optionsTag:
			return a.opts[int32(addr.elementIndex)]
		case internal.Message_fieldsTag:
			return dsc.GetFields()[addr.elementIndex]
		case internal.Message_nestedMessagesTag:
			return dsc.GetNestedMessageTypes()[addr.elementIndex]
		case internal.Message_enumsTag:
			return dsc.GetNestedEnumTypes()[addr.elementIndex]
		case internal.Message_extensionsTag:
			return dsc.GetNestedExtensions()[addr.elementIndex]
		case internal.Message_extensionRangeTag:
			return dsc.AsDescriptorProto().GetExtensionRange()[addr.elementIndex]
		case internal.Message_reservedRangeTag:
			// descriptor proto ranges have exclusive ends; reservedRange is inclusive
			rng := dsc.AsDescriptorProto().GetReservedRange()[addr.elementIndex]
			return reservedRange{start: rng.GetStart(), end: rng.GetEnd() - 1}
		case internal.Message_reservedNameTag:
			return dsc.AsDescriptorProto().GetReservedName()[addr.elementIndex]
		}
	case *desc.FieldDescriptor:
		if addr.elementType == internal.Field_optionsTag {
			return a.opts[int32(addr.elementIndex)]
		}
	case *desc.OneOfDescriptor:
		switch addr.elementType {
		case internal.OneOf_optionsTag:
			return a.opts[int32(addr.elementIndex)]
		case -internal.Message_fieldsTag:
			// negative type: a field of the enclosing message addressed
			// through the one-of
			return dsc.GetOwner().GetFields()[addr.elementIndex]
		}
	case *desc.EnumDescriptor:
		switch addr.elementType {
		case internal.Enum_optionsTag:
			return a.opts[int32(addr.elementIndex)]
		case internal.Enum_valuesTag:
			return dsc.GetValues()[addr.elementIndex]
		case internal.Enum_reservedRangeTag:
			// enum reserved ranges already have inclusive ends
			rng := dsc.AsEnumDescriptorProto().GetReservedRange()[addr.elementIndex]
			return reservedRange{start: rng.GetStart(), end: rng.GetEnd()}
		case internal.Enum_reservedNameTag:
			return dsc.AsEnumDescriptorProto().GetReservedName()[addr.elementIndex]
		}
	case *desc.EnumValueDescriptor:
		if addr.elementType == internal.EnumVal_optionsTag {
			return a.opts[int32(addr.elementIndex)]
		}
	case *desc.ServiceDescriptor:
		switch addr.elementType {
		case internal.Service_optionsTag:
			return a.opts[int32(addr.elementIndex)]
		case internal.Service_methodsTag:
			return dsc.GetMethods()[addr.elementIndex]
		}
	case *desc.MethodDescriptor:
		if addr.elementType == internal.Method_optionsTag {
			return a.opts[int32(addr.elementIndex)]
		}
	case extensionRange:
		if addr.elementType == internal.ExtensionRange_optionsTag {
			return a.opts[int32(addr.elementIndex)]
		}
	}

	panic(fmt.Sprintf("location for unknown field %d of %T", addr.elementType, a.dsc))
}
+
// extensionRange pairs an extension range with the message that declares it,
// so that options on the range can be resolved relative to the owner.
type extensionRange struct {
	owner    *desc.MessageDescriptor
	extRange *descriptorpb.DescriptorProto_ExtensionRange
}

// elementSrcOrder sorts element addresses by their source-code location
// spans, so elements print in the order they appeared in the original file.
// prefix is the source-info path of the enclosing descriptor.
type elementSrcOrder struct {
	elementAddrs
	sourceInfo internal.SourceInfoMap
	prefix     []int32
}

// Less orders by source span. Addresses with negative element indexes or
// types encode special paths (e.g. one-of members addressed through the
// enclosing message) and are mapped to their source-info paths accordingly.
// Elements missing source info are placed after known ones, except package,
// import, and option elements, which sort toward the top (see below).
func (a elementSrcOrder) Less(i, j int) bool {
	ti := a.addrs[i].elementType
	ei := a.addrs[i].elementIndex

	tj := a.addrs[j].elementType
	ej := a.addrs[j].elementIndex

	var si, sj *descriptorpb.SourceCodeInfo_Location
	// NOTE(review): the ti < 0 branch trims the last two prefix components
	// and appends the (negative) type tag — presumably matching how paths
	// for one-of members are recorded in SourceInfoMap; confirm against
	// SourceInfoMap.Get.
	if ei < 0 {
		si = a.sourceInfo.Get(append(a.prefix, -int32(ei)))
	} else if ti < 0 {
		p := make([]int32, len(a.prefix)-2)
		copy(p, a.prefix)
		si = a.sourceInfo.Get(append(p, ti, int32(ei)))
	} else {
		si = a.sourceInfo.Get(append(a.prefix, ti, int32(ei)))
	}
	if ej < 0 {
		sj = a.sourceInfo.Get(append(a.prefix, -int32(ej)))
	} else if tj < 0 {
		p := make([]int32, len(a.prefix)-2)
		copy(p, a.prefix)
		sj = a.sourceInfo.Get(append(p, tj, int32(ej)))
	} else {
		sj = a.sourceInfo.Get(append(a.prefix, tj, int32(ej)))
	}

	if (si == nil) != (sj == nil) {
		// generally, we put unknown elements after known ones;
		// except package, imports, and option elements go first

		// i will be unknown and j will be known
		swapped := false
		if si != nil {
			ti, tj = tj, ti
			swapped = true
		}
		switch a.dsc.(type) {
		case *desc.FileDescriptor:
			// NB: These comparisons are *trying* to get things ordered so that
			// 1) If the package element has no source info, it appears _first_.
			// 2) If any import element has no source info, it appears _after_
			//    the package element but _before_ any other element.
			// 3) If any option element has no source info, it appears _after_
			//    the package and import elements but _before_ any other element.
			// If the package, imports, and options are all missing source info,
			// this will sort them all to the top in expected order. But if they
			// are mixed (some _do_ have source info, some do not), and elements
			// with source info have spans that positions them _after_ other
			// elements in the file, then this Less function will be unstable
			// since the above dual objectives for imports and options ("before
			// this but after that") may be in conflict with one another. This
			// should not cause any problems, other than elements being possibly
			// sorted in a confusing order.
			//
			// Well-formed descriptors should instead have consistent source
			// info: either all elements have source info or none do. So this
			// should not be an issue in practice.
			if ti == internal.File_packageTag {
				return !swapped
			}
			if ti == internal.File_dependencyTag {
				if tj == internal.File_packageTag {
					// imports will come *after* package
					return swapped
				}
				return !swapped
			}
			if ti == internal.File_optionsTag {
				if tj == internal.File_packageTag || tj == internal.File_dependencyTag {
					// options will come *after* package and imports
					return swapped
				}
				return !swapped
			}
		case *desc.MessageDescriptor:
			if ti == internal.Message_optionsTag {
				return !swapped
			}
		case *desc.EnumDescriptor:
			if ti == internal.Enum_optionsTag {
				return !swapped
			}
		case *desc.ServiceDescriptor:
			if ti == internal.Service_optionsTag {
				return !swapped
			}
		}
		return swapped

	} else if si == nil || sj == nil {
		// let stable sort keep unknown elements in same relative order
		return false
	}

	// both have source info: compare spans lexicographically; a shorter
	// span that is a prefix of the other sorts first
	for idx := 0; idx < len(sj.Span); idx++ {
		if idx >= len(si.Span) {
			return true
		}
		if si.Span[idx] < sj.Span[idx] {
			return true
		}
		if si.Span[idx] > sj.Span[idx] {
			return false
		}
	}
	return false
}
+
// customSortOrder sorts element addresses using a caller-supplied comparison
// over Element values, while still enforcing protobuf validity constraints
// (see Less).
type customSortOrder struct {
	elementAddrs
	less func(a, b Element) bool
}

func (cso customSortOrder) Less(i, j int) bool {
	// Regardless of the custom sort order, for proto3 files,
	// the enum value zero MUST be first. So we override the
	// custom sort order to make sure the file will be valid
	// and can compile.
	addri := cso.addrs[i]
	addrj := cso.addrs[j]
	di := cso.at(addri)
	dj := cso.at(addrj)
	if addri.elementType == addrj.elementType {
		if vi, ok := di.(*desc.EnumValueDescriptor); ok {
			vj := dj.(*desc.EnumValueDescriptor)
			if !vi.GetEnum().UnwrapEnum().IsClosed() {
				if vi.GetNumber() == 0 {
					return true
				}
				if vj.GetNumber() == 0 {
					return false
				}
			}
		}
	}

	// otherwise, defer entirely to the user-supplied comparison
	ei := asElement(di)
	ej := asElement(dj)
	return cso.less(ei, ej)
}
+
// optionsByName sorts option element addresses by the name of the option
// they reference (see optionLess): built-in options first, then custom
// (extension) options, each group lexically ordered. elementIndex here is
// the option's field tag, used to key into opts.
type optionsByName struct {
	addrs []elementAddr
	opts  map[int32][]option
}

func (o optionsByName) Len() int {
	return len(o.addrs)
}

func (o optionsByName) Less(i, j int) bool {
	oi := o.opts[int32(o.addrs[i].elementIndex)]
	oj := o.opts[int32(o.addrs[j].elementIndex)]
	return optionLess(oi, oj)
}

func (o optionsByName) Swap(i, j int) {
	o.addrs[i], o.addrs[j] = o.addrs[j], o.addrs[i]
}
+
+func optionLess(i, j []option) bool {
+ ni := i[0].name
+ nj := j[0].name
+ if ni[0] != '(' && nj[0] == '(' {
+ return true
+ } else if ni[0] == '(' && nj[0] != '(' {
+ return false
+ }
+ return ni < nj
+}
+
+func (p *Printer) printBlockElement(isDecriptor bool, si *descriptorpb.SourceCodeInfo_Location, w *writer, indent int, el func(w *writer, trailer func(indent int, wantTrailingNewline bool))) {
+ includeComments := isDecriptor || p.includeCommentType(CommentsTokens)
+
+ if includeComments && si != nil {
+ p.printLeadingComments(si, w, indent)
+ }
+ el(w, func(indent int, wantTrailingNewline bool) {
+ if includeComments && si != nil {
+ if p.printTrailingComments(si, w, indent) && wantTrailingNewline && !p.Compact {
+ // separator line between trailing comment and next element
+ _, _ = fmt.Fprintln(w)
+ }
+ }
+ })
+ if indent >= 0 && !w.newline {
+ // if we're not printing inline but element did not have trailing newline, add one now
+ _, _ = fmt.Fprintln(w)
+ }
+}
+
+func (p *Printer) printElement(isDecriptor bool, si *descriptorpb.SourceCodeInfo_Location, w *writer, indent int, el func(*writer)) {
+ includeComments := isDecriptor || p.includeCommentType(CommentsTokens)
+
+ if includeComments && si != nil {
+ p.printLeadingComments(si, w, indent)
+ }
+ el(w)
+ if includeComments && si != nil {
+ p.printTrailingComments(si, w, indent)
+ }
+ if indent >= 0 && !w.newline {
+ // if we're not printing inline but element did not have trailing newline, add one now
+ _, _ = fmt.Fprintln(w)
+ }
+}
+
// printElementString prints str followed by a trailing space as an inline
// element, emitting any comments attached to its source location.
func (p *Printer) printElementString(si *descriptorpb.SourceCodeInfo_Location, w *writer, indent int, str string) {
	p.printElement(false, si, w, inline(indent), func(w *writer) {
		_, _ = fmt.Fprintf(w, "%s ", str)
	})
}

// includeCommentType reports whether comments of the given type should be
// printed, i.e. whether c is absent from the printer's OmitComments mask.
func (p *Printer) includeCommentType(c CommentType) bool {
	return (p.OmitComments & c) == 0
}
+
// printLeadingComments emits the detached and leading comments attached to
// si, honoring the printer's OmitComments configuration. It reports whether
// the emitted output ended in a newline; false means the caller's element
// continues on the same line.
func (p *Printer) printLeadingComments(si *descriptorpb.SourceCodeInfo_Location, w *writer, indent int) bool {
	endsInNewLine := false

	if p.includeCommentType(CommentsDetached) {
		for _, c := range si.GetLeadingDetachedComments() {
			if p.printComment(c, w, indent, true) {
				// if comment ended in newline, add another newline to separate
				// this comment from the next
				p.newLine(w)
				endsInNewLine = true
			} else if indent < 0 {
				// comment did not end in newline and we are trying to inline?
				// just add a space to separate this comment from what follows
				_, _ = fmt.Fprint(w, " ")
				endsInNewLine = false
			} else {
				// comment did not end in newline and we are *not* trying to inline?
				// add newline to end of comment and add another to separate this
				// comment from what follows
				_, _ = fmt.Fprintln(w) // needed to end comment, regardless of p.Compact
				p.newLine(w)
				endsInNewLine = true
			}
		}
	}

	if p.includeCommentType(CommentsLeading) && si.GetLeadingComments() != "" {
		endsInNewLine = p.printComment(si.GetLeadingComments(), w, indent, true)
		if !endsInNewLine {
			if indent >= 0 {
				// leading comment didn't end with newline but needs one
				// (because we're *not* inlining)
				_, _ = fmt.Fprintln(w) // needed to end comment, regardless of p.Compact
				endsInNewLine = true
			} else {
				// space between comment and following element when inlined
				_, _ = fmt.Fprint(w, " ")
			}
		}
	}

	return endsInNewLine
}

// printTrailingComments emits the trailing comment attached to si, if there
// is one and trailing comments are not omitted. It reports whether a
// comment was printed.
func (p *Printer) printTrailingComments(si *descriptorpb.SourceCodeInfo_Location, w *writer, indent int) bool {
	if p.includeCommentType(CommentsTrailing) && si.GetTrailingComments() != "" {
		if !p.printComment(si.GetTrailingComments(), w, indent, p.TrailingCommentsOnSeparateLine) && indent >= 0 {
			// trailing comment didn't end with newline but needs one
			// (because we're *not* inlining)
			_, _ = fmt.Fprintln(w) // needed to end comment, regardless of p.Compact
		} else if indent < 0 {
			_, _ = fmt.Fprint(w, " ")
		}
		return true
	}

	return false
}
+
// printComment writes the given comment text, choosing between //-style and
// /* */-style: inlined comments (negative indent) are always multi-line
// style, otherwise the printer's PreferMultiLineStyleComments applies —
// unless the text contains "*/", which cannot appear inside a multi-line
// comment. forceNextLine forces the comment onto its own line even when it
// would fit inline after the previous token. It reports whether the printed
// comment ended with a newline.
func (p *Printer) printComment(comments string, w *writer, indent int, forceNextLine bool) bool {
	if comments == "" {
		return false
	}

	var multiLine bool
	if indent < 0 {
		// use multi-line style when inlining
		multiLine = true
	} else {
		multiLine = p.PreferMultiLineStyleComments
	}
	if multiLine && strings.Contains(comments, "*/") {
		// can't emit '*/' in a multi-line style comment
		multiLine = false
	}

	lines := strings.Split(comments, "\n")

	// first, remove leading and trailing blank lines
	if lines[0] == "" {
		lines = lines[1:]
	}
	if len(lines) > 0 && lines[len(lines)-1] == "" {
		lines = lines[:len(lines)-1]
	}
	if len(lines) == 0 {
		return false
	}

	if indent >= 0 && !w.newline {
		// last element did not have trailing newline, so we
		// either need to tack on newline or, if comment is
		// just one line, inline it on the end
		if forceNextLine || len(lines) > 1 {
			_, _ = fmt.Fprintln(w)
		} else {
			if !w.space {
				_, _ = fmt.Fprint(w, " ")
			}
			indent = inline(indent)
		}
	}

	if len(lines) == 1 && multiLine {
		p.indent(w, indent)
		line := lines[0]
		if line[0] == ' ' && line[len(line)-1] != ' ' {
			// add trailing space for symmetry
			line += " "
		}
		_, _ = fmt.Fprintf(w, "/*%s*/", line)
		if indent >= 0 {
			_, _ = fmt.Fprintln(w)
			return true
		}
		return false
	}

	if multiLine {
		// multi-line style comments that actually span multiple lines
		// get a blank line before and after so that comment renders nicely
		lines = append(lines, "", "")
		copy(lines[1:], lines)
		lines[0] = ""
	}

	for i, l := range lines {
		if l != "" && !strings.HasPrefix(l, " ") {
			// ensure a space between comment marker and text
			l = " " + l
		}
		p.maybeIndent(w, indent, i > 0)
		if multiLine {
			if i == 0 {
				// first line
				_, _ = fmt.Fprintf(w, "/*%s\n", strings.TrimRight(l, " \t"))
			} else if i == len(lines)-1 {
				// last line
				if strings.TrimSpace(l) == "" {
					_, _ = fmt.Fprint(w, " */")
				} else {
					_, _ = fmt.Fprintf(w, " *%s*/", l)
				}
				if indent >= 0 {
					_, _ = fmt.Fprintln(w)
				}
			} else {
				_, _ = fmt.Fprintf(w, " *%s\n", strings.TrimRight(l, " \t"))
			}
		} else {
			_, _ = fmt.Fprintf(w, "//%s\n", strings.TrimRight(l, " \t"))
		}
	}

	// single-line comments always end in newline; multi-line comments only
	// end in newline for non-negative (e.g. non-inlined) indentation
	return !multiLine || indent >= 0
}
+
// indent writes the printer's configured indent string the given number of
// times (a non-positive count writes nothing).
func (p *Printer) indent(w io.Writer, indent int) {
	for i := 0; i < indent; i++ {
		_, _ = fmt.Fprint(w, p.Indent)
	}
}

// maybeIndent writes indentation for a line of a (possibly inlined)
// element. A negative indent marks an inlined element: indentation is then
// written only when requireIndent is set, using the magnitude of the value.
func (p *Printer) maybeIndent(w io.Writer, indent int, requireIndent bool) {
	if indent < 0 && requireIndent {
		p.indent(w, -indent)
	} else {
		p.indent(w, indent)
	}
}
+
// writer wraps an io.Writer with two pieces of pretty-printer bookkeeping:
// it defers a single trailing space (so the space can be suppressed when
// the next token is punctuation) and records whether the last byte written
// was a newline (so callers know if a line terminator is still needed).
// The first write error encountered is latched in err.
type writer struct {
	io.Writer
	err     error
	space   bool
	newline bool
}

// newWriter wraps w; newline starts true so the first write is treated as
// the start of a line.
func newWriter(w io.Writer) *writer {
	return &writer{Writer: w, newline: true}
}

// Write implements io.Writer with the space/newline bookkeeping described
// on the writer type. The reported count includes a deferred trailing
// space, so callers see the length they asked for.
func (w *writer) Write(p []byte) (int, error) {
	if len(p) == 0 {
		return 0, nil
	}

	w.newline = false

	if w.space {
		// flush the deferred trailing space, unless the
		// following character is a semicolon or comma
		if p[0] != ';' && p[0] != ',' {
			_, err := w.Writer.Write([]byte{' '})
			if err != nil {
				w.err = err
				return 0, err
			}
		}
		w.space = false
	}

	if p[len(p)-1] == ' ' {
		// defer the trailing space until the next write
		w.space = true
		p = p[:len(p)-1]
	}
	if len(p) > 0 && p[len(p)-1] == '\n' {
		w.newline = true
	}

	num, err := w.Writer.Write(p)
	if err != nil {
		w.err = err
	} else if w.space {
		// pretend space was written
		num++
	}
	return num, err
}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoprint/sort.go b/vendor/github.com/jhump/protoreflect/desc/protoprint/sort.go
new file mode 100644
index 0000000..9b52ceb
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoprint/sort.go
@@ -0,0 +1,439 @@
+package protoprint
+
+import (
+ "fmt"
+ "strings"
+
+ "google.golang.org/protobuf/types/descriptorpb"
+
+ "github.com/jhump/protoreflect/desc"
+)
+
// ElementKind is an enumeration of the types of elements in a protobuf
// file descriptor. This can be used by custom sort functions, for
// printing a file using a custom ordering of elements.
type ElementKind int

// The distinct kinds of printable elements. The zero value is deliberately
// unused, so an unset ElementKind is distinguishable from every real kind.
const (
	KindPackage = ElementKind(iota) + 1
	KindImport
	KindOption
	KindField
	KindMessage
	KindEnum
	KindService
	KindExtensionRange
	KindExtension
	KindReservedRange
	KindReservedName
	KindEnumValue
	KindMethod
)

// Element represents an element in a proto descriptor that can be
// printed. This interface is primarily used to allow users of this package to
// define custom sort orders for the printed output. The methods of this
// interface represent the values that can be used for ordering elements.
type Element interface {
	// Kind returns the kind of the element. The kind determines which other
	// methods are applicable.
	Kind() ElementKind
	// Name returns the element name. This is NOT applicable to syntax,
	// extension range, and reserved range kinds and will return the empty
	// string for these kinds. For custom options, this will be the
	// fully-qualified name of the corresponding extension.
	Name() string
	// Number returns the element number. This is only applicable to field,
	// extension, and enum value kinds and will return zero for all other kinds.
	Number() int32
	// NumberRange returns the range of numbers/tags for the element. This is
	// only applicable to extension ranges and reserved ranges and will return
	// (0, 0) for all other kinds.
	NumberRange() (int32, int32)
	// Extendee is the extended message for the extension element. Elements
	// other than extensions will return the empty string.
	Extendee() string
	// IsCustomOption returns true if the element is a custom option. If it is
	// not (including if the element kind is not option) then this method will
	// return false.
	IsCustomOption() bool
}
+
// asElement wraps one of the printer's internal element representations in
// the exported Element interface, so custom sort functions can inspect it.
// It panics on an unrecognized type, which would indicate a bug in the
// caller.
func asElement(v interface{}) Element {
	switch v := v.(type) {
	case pkg:
		return pkgElement(v)
	case imp:
		return impElement(v)
	case []option:
		// an option group is represented by its first entry, whose name
		// determines the group's sort identity
		return (*optionElement)(&v[0])
	case reservedRange:
		return resvdRangeElement(v)
	case string:
		return resvdNameElement(v)
	case *desc.FieldDescriptor:
		return (*fieldElement)(v)
	case *desc.MessageDescriptor:
		return (*msgElement)(v)
	case *desc.EnumDescriptor:
		return (*enumElement)(v)
	case *desc.EnumValueDescriptor:
		return (*enumValElement)(v)
	case *desc.ServiceDescriptor:
		return (*svcElement)(v)
	case *desc.MethodDescriptor:
		return (*methodElement)(v)
	case *descriptorpb.DescriptorProto_ExtensionRange:
		return (*extRangeElement)(v)
	default:
		panic(fmt.Sprintf("unexpected type of element: %T", v))
	}
}
+
// pkgElement adapts a package declaration to the Element interface; only
// Kind and Name are meaningful.
type pkgElement pkg

var _ Element = pkgElement("")

func (p pkgElement) Kind() ElementKind {
	return KindPackage
}

func (p pkgElement) Name() string {
	return string(p)
}

func (p pkgElement) Number() int32 {
	return 0
}

func (p pkgElement) NumberRange() (int32, int32) {
	return 0, 0
}

func (p pkgElement) Extendee() string {
	return ""
}

func (p pkgElement) IsCustomOption() bool {
	return false
}

// impElement adapts an import statement to the Element interface; Name is
// the imported file path.
type impElement imp

var _ Element = impElement("")

func (i impElement) Kind() ElementKind {
	return KindImport
}

func (i impElement) Name() string {
	return string(i)
}

func (i impElement) Number() int32 {
	return 0
}

func (i impElement) NumberRange() (int32, int32) {
	return 0, 0
}

func (i impElement) Extendee() string {
	return ""
}

func (i impElement) IsCustomOption() bool {
	return false
}

// optionElement adapts an option to the Element interface. Custom options
// are recognized by a parenthesized name; Name strips the parentheses.
type optionElement option

var _ Element = (*optionElement)(nil)

func (o *optionElement) Kind() ElementKind {
	return KindOption
}

func (o *optionElement) Name() string {
	if strings.HasPrefix(o.name, "(") {
		// remove parentheses
		return o.name[1 : len(o.name)-1]
	}
	return o.name
}

func (o *optionElement) Number() int32 {
	return 0
}

func (o *optionElement) NumberRange() (int32, int32) {
	return 0, 0
}

func (o *optionElement) Extendee() string {
	return ""
}

func (o *optionElement) IsCustomOption() bool {
	return strings.HasPrefix(o.name, "(")
}

// resvdRangeElement adapts a reserved tag range to the Element interface;
// only Kind and NumberRange are meaningful.
type resvdRangeElement reservedRange

var _ Element = resvdRangeElement{}

func (r resvdRangeElement) Kind() ElementKind {
	return KindReservedRange
}

func (r resvdRangeElement) Name() string {
	return ""
}

func (r resvdRangeElement) Number() int32 {
	return 0
}

func (r resvdRangeElement) NumberRange() (int32, int32) {
	return r.start, r.end
}

func (r resvdRangeElement) Extendee() string {
	return ""
}

func (r resvdRangeElement) IsCustomOption() bool {
	return false
}

// resvdNameElement adapts a reserved field name to the Element interface;
// only Kind and Name are meaningful.
type resvdNameElement string

var _ Element = resvdNameElement("")

func (r resvdNameElement) Kind() ElementKind {
	return KindReservedName
}

func (r resvdNameElement) Name() string {
	return string(r)
}

func (r resvdNameElement) Number() int32 {
	return 0
}

func (r resvdNameElement) NumberRange() (int32, int32) {
	return 0, 0
}

func (r resvdNameElement) Extendee() string {
	return ""
}

func (r resvdNameElement) IsCustomOption() bool {
	return false
}
+
+type fieldElement desc.FieldDescriptor
+
+var _ Element = (*fieldElement)(nil)
+
+func (f *fieldElement) Kind() ElementKind {
+ if (*desc.FieldDescriptor)(f).IsExtension() {
+ return KindExtension
+ }
+ return KindField
+}
+
+func (f *fieldElement) Name() string {
+ return (*desc.FieldDescriptor)(f).GetName()
+}
+
+func (f *fieldElement) Number() int32 {
+ return (*desc.FieldDescriptor)(f).GetNumber()
+}
+
+func (f *fieldElement) NumberRange() (int32, int32) {
+ return 0, 0
+}
+
+func (f *fieldElement) Extendee() string {
+ fd := (*desc.FieldDescriptor)(f)
+ if fd.IsExtension() {
+ fd.GetOwner().GetFullyQualifiedName()
+ }
+ return ""
+}
+
+func (f *fieldElement) IsCustomOption() bool {
+ return false
+}
+
// msgElement adapts a message descriptor to the Element interface; only
// Kind and Name are meaningful.
type msgElement desc.MessageDescriptor

var _ Element = (*msgElement)(nil)

func (m *msgElement) Kind() ElementKind {
	return KindMessage
}

func (m *msgElement) Name() string {
	return (*desc.MessageDescriptor)(m).GetName()
}

func (m *msgElement) Number() int32 {
	return 0
}

func (m *msgElement) NumberRange() (int32, int32) {
	return 0, 0
}

func (m *msgElement) Extendee() string {
	return ""
}

func (m *msgElement) IsCustomOption() bool {
	return false
}

// enumElement adapts an enum descriptor to the Element interface; only
// Kind and Name are meaningful.
type enumElement desc.EnumDescriptor

var _ Element = (*enumElement)(nil)

func (e *enumElement) Kind() ElementKind {
	return KindEnum
}

func (e *enumElement) Name() string {
	return (*desc.EnumDescriptor)(e).GetName()
}

func (e *enumElement) Number() int32 {
	return 0
}

func (e *enumElement) NumberRange() (int32, int32) {
	return 0, 0
}

func (e *enumElement) Extendee() string {
	return ""
}

func (e *enumElement) IsCustomOption() bool {
	return false
}

// enumValElement adapts an enum value descriptor to the Element interface;
// Kind, Name, and Number are meaningful.
type enumValElement desc.EnumValueDescriptor

var _ Element = (*enumValElement)(nil)

func (e *enumValElement) Kind() ElementKind {
	return KindEnumValue
}

func (e *enumValElement) Name() string {
	return (*desc.EnumValueDescriptor)(e).GetName()
}

func (e *enumValElement) Number() int32 {
	return (*desc.EnumValueDescriptor)(e).GetNumber()
}

func (e *enumValElement) NumberRange() (int32, int32) {
	return 0, 0
}

func (e *enumValElement) Extendee() string {
	return ""
}

func (e *enumValElement) IsCustomOption() bool {
	return false
}

// svcElement adapts a service descriptor to the Element interface; only
// Kind and Name are meaningful.
type svcElement desc.ServiceDescriptor

var _ Element = (*svcElement)(nil)

func (s *svcElement) Kind() ElementKind {
	return KindService
}

func (s *svcElement) Name() string {
	return (*desc.ServiceDescriptor)(s).GetName()
}

func (s *svcElement) Number() int32 {
	return 0
}

func (s *svcElement) NumberRange() (int32, int32) {
	return 0, 0
}

func (s *svcElement) Extendee() string {
	return ""
}

func (s *svcElement) IsCustomOption() bool {
	return false
}

// methodElement adapts a method descriptor to the Element interface; only
// Kind and Name are meaningful.
type methodElement desc.MethodDescriptor

var _ Element = (*methodElement)(nil)

func (m *methodElement) Kind() ElementKind {
	return KindMethod
}

func (m *methodElement) Name() string {
	return (*desc.MethodDescriptor)(m).GetName()
}

func (m *methodElement) Number() int32 {
	return 0
}

func (m *methodElement) NumberRange() (int32, int32) {
	return 0, 0
}

func (m *methodElement) Extendee() string {
	return ""
}

func (m *methodElement) IsCustomOption() bool {
	return false
}

// extRangeElement adapts an extension range to the Element interface; only
// Kind and NumberRange are meaningful.
type extRangeElement descriptorpb.DescriptorProto_ExtensionRange

var _ Element = (*extRangeElement)(nil)

func (e *extRangeElement) Kind() ElementKind {
	return KindExtensionRange
}

func (e *extRangeElement) Name() string {
	return ""
}

func (e *extRangeElement) Number() int32 {
	return 0
}

func (e *extRangeElement) NumberRange() (int32, int32) {
	ext := (*descriptorpb.DescriptorProto_ExtensionRange)(e)
	return ext.GetStart(), ext.GetEnd()
}

func (e *extRangeElement) Extendee() string {
	return ""
}

func (e *extRangeElement) IsCustomOption() bool {
	return false
}
diff --git a/vendor/github.com/jhump/protoreflect/desc/sourceinfo/locations.go b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/locations.go
new file mode 100644
index 0000000..20d2d7a
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/locations.go
@@ -0,0 +1,207 @@
+package sourceinfo
+
+import (
+ "math"
+ "sync"
+
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/types/descriptorpb"
+
+ "github.com/jhump/protoreflect/desc/internal"
+)
+
+// NB: forked from google.golang.org/protobuf/internal/filedesc
+//
+// sourceLocations implements protoreflect.SourceLocations on top of the
+// raw SourceCodeInfo_Location list, materializing lookup structures
+// lazily (see lazyInit).
+type sourceLocations struct {
+	protoreflect.SourceLocations
+
+	// orig is the raw location list from the file's SourceCodeInfo.
+	orig []*descriptorpb.SourceCodeInfo_Location
+	// locs is the converted list of SourceLocation values, parallel to orig.
+	// The SourceLocation.Next field does not need to be populated
+	// as it will be lazily populated upon first need.
+	locs []protoreflect.SourceLocation
+
+	// fd is the parent file descriptor that these locations are relative to.
+	// If non-nil, ByDescriptor verifies that the provided descriptor
+	// is a child of this file descriptor.
+	fd protoreflect.FileDescriptor
+
+	// once guards the lazy construction of locs and byPath.
+	once sync.Once
+	// byPath maps a path key to the index of the first location with that path.
+	byPath map[pathKey]int
+}
+
+// Len returns the number of source locations in the file.
+func (p *sourceLocations) Len() int { return len(p.orig) }
+// Get returns the i-th source location, materializing the list on first use.
+func (p *sourceLocations) Get(i int) protoreflect.SourceLocation {
+	return p.lazyInit().locs[i]
+}
+// byKey returns the first location whose path matches k, or the zero
+// value when no location has that path.
+func (p *sourceLocations) byKey(k pathKey) protoreflect.SourceLocation {
+	if i, ok := p.lazyInit().byPath[k]; ok {
+		return p.locs[i]
+	}
+	return protoreflect.SourceLocation{}
+}
+// ByPath implements protoreflect.SourceLocations.
+func (p *sourceLocations) ByPath(path protoreflect.SourcePath) protoreflect.SourceLocation {
+	return p.byKey(newPathKey(path))
+}
+// ByDescriptor implements protoreflect.SourceLocations. It reconstructs
+// the descriptor's source-info path by walking parent links up to the
+// file, pushing (child index, field tag) pairs in reverse order, then
+// reverses the path and looks it up. Unknown descriptor kinds or
+// unexpected parent kinds yield the zero SourceLocation.
+func (p *sourceLocations) ByDescriptor(desc protoreflect.Descriptor) protoreflect.SourceLocation {
+	if p.fd != nil && desc != nil && p.fd != desc.ParentFile() {
+		return protoreflect.SourceLocation{} // mismatching parent imports
+	}
+	// Small stack buffer; most paths are shallow so this avoids allocation.
+	var pathArr [16]int32
+	path := pathArr[:0]
+	for {
+		switch desc.(type) {
+		case protoreflect.FileDescriptor:
+			// Reverse the path since it was constructed in reverse.
+			for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 {
+				path[i], path[j] = path[j], path[i]
+			}
+			return p.byKey(newPathKey(path))
+		case protoreflect.MessageDescriptor:
+			path = append(path, int32(desc.Index()))
+			desc = desc.Parent()
+			switch desc.(type) {
+			case protoreflect.FileDescriptor:
+				path = append(path, int32(internal.File_messagesTag))
+			case protoreflect.MessageDescriptor:
+				path = append(path, int32(internal.Message_nestedMessagesTag))
+			default:
+				return protoreflect.SourceLocation{}
+			}
+		case protoreflect.FieldDescriptor:
+			// Extensions hang off files or messages; regular fields only
+			// off messages, so the parent tag differs by case.
+			isExtension := desc.(protoreflect.FieldDescriptor).IsExtension()
+			path = append(path, int32(desc.Index()))
+			desc = desc.Parent()
+			if isExtension {
+				switch desc.(type) {
+				case protoreflect.FileDescriptor:
+					path = append(path, int32(internal.File_extensionsTag))
+				case protoreflect.MessageDescriptor:
+					path = append(path, int32(internal.Message_extensionsTag))
+				default:
+					return protoreflect.SourceLocation{}
+				}
+			} else {
+				switch desc.(type) {
+				case protoreflect.MessageDescriptor:
+					path = append(path, int32(internal.Message_fieldsTag))
+				default:
+					return protoreflect.SourceLocation{}
+				}
+			}
+		case protoreflect.OneofDescriptor:
+			path = append(path, int32(desc.Index()))
+			desc = desc.Parent()
+			switch desc.(type) {
+			case protoreflect.MessageDescriptor:
+				path = append(path, int32(internal.Message_oneOfsTag))
+			default:
+				return protoreflect.SourceLocation{}
+			}
+		case protoreflect.EnumDescriptor:
+			path = append(path, int32(desc.Index()))
+			desc = desc.Parent()
+			switch desc.(type) {
+			case protoreflect.FileDescriptor:
+				path = append(path, int32(internal.File_enumsTag))
+			case protoreflect.MessageDescriptor:
+				path = append(path, int32(internal.Message_enumsTag))
+			default:
+				return protoreflect.SourceLocation{}
+			}
+		case protoreflect.EnumValueDescriptor:
+			path = append(path, int32(desc.Index()))
+			desc = desc.Parent()
+			switch desc.(type) {
+			case protoreflect.EnumDescriptor:
+				path = append(path, int32(internal.Enum_valuesTag))
+			default:
+				return protoreflect.SourceLocation{}
+			}
+		case protoreflect.ServiceDescriptor:
+			path = append(path, int32(desc.Index()))
+			desc = desc.Parent()
+			switch desc.(type) {
+			case protoreflect.FileDescriptor:
+				path = append(path, int32(internal.File_servicesTag))
+			default:
+				return protoreflect.SourceLocation{}
+			}
+		case protoreflect.MethodDescriptor:
+			path = append(path, int32(desc.Index()))
+			desc = desc.Parent()
+			switch desc.(type) {
+			case protoreflect.ServiceDescriptor:
+				path = append(path, int32(internal.Service_methodsTag))
+			default:
+				return protoreflect.SourceLocation{}
+			}
+		default:
+			return protoreflect.SourceLocation{}
+		}
+	}
+}
+// lazyInit builds locs and byPath exactly once (guarded by p.once) and
+// returns p for call chaining. It also links duplicate paths together
+// via the SourceLocation.Next index chain.
+func (p *sourceLocations) lazyInit() *sourceLocations {
+	p.once.Do(func() {
+		if len(p.orig) > 0 {
+			p.locs = make([]protoreflect.SourceLocation, len(p.orig))
+			// Collect all the indexes for a given path.
+			pathIdxs := make(map[pathKey][]int, len(p.locs))
+			for i := range p.orig {
+				l := asSourceLocation(p.orig[i])
+				p.locs[i] = l
+				k := newPathKey(l.Path)
+				pathIdxs[k] = append(pathIdxs[k], i)
+			}
+
+			// Update the next index for all locations.
+			p.byPath = make(map[pathKey]int, len(p.locs))
+			for k, idxs := range pathIdxs {
+				for i := 0; i < len(idxs)-1; i++ {
+					p.locs[idxs[i]].Next = idxs[i+1]
+				}
+				// Zero terminates the chain for the last duplicate.
+				p.locs[idxs[len(idxs)-1]].Next = 0
+				p.byPath[k] = idxs[0] // record the first location for this path
+			}
+		}
+	})
+	return p
+}
+
+func asSourceLocation(l *descriptorpb.SourceCodeInfo_Location) protoreflect.SourceLocation {
+ endLine := l.Span[0]
+ endCol := l.Span[2]
+ if len(l.Span) > 3 {
+ endLine = l.Span[2]
+ endCol = l.Span[3]
+ }
+ return protoreflect.SourceLocation{
+ Path: l.Path,
+ StartLine: int(l.Span[0]),
+ StartColumn: int(l.Span[1]),
+ EndLine: int(endLine),
+ EndColumn: int(endCol),
+ LeadingDetachedComments: l.LeadingDetachedComments,
+ LeadingComments: l.GetLeadingComments(),
+ TrailingComments: l.GetTrailingComments(),
+ }
+}
+
+// pathKey is a comparable representation of protoreflect.SourcePath.
+type pathKey struct {
+	arr [16]uint8 // first n-1 path segments; last element is the length
+	str string // used if the path does not fit in arr
+}
+
+// newPathKey packs p into the fixed array form when every segment fits
+// in a uint8 and the path is short enough; otherwise it falls back to
+// the (allocating) string form. Both forms are valid map keys.
+func newPathKey(p protoreflect.SourcePath) (k pathKey) {
+	if len(p) < len(k.arr) {
+		for i, ps := range p {
+			// Segments outside [0, 254] cannot be packed; use the string form.
+			if ps < 0 || math.MaxUint8 <= ps {
+				return pathKey{str: p.String()}
+			}
+			k.arr[i] = uint8(ps)
+		}
+		k.arr[len(k.arr)-1] = uint8(len(p))
+		return k
+	}
+	return pathKey{str: p.String()}
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/sourceinfo/registry.go b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/registry.go
new file mode 100644
index 0000000..8301c40
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/registry.go
@@ -0,0 +1,340 @@
+// Package sourceinfo provides the ability to register and query source code info
+// for file descriptors that are compiled into the binary. This data is registered
+// by code generated from the protoc-gen-gosrcinfo plugin.
+//
+// The standard descriptors bundled into the compiled binary are stripped of source
+// code info, to reduce binary size and reduce runtime memory footprint. However,
+// the source code info can be very handy and worth the size cost when used with
+// gRPC services and the server reflection service. Without source code info, the
+// descriptors that a client downloads from the reflection service have no comments.
+// But the presence of comments, and the ability to show them to humans, can greatly
+// improve the utility of user agents that use the reflection service.
+//
+// When the protoc-gen-gosrcinfo plugin is used, the desc.Load* methods, which load
+// descriptors for compiled-in elements, will automatically include source code
+// info, using the data registered with this package.
+//
+// In order to make the reflection service use this functionality, you will need to
+// be using v1.45 or higher of the Go runtime for gRPC (google.golang.org/grpc). The
+// following snippet demonstrates how to do this in your server. Do this instead of
+// using the reflection.Register function:
+//
+// refSvr := reflection.NewServer(reflection.ServerOptions{
+// Services: grpcServer,
+// DescriptorResolver: sourceinfo.GlobalFiles,
+// ExtensionResolver: sourceinfo.GlobalFiles,
+// })
+// grpc_reflection_v1alpha.RegisterServerReflectionServer(grpcServer, refSvr)
+package sourceinfo
+
+import (
+ "bytes"
+ "compress/gzip"
+ "errors"
+ "fmt"
+ "io"
+ "sync"
+
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protodesc"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/types/descriptorpb"
+)
+
+var (
+	// GlobalFiles is a registry of descriptors that include source code info, if the
+	// files they belong to were processed with protoc-gen-gosrcinfo.
+	//
+	// It is meant to serve as a drop-in alternative to protoregistry.GlobalFiles that
+	// can include source code info in the returned descriptors.
+	GlobalFiles Resolver = registry{}
+
+	// GlobalTypes is a registry of descriptors that include source code info, if the
+	// files they belong to were processed with protoc-gen-gosrcinfo.
+	//
+	// It is meant to serve as a drop-in alternative to protoregistry.GlobalTypes that
+	// can include source code info in the returned descriptors.
+	GlobalTypes TypeResolver = registry{}
+
+	// mu guards the registration state below.
+	mu sync.RWMutex
+	// sourceInfoByFile maps a file path to its registered source code info.
+	sourceInfoByFile = map[string]*descriptorpb.SourceCodeInfo{}
+	// fileDescriptors caches each original descriptor's rebuilt,
+	// source-info-bearing counterpart.
+	fileDescriptors = map[protoreflect.FileDescriptor]protoreflect.FileDescriptor{}
+	// updatedDescriptors holds the rebuilt files, with fallback to the
+	// global registry for unchanged imports.
+	updatedDescriptors filesWithFallback
+)
+
+// Resolver can resolve file names into file descriptors and also provides methods for
+// resolving extensions.
+type Resolver interface {
+	protodesc.Resolver
+	protoregistry.ExtensionTypeResolver
+	// RangeExtensionsByMessage iterates extensions of the given message,
+	// stopping when f returns false.
+	RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool)
+}
+
+// NB: These interfaces are far from ideal. Ideally, Resolver would have
+// * EITHER been named FileResolver and not included the extension methods.
+// * OR also included message methods (i.e. embed protoregistry.MessageTypeResolver).
+// Now (since it's been released) we can't add the message methods to the interface as
+// that's not a backwards-compatible change. So we have to introduce the new interface
+// below, which is now a little confusing since it has some overlap with Resolver.
+
+// TypeResolver can resolve message names and URLs into message descriptors and also
+// provides methods for resolving extensions.
+type TypeResolver interface {
+	protoregistry.MessageTypeResolver
+	protoregistry.ExtensionTypeResolver
+	// RangeExtensionsByMessage iterates extensions of the given message,
+	// stopping when f returns false.
+	RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool)
+}
+
+// RegisterSourceInfo registers the given source code info for the file descriptor
+// with the given path/name. Re-registering the same file overwrites the
+// previously stored info.
+//
+// This is automatically used from older generated code if using a previous release of
+// the protoc-gen-gosrcinfo plugin.
+func RegisterSourceInfo(file string, srcInfo *descriptorpb.SourceCodeInfo) {
+	mu.Lock()
+	defer mu.Unlock()
+	sourceInfoByFile[file] = srcInfo
+}
+
+// RegisterEncodedSourceInfo registers the given source code info, which is a serialized
+// and gzipped form of a google.protobuf.SourceCodeInfo message.
+//
+// This is automatically used from generated code if using the protoc-gen-gosrcinfo
+// plugin.
+func RegisterEncodedSourceInfo(file string, data []byte) error {
+ zipReader, err := gzip.NewReader(bytes.NewReader(data))
+ if err != nil {
+ return err
+ }
+ defer func() {
+ _ = zipReader.Close()
+ }()
+ unzipped, err := io.ReadAll(zipReader)
+ if err != nil {
+ return err
+ }
+ var srcInfo descriptorpb.SourceCodeInfo
+ if err := proto.Unmarshal(unzipped, &srcInfo); err != nil {
+ return err
+ }
+ RegisterSourceInfo(file, &srcInfo)
+ return nil
+}
+
+// SourceInfoForFile queries for any registered source code info for the file
+// descriptor with the given path/name. It returns nil if no source code info
+// was registered.
+func SourceInfoForFile(file string) *descriptorpb.SourceCodeInfo {
+	// Read lock only: this is a pure lookup.
+	mu.RLock()
+	defer mu.RUnlock()
+	return sourceInfoByFile[file]
+}
+
+// canUpgrade reports whether d is eligible for replacement with a
+// source-info-bearing version: it must be non-nil, its file must not
+// already carry source locations, and the file must be the exact
+// generated-code descriptor registered in protoregistry.GlobalFiles.
+func canUpgrade(d protoreflect.Descriptor) bool {
+	if d == nil {
+		return false
+	}
+	fd := d.ParentFile()
+	if fd.SourceLocations().Len() > 0 {
+		// already has source info
+		return false
+	}
+	if genFile, err := protoregistry.GlobalFiles.FindFileByPath(fd.Path()); err != nil || genFile != fd {
+		// given descriptor is not from generated code
+		return false
+	}
+	return true
+}
+
+// getFile returns the source-info-bearing version of fd, building and
+// caching it on first use. It uses a read-lock fast path for the cache
+// hit, then re-checks under the write lock (getFileLocked re-reads the
+// cache) before doing the rebuild.
+func getFile(fd protoreflect.FileDescriptor) (protoreflect.FileDescriptor, error) {
+	if !canUpgrade(fd) {
+		return fd, nil
+	}
+
+	// Fast path: already built.
+	mu.RLock()
+	result := fileDescriptors[fd]
+	mu.RUnlock()
+
+	if result != nil {
+		return result, nil
+	}
+
+	// Slow path: build under the write lock.
+	mu.Lock()
+	defer mu.Unlock()
+	result, err := getFileLocked(fd)
+	if err != nil {
+		return nil, fmt.Errorf("updating file %q: %w", fd.Path(), err)
+	}
+	return result, nil
+}
+
+// getFileLocked returns the source-info-bearing version of fd, building
+// (and caching) it on first use. mu must be held for writing. The file's
+// imports are upgraded first (recursively) so that references from the
+// rebuilt descriptor all resolve to upgraded files as well.
+func getFileLocked(fd protoreflect.FileDescriptor) (protoreflect.FileDescriptor, error) {
+	result := fileDescriptors[fd]
+	if result != nil {
+		return result, nil
+	}
+
+	// We have to build its dependencies, too, so that the descriptor's
+	// references *all* have source code info.
+	var deps []protoreflect.FileDescriptor
+	imps := fd.Imports()
+	for i, length := 0, imps.Len(); i < length; i++ {
+		origDep := imps.Get(i).FileDescriptor
+		updatedDep, err := getFileLocked(origDep)
+		if err != nil {
+			return nil, fmt.Errorf("updating import %q: %w", origDep.Path(), err)
+		}
+		if updatedDep != origDep && deps == nil {
+			// lazily init slice of deps and copy over deps before this one
+			deps = make([]protoreflect.FileDescriptor, i, length)
+			for j := 0; j < i; j++ {
+				// Fix: back-fill each preceding import j (the original
+				// code copied imps.Get(i), duplicating the current import
+				// into every earlier slot).
+				deps[j] = imps.Get(j).FileDescriptor
+			}
+		}
+		if deps != nil {
+			deps = append(deps, updatedDep)
+		}
+	}
+
+	srcInfo := sourceInfoByFile[fd.Path()]
+	if len(srcInfo.GetLocation()) == 0 && len(deps) == 0 {
+		// No registered source info and no upgraded imports:
+		// nothing to do; don't bother changing.
+		return fd, nil
+	}
+
+	// Add source code info and rebuild. updatedDescriptors resolves
+	// imports to their upgraded versions, falling back to the global
+	// registry for files that did not change.
+	fdProto := protodesc.ToFileDescriptorProto(fd)
+	fdProto.SourceCodeInfo = srcInfo
+
+	result, err := protodesc.NewFile(fdProto, &updatedDescriptors)
+	if err != nil {
+		return nil, err
+	}
+	if err := updatedDescriptors.RegisterFile(result); err != nil {
+		return nil, fmt.Errorf("registering import %q: %w", result.Path(), err)
+	}
+
+	fileDescriptors[fd] = result
+	return result, nil
+}
+
+// registry is a stateless facade over protoregistry's global registries
+// that upgrades results with registered source code info where possible.
+type registry struct{}
+
+// Compile-time check that registry satisfies protodesc.Resolver.
+var _ protodesc.Resolver = &registry{}
+
+// FindFileByPath resolves the file via the global registry and returns
+// its source-info-bearing version when one can be built.
+func (r registry) FindFileByPath(path string) (protoreflect.FileDescriptor, error) {
+	fd, err := protoregistry.GlobalFiles.FindFileByPath(path)
+	if err != nil {
+		return nil, err
+	}
+	return getFile(fd)
+}
+
+// FindDescriptorByName resolves the named descriptor via the global
+// registry and, when upgradeable, re-resolves it inside the rebuilt
+// (source-info-bearing) file. Dispatch is by concrete descriptor kind;
+// fields get special handling for extension type descriptors.
+func (r registry) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) {
+	d, err := protoregistry.GlobalFiles.FindDescriptorByName(name)
+	if err != nil {
+		return nil, err
+	}
+	if !canUpgrade(d) {
+		return d, nil
+	}
+	switch d := d.(type) {
+	case protoreflect.FileDescriptor:
+		return getFile(d)
+	case protoreflect.MessageDescriptor:
+		return updateDescriptor(d)
+	case protoreflect.FieldDescriptor:
+		return updateField(d)
+	case protoreflect.OneofDescriptor:
+		return updateDescriptor(d)
+	case protoreflect.EnumDescriptor:
+		return updateDescriptor(d)
+	case protoreflect.EnumValueDescriptor:
+		return updateDescriptor(d)
+	case protoreflect.ServiceDescriptor:
+		return updateDescriptor(d)
+	case protoreflect.MethodDescriptor:
+		return updateDescriptor(d)
+	default:
+		return nil, fmt.Errorf("unrecognized descriptor type: %T", d)
+	}
+}
+
+// FindMessageByName resolves the message type via the global registry
+// and wraps it so its descriptor carries source code info. If the
+// upgrade fails the original type is returned unchanged (best-effort;
+// the error is deliberately swallowed).
+func (r registry) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) {
+	mt, err := protoregistry.GlobalTypes.FindMessageByName(message)
+	if err != nil {
+		return nil, err
+	}
+	msg, err := updateDescriptor(mt.Descriptor())
+	if err != nil {
+		return mt, nil
+	}
+	return messageType{MessageType: mt, msgDesc: msg}, nil
+}
+
+// FindMessageByURL is like FindMessageByName but resolves a type URL;
+// the upgrade is likewise best-effort.
+func (r registry) FindMessageByURL(url string) (protoreflect.MessageType, error) {
+	mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
+	if err != nil {
+		return nil, err
+	}
+	msg, err := updateDescriptor(mt.Descriptor())
+	if err != nil {
+		return mt, nil
+	}
+	return messageType{MessageType: mt, msgDesc: msg}, nil
+}
+
+// FindExtensionByName resolves the extension type via the global
+// registry and wraps it with an upgraded descriptor. On upgrade failure
+// the original type is returned unchanged (best-effort).
+func (r registry) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
+	xt, err := protoregistry.GlobalTypes.FindExtensionByName(field)
+	if err != nil {
+		return nil, err
+	}
+	ext, err := updateDescriptor(xt.TypeDescriptor().Descriptor())
+	if err != nil {
+		return xt, nil
+	}
+	return extensionType{ExtensionType: xt, extDesc: ext}, nil
+}
+
+// FindExtensionByNumber is like FindExtensionByName but resolves by
+// extended message name and field number.
+func (r registry) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
+	xt, err := protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
+	if err != nil {
+		return nil, err
+	}
+	ext, err := updateDescriptor(xt.TypeDescriptor().Descriptor())
+	if err != nil {
+		return xt, nil
+	}
+	return extensionType{ExtensionType: xt, extDesc: ext}, nil
+}
+
+// RangeExtensionsByMessage iterates the global registry's extensions of
+// the given message, upgrading each to carry source code info when
+// possible; failures fall back to the original type (best-effort).
+func (r registry) RangeExtensionsByMessage(message protoreflect.FullName, fn func(protoreflect.ExtensionType) bool) {
+	protoregistry.GlobalTypes.RangeExtensionsByMessage(message, func(xt protoreflect.ExtensionType) bool {
+		ext, err := updateDescriptor(xt.TypeDescriptor().Descriptor())
+		if err != nil {
+			return fn(xt)
+		}
+		return fn(extensionType{ExtensionType: xt, extDesc: ext})
+	})
+}
+
+// filesWithFallback is a protoregistry.Files that falls back to the
+// global registry when a lookup is not found locally. It lets rebuilt
+// files resolve imports that were never upgraded.
+type filesWithFallback struct {
+	protoregistry.Files
+}
+
+// FindFileByPath looks up locally registered (rebuilt) files first,
+// then falls back to protoregistry.GlobalFiles on NotFound.
+func (f *filesWithFallback) FindFileByPath(path string) (protoreflect.FileDescriptor, error) {
+	fd, err := f.Files.FindFileByPath(path)
+	if errors.Is(err, protoregistry.NotFound) {
+		fd, err = protoregistry.GlobalFiles.FindFileByPath(path)
+	}
+	return fd, err
+}
+
+// FindDescriptorByName looks up locally registered (rebuilt) files
+// first, then falls back to protoregistry.GlobalFiles on NotFound.
+func (f *filesWithFallback) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) {
+	fd, err := f.Files.FindDescriptorByName(name)
+	if errors.Is(err, protoregistry.NotFound) {
+		fd, err = protoregistry.GlobalFiles.FindDescriptorByName(name)
+	}
+	return fd, err
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/sourceinfo/update.go b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/update.go
new file mode 100644
index 0000000..53bc457
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/update.go
@@ -0,0 +1,314 @@
+package sourceinfo
+
+import (
+ "fmt"
+
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+// AddSourceInfoToFile will return a new file descriptor that is a copy
+// of fd except that it includes source code info. If the given file
+// already contains source info, was not registered from generated code,
+// or was not processed with the protoc-gen-gosrcinfo plugin, then fd
+// is returned as is, unchanged.
+func AddSourceInfoToFile(fd protoreflect.FileDescriptor) (protoreflect.FileDescriptor, error) {
+	return getFile(fd)
+}
+
+// AddSourceInfoToMessage will return a new message descriptor that is a
+// copy of md except that it includes source code info. If the file that
+// contains the given message descriptor already contains source info,
+// was not registered from generated code, or was not processed with the
+// protoc-gen-gosrcinfo plugin, then md is returned as is, unchanged.
+func AddSourceInfoToMessage(md protoreflect.MessageDescriptor) (protoreflect.MessageDescriptor, error) {
+	return updateDescriptor(md)
+}
+
+// AddSourceInfoToEnum will return a new enum descriptor that is a copy
+// of ed except that it includes source code info. If the file that
+// contains the given enum descriptor already contains source info, was
+// not registered from generated code, or was not processed with the
+// protoc-gen-gosrcinfo plugin, then ed is returned as is, unchanged.
+func AddSourceInfoToEnum(ed protoreflect.EnumDescriptor) (protoreflect.EnumDescriptor, error) {
+	return updateDescriptor(ed)
+}
+
+// AddSourceInfoToService will return a new service descriptor that is
+// a copy of sd except that it includes source code info. If the file
+// that contains the given service descriptor already contains source
+// info, was not registered from generated code, or was not processed
+// with the protoc-gen-gosrcinfo plugin, then sd is returned as is,
+// unchanged.
+func AddSourceInfoToService(sd protoreflect.ServiceDescriptor) (protoreflect.ServiceDescriptor, error) {
+	return updateDescriptor(sd)
+}
+
+// AddSourceInfoToExtensionType will return a new extension type that
+// is a copy of xt except that its associated descriptors includes
+// source code info. If the file that contains the given extension
+// already contains source info, was not registered from generated
+// code, or was not processed with the protoc-gen-gosrcinfo plugin,
+// then xt is returned as is, unchanged.
+func AddSourceInfoToExtensionType(xt protoreflect.ExtensionType) (protoreflect.ExtensionType, error) {
+	// Only types that are the exact instances registered in the global
+	// type registry (i.e. from generated code) can be upgraded.
+	if genType, err := protoregistry.GlobalTypes.FindExtensionByName(xt.TypeDescriptor().FullName()); err != nil || genType != xt {
+		return xt, nil // not from generated code
+	}
+	ext, err := updateField(xt.TypeDescriptor().Descriptor())
+	if err != nil {
+		return nil, err
+	}
+	return extensionType{ExtensionType: xt, extDesc: ext}, nil
+}
+
+// AddSourceInfoToMessageType will return a new message type that
+// is a copy of mt except that its associated descriptors includes
+// source code info. If the file that contains the given message
+// already contains source info, was not registered from generated
+// code, or was not processed with the protoc-gen-gosrcinfo plugin,
+// then mt is returned as is, unchanged.
+func AddSourceInfoToMessageType(mt protoreflect.MessageType) (protoreflect.MessageType, error) {
+	// Only types that are the exact instances registered in the global
+	// type registry (i.e. from generated code) can be upgraded.
+	if genType, err := protoregistry.GlobalTypes.FindMessageByName(mt.Descriptor().FullName()); err != nil || genType != mt {
+		return mt, nil // not from generated code
+	}
+	msg, err := updateDescriptor(mt.Descriptor())
+	if err != nil {
+		return nil, err
+	}
+	return messageType{MessageType: mt, msgDesc: msg}, nil
+}
+
+// WrapFile is present for backwards-compatibility reasons. It calls
+// AddSourceInfoToFile and panics if that function returns an error.
+//
+// Deprecated: Use AddSourceInfoToFile directly instead. The word "wrap" is
+// a misnomer since this method does not actually wrap the given value.
+// Though unlikely, the operation can technically fail, so the recommended
+// function allows the return of an error instead of panic'ing.
+func WrapFile(fd protoreflect.FileDescriptor) protoreflect.FileDescriptor {
+	// Delegate to the error-returning variant; panic preserves the
+	// original API contract.
+	result, err := AddSourceInfoToFile(fd)
+	if err != nil {
+		panic(err)
+	}
+	return result
+}
+
+// WrapMessage is present for backwards-compatibility reasons. It calls
+// AddSourceInfoToMessage and panics if that function returns an error.
+//
+// Deprecated: Use AddSourceInfoToMessage directly instead. The word
+// "wrap" is a misnomer since this method does not actually wrap the
+// given value. Though unlikely, the operation can technically fail,
+// so the recommended function allows the return of an error instead
+// of panic'ing.
+func WrapMessage(md protoreflect.MessageDescriptor) protoreflect.MessageDescriptor {
+	// Delegate to the error-returning variant; panic preserves the
+	// original API contract.
+	result, err := AddSourceInfoToMessage(md)
+	if err != nil {
+		panic(err)
+	}
+	return result
+}
+
+// WrapEnum is present for backwards-compatibility reasons. It calls
+// AddSourceInfoToEnum and panics if that function returns an error.
+//
+// Deprecated: Use AddSourceInfoToEnum directly instead. The word
+// "wrap" is a misnomer since this method does not actually wrap the
+// given value. Though unlikely, the operation can technically fail,
+// so the recommended function allows the return of an error instead
+// of panic'ing.
+func WrapEnum(ed protoreflect.EnumDescriptor) protoreflect.EnumDescriptor {
+	// Delegate to the error-returning variant; panic preserves the
+	// original API contract.
+	result, err := AddSourceInfoToEnum(ed)
+	if err != nil {
+		panic(err)
+	}
+	return result
+}
+
+// WrapService is present for backwards-compatibility reasons. It calls
+// AddSourceInfoToService and panics if that function returns an error.
+//
+// Deprecated: Use AddSourceInfoToService directly instead. The word
+// "wrap" is a misnomer since this method does not actually wrap the
+// given value. Though unlikely, the operation can technically fail,
+// so the recommended function allows the return of an error instead
+// of panic'ing.
+func WrapService(sd protoreflect.ServiceDescriptor) protoreflect.ServiceDescriptor {
+	// Delegate to the error-returning variant; panic preserves the
+	// original API contract.
+	result, err := AddSourceInfoToService(sd)
+	if err != nil {
+		panic(err)
+	}
+	return result
+}
+
+// WrapExtensionType is present for backwards-compatibility reasons. It
+// calls AddSourceInfoToExtensionType and panics if that function
+// returns an error.
+//
+// Deprecated: Use AddSourceInfoToExtensionType directly instead. The
+// word "wrap" is a misnomer since this method does not actually wrap
+// the given value. Though unlikely, the operation can technically fail,
+// so the recommended function allows the return of an error instead
+// of panic'ing.
+func WrapExtensionType(xt protoreflect.ExtensionType) protoreflect.ExtensionType {
+	// Delegate to the error-returning variant; panic preserves the
+	// original API contract.
+	result, err := AddSourceInfoToExtensionType(xt)
+	if err != nil {
+		panic(err)
+	}
+	return result
+}
+
+// WrapMessageType is present for backwards-compatibility reasons. It
+// calls AddSourceInfoToMessageType and panics if that function returns
+// an error.
+//
+// Deprecated: Use AddSourceInfoToMessageType directly instead. The word
+// "wrap" is a misnomer since this method does not actually wrap the
+// given value. Though unlikely, the operation can technically fail, so
+// the recommended function allows the return of an error instead of
+// panic'ing.
+func WrapMessageType(mt protoreflect.MessageType) protoreflect.MessageType {
+	// Delegate to the error-returning variant; panic preserves the
+	// original API contract.
+	result, err := AddSourceInfoToMessageType(mt)
+	if err != nil {
+		panic(err)
+	}
+	return result
+}
+
+// extensionType decorates a protoreflect.ExtensionType so that its
+// TypeDescriptor reports the upgraded (source-info-bearing) descriptor.
+type extensionType struct {
+	protoreflect.ExtensionType
+	extDesc protoreflect.ExtensionDescriptor
+}
+
+// TypeDescriptor returns the upgraded descriptor paired with the
+// original extension type.
+func (xt extensionType) TypeDescriptor() protoreflect.ExtensionTypeDescriptor {
+	return extensionTypeDescriptor{ExtensionDescriptor: xt.extDesc, extType: xt.ExtensionType}
+}
+
+// extensionTypeDescriptor pairs an upgraded extension descriptor with
+// its originating extension type, implementing ExtensionTypeDescriptor.
+type extensionTypeDescriptor struct {
+	protoreflect.ExtensionDescriptor
+	extType protoreflect.ExtensionType
+}
+
+func (xtd extensionTypeDescriptor) Type() protoreflect.ExtensionType {
+	return extensionType{ExtensionType: xtd.extType, extDesc: xtd.ExtensionDescriptor}
+}
+
+func (xtd extensionTypeDescriptor) Descriptor() protoreflect.ExtensionDescriptor {
+	return xtd.ExtensionDescriptor
+}
+
+// messageType decorates a protoreflect.MessageType so that Descriptor
+// reports the upgraded (source-info-bearing) descriptor.
+type messageType struct {
+	protoreflect.MessageType
+	msgDesc protoreflect.MessageDescriptor
+}
+
+func (mt messageType) Descriptor() protoreflect.MessageDescriptor {
+	return mt.msgDesc
+}
+
+// updateField upgrades a field descriptor. Extension type descriptors
+// are unwrapped, upgraded, then re-wrapped so the result still carries
+// its extension type.
+func updateField(fd protoreflect.FieldDescriptor) (protoreflect.FieldDescriptor, error) {
+	if xtd, ok := fd.(protoreflect.ExtensionTypeDescriptor); ok {
+		ext, err := updateField(xtd.Descriptor())
+		if err != nil {
+			return nil, err
+		}
+		return extensionTypeDescriptor{ExtensionDescriptor: ext, extType: xtd.Type()}, nil
+	}
+	d, err := updateDescriptor(fd)
+	if err != nil {
+		return nil, err
+	}
+	return d.(protoreflect.FieldDescriptor), nil
+}
+
+// updateDescriptor upgrades any descriptor by rebuilding its parent
+// file with source info and re-locating the corresponding element
+// inside the rebuilt file. If the file is unchanged, d is returned
+// unmodified.
+func updateDescriptor[D protoreflect.Descriptor](d D) (D, error) {
+	updatedFile, err := getFile(d.ParentFile())
+	if err != nil {
+		var zero D
+		return zero, err
+	}
+	if updatedFile == d.ParentFile() {
+		// no change
+		return d, nil
+	}
+	updated := findDescriptor(updatedFile, d)
+	result, ok := updated.(D)
+	if !ok {
+		var zero D
+		return zero, fmt.Errorf("updated result is type %T which could not be converted to %T", updated, result)
+	}
+	return result, nil
+}
+
+// findDescriptor locates, inside the rebuilt file fd, the element at
+// the same position as d: it recursively resolves d's parent and then
+// indexes the matching child collection with d.Index().
+//
+// NOTE(review): the container type assertions are unchecked — they
+// appear to rely on the rebuilt file having the exact same shape as the
+// original, and would panic otherwise.
+func findDescriptor(fd protoreflect.FileDescriptor, d protoreflect.Descriptor) protoreflect.Descriptor {
+	if d == nil {
+		return nil
+	}
+	if _, isFile := d.(protoreflect.FileDescriptor); isFile {
+		return fd
+	}
+	if d.Parent() == nil {
+		return d
+	}
+	switch d := d.(type) {
+	case protoreflect.MessageDescriptor:
+		parent := findDescriptor(fd, d.Parent()).(messageContainer)
+		return parent.Messages().Get(d.Index())
+	case protoreflect.FieldDescriptor:
+		// Extensions and plain fields live in different child collections.
+		if d.IsExtension() {
+			parent := findDescriptor(fd, d.Parent()).(extensionContainer)
+			return parent.Extensions().Get(d.Index())
+		} else {
+			parent := findDescriptor(fd, d.Parent()).(fieldContainer)
+			return parent.Fields().Get(d.Index())
+		}
+	case protoreflect.OneofDescriptor:
+		parent := findDescriptor(fd, d.Parent()).(oneofContainer)
+		return parent.Oneofs().Get(d.Index())
+	case protoreflect.EnumDescriptor:
+		parent := findDescriptor(fd, d.Parent()).(enumContainer)
+		return parent.Enums().Get(d.Index())
+	case protoreflect.EnumValueDescriptor:
+		parent := findDescriptor(fd, d.Parent()).(enumValueContainer)
+		return parent.Values().Get(d.Index())
+	case protoreflect.ServiceDescriptor:
+		parent := findDescriptor(fd, d.Parent()).(serviceContainer)
+		return parent.Services().Get(d.Index())
+	case protoreflect.MethodDescriptor:
+		parent := findDescriptor(fd, d.Parent()).(methodContainer)
+		return parent.Methods().Get(d.Index())
+	}
+	return d
+}
+
+// The following single-method interfaces capture the child-collection
+// capabilities of descriptors, used by findDescriptor to navigate a
+// rebuilt file generically.
+type messageContainer interface {
+	Messages() protoreflect.MessageDescriptors
+}
+
+type extensionContainer interface {
+	Extensions() protoreflect.ExtensionDescriptors
+}
+
+type fieldContainer interface {
+	Fields() protoreflect.FieldDescriptors
+}
+
+type oneofContainer interface {
+	Oneofs() protoreflect.OneofDescriptors
+}
+
+type enumContainer interface {
+	Enums() protoreflect.EnumDescriptors
+}
+
+type enumValueContainer interface {
+	Values() protoreflect.EnumValueDescriptors
+}
+
+type serviceContainer interface {
+	Services() protoreflect.ServiceDescriptors
+}
+
+type methodContainer interface {
+	Methods() protoreflect.MethodDescriptors
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/wrap.go b/vendor/github.com/jhump/protoreflect/desc/wrap.go
new file mode 100644
index 0000000..5491afd
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/wrap.go
@@ -0,0 +1,211 @@
+package desc
+
+import (
+ "fmt"
+
+ "github.com/bufbuild/protocompile/protoutil"
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// DescriptorWrapper wraps a protoreflect.Descriptor. All of the Descriptor
+// implementations in this package implement this interface. This can be
+// used to recover the underlying descriptor. Each descriptor type in this
+// package also provides a strongly-typed form of this method, such as the
+// following method for *FileDescriptor:
+//
+// UnwrapFile() protoreflect.FileDescriptor
+type DescriptorWrapper interface {
+ Unwrap() protoreflect.Descriptor
+}
+
+// WrapDescriptor wraps the given descriptor, returning a desc.Descriptor
+// value that represents the same element.
+func WrapDescriptor(d protoreflect.Descriptor) (Descriptor, error) {
+ // A fresh cache scopes memoization to this single call.
+ return wrapDescriptor(d, mapCache{})
+}
+
+// wrapDescriptor dispatches on the concrete kind of d and delegates to the
+// kind-specific wrap function, sharing the given cache across the recursive
+// wrapping. Unknown descriptor kinds yield an error rather than a panic.
+func wrapDescriptor(d protoreflect.Descriptor, cache descriptorCache) (Descriptor, error) {
+ switch d := d.(type) {
+ case protoreflect.FileDescriptor:
+ return wrapFile(d, cache)
+ case protoreflect.MessageDescriptor:
+ return wrapMessage(d, cache)
+ case protoreflect.FieldDescriptor:
+ return wrapField(d, cache)
+ case protoreflect.OneofDescriptor:
+ return wrapOneOf(d, cache)
+ case protoreflect.EnumDescriptor:
+ return wrapEnum(d, cache)
+ case protoreflect.EnumValueDescriptor:
+ return wrapEnumValue(d, cache)
+ case protoreflect.ServiceDescriptor:
+ return wrapService(d, cache)
+ case protoreflect.MethodDescriptor:
+ return wrapMethod(d, cache)
+ default:
+ return nil, fmt.Errorf("unknown descriptor type: %T", d)
+ }
+}
+
+// WrapFiles wraps the given file descriptors, returning a slice of *desc.FileDescriptor
+// values that represent the same files.
+func WrapFiles(d []protoreflect.FileDescriptor) ([]*FileDescriptor, error) {
+ // One cache is shared across all files so that dependencies common to
+ // several of them are only wrapped once.
+ cache := mapCache{}
+ results := make([]*FileDescriptor, len(d))
+ for i := range d {
+ var err error
+ results[i], err = wrapFile(d[i], cache)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return results, nil
+}
+
+// WrapFile wraps the given file descriptor, returning a *desc.FileDescriptor
+// value that represents the same file.
+func WrapFile(d protoreflect.FileDescriptor) (*FileDescriptor, error) {
+ return wrapFile(d, mapCache{})
+}
+
+// wrapFile returns a cached wrapper for d if one exists; otherwise it
+// recovers the FileDescriptorProto for d and builds a full wrapper from it.
+func wrapFile(d protoreflect.FileDescriptor, cache descriptorCache) (*FileDescriptor, error) {
+ if res := cache.get(d); res != nil {
+ return res.(*FileDescriptor), nil
+ }
+ fdp := protoutil.ProtoFromFileDescriptor(d)
+ return convertFile(d, fdp, cache)
+}
+
+// WrapMessage wraps the given message descriptor, returning a *desc.MessageDescriptor
+// value that represents the same message.
+func WrapMessage(d protoreflect.MessageDescriptor) (*MessageDescriptor, error) {
+ return wrapMessage(d, mapCache{})
+}
+
+// wrapMessage wraps d's parent first (which materializes the wrappers for all
+// of the parent's children), then selects this message from the parent by
+// index. A message's parent is either a file (top-level message) or another
+// message (nested message).
+func wrapMessage(d protoreflect.MessageDescriptor, cache descriptorCache) (*MessageDescriptor, error) {
+ parent, err := wrapDescriptor(d.Parent(), cache)
+ if err != nil {
+ return nil, err
+ }
+ switch p := parent.(type) {
+ case *FileDescriptor:
+ return p.messages[d.Index()], nil
+ case *MessageDescriptor:
+ return p.nested[d.Index()], nil
+ default:
+ return nil, fmt.Errorf("message has unexpected parent type: %T", parent)
+ }
+}
+
+// WrapField wraps the given field descriptor, returning a *desc.FieldDescriptor
+// value that represents the same field.
+func WrapField(d protoreflect.FieldDescriptor) (*FieldDescriptor, error) {
+ return wrapField(d, mapCache{})
+}
+
+// wrapField wraps d's parent and then selects this field by index. A field
+// whose parent is a file is necessarily an extension; a field whose parent
+// is a message may be either a regular field or an extension declared inside
+// that message.
+func wrapField(d protoreflect.FieldDescriptor, cache descriptorCache) (*FieldDescriptor, error) {
+ parent, err := wrapDescriptor(d.Parent(), cache)
+ if err != nil {
+ return nil, err
+ }
+ switch p := parent.(type) {
+ case *FileDescriptor:
+ return p.extensions[d.Index()], nil
+ case *MessageDescriptor:
+ if d.IsExtension() {
+ return p.extensions[d.Index()], nil
+ }
+ return p.fields[d.Index()], nil
+ default:
+ return nil, fmt.Errorf("field has unexpected parent type: %T", parent)
+ }
+}
+
+// WrapOneOf wraps the given oneof descriptor, returning a *desc.OneOfDescriptor
+// value that represents the same oneof.
+func WrapOneOf(d protoreflect.OneofDescriptor) (*OneOfDescriptor, error) {
+ return wrapOneOf(d, mapCache{})
+}
+
+// wrapOneOf wraps d's parent and selects this oneof by index. A oneof's
+// parent is always a message.
+func wrapOneOf(d protoreflect.OneofDescriptor, cache descriptorCache) (*OneOfDescriptor, error) {
+ parent, err := wrapDescriptor(d.Parent(), cache)
+ if err != nil {
+ return nil, err
+ }
+ if p, ok := parent.(*MessageDescriptor); ok {
+ return p.oneOfs[d.Index()], nil
+ }
+ return nil, fmt.Errorf("oneof has unexpected parent type: %T", parent)
+}
+
+// WrapEnum wraps the given enum descriptor, returning a *desc.EnumDescriptor
+// value that represents the same enum.
+func WrapEnum(d protoreflect.EnumDescriptor) (*EnumDescriptor, error) {
+ return wrapEnum(d, mapCache{})
+}
+
+// wrapEnum wraps d's parent and selects this enum by index. An enum's parent
+// is either a file (top-level enum) or a message (nested enum); both store
+// their enums in a field named "enums", hence the identical case bodies.
+func wrapEnum(d protoreflect.EnumDescriptor, cache descriptorCache) (*EnumDescriptor, error) {
+ parent, err := wrapDescriptor(d.Parent(), cache)
+ if err != nil {
+ return nil, err
+ }
+ switch p := parent.(type) {
+ case *FileDescriptor:
+ return p.enums[d.Index()], nil
+ case *MessageDescriptor:
+ return p.enums[d.Index()], nil
+ default:
+ return nil, fmt.Errorf("enum has unexpected parent type: %T", parent)
+ }
+}
+
+// WrapEnumValue wraps the given enum value descriptor, returning a *desc.EnumValueDescriptor
+// value that represents the same enum value.
+func WrapEnumValue(d protoreflect.EnumValueDescriptor) (*EnumValueDescriptor, error) {
+ return wrapEnumValue(d, mapCache{})
+}
+
+// wrapEnumValue wraps d's parent and selects this value by index. An enum
+// value's parent is always an enum.
+func wrapEnumValue(d protoreflect.EnumValueDescriptor, cache descriptorCache) (*EnumValueDescriptor, error) {
+ parent, err := wrapDescriptor(d.Parent(), cache)
+ if err != nil {
+ return nil, err
+ }
+ if p, ok := parent.(*EnumDescriptor); ok {
+ return p.values[d.Index()], nil
+ }
+ return nil, fmt.Errorf("enum value has unexpected parent type: %T", parent)
+}
+
+// WrapService wraps the given service descriptor, returning a *desc.ServiceDescriptor
+// value that represents the same service.
+func WrapService(d protoreflect.ServiceDescriptor) (*ServiceDescriptor, error) {
+ return wrapService(d, mapCache{})
+}
+
+// wrapService wraps d's parent and selects this service by index. A
+// service's parent is always a file.
+func wrapService(d protoreflect.ServiceDescriptor, cache descriptorCache) (*ServiceDescriptor, error) {
+ parent, err := wrapDescriptor(d.Parent(), cache)
+ if err != nil {
+ return nil, err
+ }
+ if p, ok := parent.(*FileDescriptor); ok {
+ return p.services[d.Index()], nil
+ }
+ return nil, fmt.Errorf("service has unexpected parent type: %T", parent)
+}
+
+// WrapMethod wraps the given method descriptor, returning a *desc.MethodDescriptor
+// value that represents the same method.
+func WrapMethod(d protoreflect.MethodDescriptor) (*MethodDescriptor, error) {
+ return wrapMethod(d, mapCache{})
+}
+
+// wrapMethod wraps d's parent and selects this method by index. A method's
+// parent is always a service.
+func wrapMethod(d protoreflect.MethodDescriptor, cache descriptorCache) (*MethodDescriptor, error) {
+ parent, err := wrapDescriptor(d.Parent(), cache)
+ if err != nil {
+ return nil, err
+ }
+ if p, ok := parent.(*ServiceDescriptor); ok {
+ return p.methods[d.Index()], nil
+ }
+ return nil, fmt.Errorf("method has unexpected parent type: %T", parent)
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/binary.go b/vendor/github.com/jhump/protoreflect/dynamic/binary.go
new file mode 100644
index 0000000..39e077a
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/binary.go
@@ -0,0 +1,193 @@
+package dynamic
+
+// Binary serialization and de-serialization for dynamic messages
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/jhump/protoreflect/codec"
+)
+
+// defaultDeterminism, if true, will mean that calls to Marshal will produce
+// deterministic output. This is used to make the output of proto.Marshal(...)
+// deterministic (since there is no way to have that convey determinism intent).
+// **This is only used from tests.**
+var defaultDeterminism = false
+
+// Marshal serializes this message to bytes, returning an error if the operation
+// fails. The resulting bytes are in the standard protocol buffer binary format.
+func (m *Message) Marshal() ([]byte, error) {
+ // The zero-value codec.Buffer is an empty buffer ready for writing.
+ var b codec.Buffer
+ b.SetDeterministic(defaultDeterminism)
+ if err := m.marshal(&b); err != nil {
+ return nil, err
+ }
+ return b.Bytes(), nil
+}
+
+// MarshalAppend behaves exactly the same as Marshal, except instead of allocating a
+// new byte slice to marshal into, it uses the provided byte slice. The backing array
+// for the returned byte slice *may* be the same as the one that was passed in, but
+// it's not guaranteed as a new backing array will automatically be allocated if
+// more bytes need to be written than the provided buffer has capacity for.
+func (m *Message) MarshalAppend(b []byte) ([]byte, error) {
+ // Seed the codec buffer with the caller's slice so new bytes are appended
+ // after its existing contents.
+ codedBuf := codec.NewBuffer(b)
+ codedBuf.SetDeterministic(defaultDeterminism)
+ if err := m.marshal(codedBuf); err != nil {
+ return nil, err
+ }
+ return codedBuf.Bytes(), nil
+}
+
+// MarshalDeterministic serializes this message to bytes in a deterministic way,
+// returning an error if the operation fails. This differs from Marshal in that
+// map keys will be sorted before serializing to bytes. The protobuf spec does
+// not define ordering for map entries, so Marshal will use standard Go map
+// iteration order (which will be random). But for cases where determinism is
+// more important than performance, use this method instead.
+func (m *Message) MarshalDeterministic() ([]byte, error) {
+ var b codec.Buffer
+ b.SetDeterministic(true)
+ if err := m.marshal(&b); err != nil {
+ return nil, err
+ }
+ return b.Bytes(), nil
+}
+
+// MarshalAppendDeterministic behaves exactly the same as MarshalDeterministic,
+// except instead of allocating a new byte slice to marshal into, it uses the
+// provided byte slice. The backing array for the returned byte slice *may* be
+// the same as the one that was passed in, but it's not guaranteed as a new
+// backing array will automatically be allocated if more bytes need to be written
+// than the provided buffer has capacity for.
+func (m *Message) MarshalAppendDeterministic(b []byte) ([]byte, error) {
+ codedBuf := codec.NewBuffer(b)
+ codedBuf.SetDeterministic(true)
+ if err := m.marshal(codedBuf); err != nil {
+ return nil, err
+ }
+ return codedBuf.Bytes(), nil
+}
+
+// marshal writes the full wire-format encoding of m into b: known fields
+// first, then any preserved unknown fields. Message-set wire format is
+// explicitly unsupported and reported as an error.
+func (m *Message) marshal(b *codec.Buffer) error {
+ if m.GetMessageDescriptor().GetMessageOptions().GetMessageSetWireFormat() {
+ return fmt.Errorf("%s is a message set; marshaling message sets is not implemented", m.GetMessageDescriptor().GetFullyQualifiedName())
+ }
+ if err := m.marshalKnownFields(b); err != nil {
+ return err
+ }
+ return m.marshalUnknownFields(b)
+}
+
+// marshalKnownFields encodes every known (recognized) field of m, iterating
+// tags in the order produced by knownFieldTags().
+func (m *Message) marshalKnownFields(b *codec.Buffer) error {
+ for _, tag := range m.knownFieldTags() {
+ itag := int32(tag)
+ val := m.values[itag]
+ fd := m.FindFieldDescriptor(itag)
+ if fd == nil {
+ // Invariant violation: the tag came from m's own known-field set,
+ // so a descriptor for it must exist.
+ panic(fmt.Sprintf("Couldn't find field for tag %d", itag))
+ }
+ if err := b.EncodeFieldValue(fd, val); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// marshalUnknownFields re-encodes every preserved unknown field of m,
+// emitting each with its original tag and wire type. Unknown fields with an
+// unrecognized wire type cause codec.ErrBadWireType.
+func (m *Message) marshalUnknownFields(b *codec.Buffer) error {
+ for _, tag := range m.unknownFieldTags() {
+ itag := int32(tag)
+ sl := m.unknownFields[itag]
+ for _, u := range sl {
+ if err := b.EncodeTagAndWireType(itag, u.Encoding); err != nil {
+ return err
+ }
+ switch u.Encoding {
+ case proto.WireBytes:
+ // Length-delimited: Contents holds the raw payload.
+ if err := b.EncodeRawBytes(u.Contents); err != nil {
+ return err
+ }
+ case proto.WireStartGroup:
+ // Contents holds the already-encoded group body; write it
+ // verbatim and then append the matching end-group tag.
+ // Write error deliberately ignored — presumably codec.Buffer
+ // writes to memory and cannot fail here (TODO confirm).
+ _, _ = b.Write(u.Contents)
+ if err := b.EncodeTagAndWireType(itag, proto.WireEndGroup); err != nil {
+ return err
+ }
+ case proto.WireFixed32:
+ if err := b.EncodeFixed32(u.Value); err != nil {
+ return err
+ }
+ case proto.WireFixed64:
+ if err := b.EncodeFixed64(u.Value); err != nil {
+ return err
+ }
+ case proto.WireVarint:
+ if err := b.EncodeVarint(u.Value); err != nil {
+ return err
+ }
+ default:
+ return codec.ErrBadWireType
+ }
+ }
+ }
+ return nil
+}
+
+// Unmarshal de-serializes the message that is present in the given bytes into
+// this message. It first resets the current message. It returns an error if the
+// given bytes do not contain a valid encoding of this message type.
+func (m *Message) Unmarshal(b []byte) error {
+ // Reset + merge + validate: a failed Validate (e.g. missing required
+ // fields) is reported even though the merge itself succeeded.
+ m.Reset()
+ if err := m.UnmarshalMerge(b); err != nil {
+ return err
+ }
+ return m.Validate()
+}
+
+// UnmarshalMerge de-serializes the message that is present in the given bytes
+// into this message. Unlike Unmarshal, it does not first reset the message,
+// instead merging the data in the given bytes into the existing data in this
+// message.
+func (m *Message) UnmarshalMerge(b []byte) error {
+ return m.unmarshal(codec.NewBuffer(b), false)
+}
+
+// unmarshal decodes fields from buf into m until the buffer is exhausted (or,
+// when isGroup is true, until the matching end-group tag is seen). Fields not
+// found by m.FindFieldDescriptor are preserved in m.unknownFields. Message-set
+// wire format is explicitly unsupported.
+func (m *Message) unmarshal(buf *codec.Buffer, isGroup bool) error {
+ if m.GetMessageDescriptor().GetMessageOptions().GetMessageSetWireFormat() {
+ return fmt.Errorf("%s is a message set; unmarshaling message sets is not implemented", m.GetMessageDescriptor().GetFullyQualifiedName())
+ }
+ for !buf.EOF() {
+ fd, val, err := buf.DecodeFieldValue(m.FindFieldDescriptor, m.mf)
+ if err != nil {
+ if err == codec.ErrWireTypeEndGroup {
+ // An end-group tag is only legal while parsing a group.
+ if isGroup {
+ // finished parsing group
+ return nil
+ }
+ return codec.ErrBadWireType
+ }
+ return err
+ }
+
+ if fd == nil {
+ // Unrecognized tag: stash the raw field so it survives a
+ // round-trip (see marshalUnknownFields).
+ if m.unknownFields == nil {
+ m.unknownFields = map[int32][]UnknownField{}
+ }
+ uv := val.(codec.UnknownField)
+ u := UnknownField{
+ Encoding: uv.Encoding,
+ Value: uv.Value,
+ Contents: uv.Contents,
+ }
+ m.unknownFields[uv.Tag] = append(m.unknownFields[uv.Tag], u)
+ } else if err := mergeField(m, fd, val); err != nil {
+ return err
+ }
+ }
+ if isGroup {
+ // A group must be terminated by an end-group tag, not by EOF.
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/doc.go b/vendor/github.com/jhump/protoreflect/dynamic/doc.go
new file mode 100644
index 0000000..59b77eb
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/doc.go
@@ -0,0 +1,167 @@
+// Package dynamic provides an implementation for a dynamic protobuf message.
+//
+// The dynamic message is essentially a message descriptor along with a map of
+// tag numbers to values. It has a broad API for interacting with the message,
+// including inspection and modification. Generally, most operations have two
+// forms: a regular method that panics on bad input or error and a "Try" form
+// of the method that will instead return an error.
+//
+// A dynamic message can optionally be constructed with a MessageFactory. The
+// MessageFactory has various registries that may be used by the dynamic message,
+// such as during de-serialization. The message factory is "inherited" by any
+// other dynamic messages created, such as nested messages that are created
+// during de-serialization. Similarly, any dynamic message created using
+// MessageFactory.NewMessage will be associated with that factory, which in turn
+// will be used to create other messages or parse extension fields during
+// de-serialization.
+//
+// # Field Types
+//
+// The types of values expected by setters and returned by getters are the
+// same as protoc generates for scalar fields. For repeated fields, there are
+// methods for getting and setting values at a particular index or for adding
+// an element. Similarly, for map fields, there are methods for getting and
+// setting values for a particular key.
+//
+// If you use GetField for a repeated field, it will return a copy of all
+// elements as a slice []interface{}. Similarly, using GetField for a map field
+// will return a copy of all mappings as a map[interface{}]interface{}. You can
+// also use SetField to supply an entire slice or map for repeated or map fields.
+// The slice need not be []interface{} but can actually be typed according to
+// the field's expected type. For example, a repeated uint64 field can be set
+// using a slice of type []uint64.
+//
+// Descriptors for map fields describe them as repeated fields with a nested
+// message type. The nested message type is a special generated type that
+// represents a single mapping: key and value pair. The dynamic message has some
+// special affordances for this representation. For example, you can use
+// SetField to set a map field using a slice of these entry messages. Internally,
+// the slice of entries will be converted to an actual map. Similarly, you can
+// use AddRepeatedField with an entry message to add (or overwrite) a mapping.
+// However, you cannot use GetRepeatedField or SetRepeatedField to modify maps,
+// since those take numeric index arguments which are not relevant to maps
+// (since maps in Go have no defined ordering).
+//
+// When setting field values in dynamic messages, the type-checking is lenient
+// in that it accepts any named type with the right kind. So a string field can
+// be assigned to any type that is defined as a string. Enum fields require
+// int32 values (or any type that is defined as an int32).
+//
+// Unlike normal use of numeric values in Go, values will be automatically
+// widened when assigned. So, for example, an int64 field can be set using an
+// int32 value since it can be safely widened without truncation or loss of
+// precision. Similar goes for uint32 values being converted to uint64 and
+// float32 being converted to float64. Narrowing conversions are not done,
+// however. Also, unsigned values will never be automatically converted to
+// signed (and vice versa), and floating point values will never be
+// automatically converted to integral values (and vice versa). Since the bit
+// width of int and uint fields is allowed to be platform dependent, but will
+// always be less than or equal to 64, they can only be used as values for
+// int64 and uint64 fields, respectively. They cannot be used to set int32 or
+// uint32 fields, which includes enums fields.
+//
+// Fields whose type is a nested message can have values set to either other
+// dynamic messages or generated messages (e.g. pointers to structs generated by
+// protoc). Getting a value for such a field will return the actual type it is
+// set to (e.g. either a dynamic message or a generated message). If the value
+// is not set and the message uses proto2 syntax, the default message returned
+// will be whatever is returned by the dynamic message's MessageFactory (if the
+// dynamic message was not created with a factory, it will use the logic of the
+// zero value factory). In most typical cases, it will return a dynamic message,
+// but if the factory is configured with a KnownTypeRegistry, or if the field's
+// type is a well-known type, it will return a zero value generated message.
+//
+// # Unrecognized Fields
+//
+// Unrecognized fields are preserved by the dynamic message when unmarshaling
+// from the standard binary format. If the message's MessageFactory was
+// configured with an ExtensionRegistry, it will be used to identify and parse
+// extension fields for the message.
+//
+// Unrecognized fields can dynamically become recognized fields if the
+// application attempts to retrieve an unrecognized field's value using a
+// FieldDescriptor. In this case, the given FieldDescriptor is used to parse the
+// unknown field and move the parsed value into the message's set of known
+// fields. This behavior is most suited to the use of extensions, where an
+// ExtensionRegistry is not setup with all known extensions ahead of time. But
+// it can even happen for non-extension fields! Here's an example scenario where
+// a non-extension field can initially be unknown and become known:
+//
+// 1. A dynamic message is created with a descriptor, A, and then
+// de-serialized from a stream of bytes. The stream includes an
+// unrecognized tag T. The message will include tag T in its unrecognized
+// field set.
+// 2. Another call site retrieves a newer descriptor, A', which includes a
+// newly added field with tag T.
+// 3. That other call site then uses a FieldDescriptor to access the value of
+// the new field. This will cause the dynamic message to parse the bytes
+// for the unknown tag T and store them as a known field.
+// 4. Subsequent operations for tag T, including setting the field using only
+// tag number or de-serializing a stream that includes tag T, will operate
+// as if that tag were part of the original descriptor, A.
+//
+// # Compatibility
+//
+// In addition to implementing the proto.Message interface, the included
+// Message type also provides an XXX_MessageName() method, so it can work with
+// proto.MessageName. And it provides a Descriptor() method that behaves just
+// like the method of the same signature in messages generated by protoc.
+// Because of this, it is actually compatible with proto.Message in many (though
+// not all) contexts. In particular, it is compatible with proto.Marshal and
+// proto.Unmarshal for serializing and de-serializing messages.
+//
+// The dynamic message supports binary and text marshaling, using protobuf's
+// well-defined binary format and the same text format that protoc-generated
+// types use. It also supports JSON serialization/de-serialization by
+// implementing the json.Marshaler and json.Unmarshaler interfaces. And dynamic
+// messages can safely be used with the jsonpb package for JSON serialization
+// and de-serialization.
+//
+// In addition to implementing the proto.Message interface and numerous related
+// methods, it also provides inter-op with generated messages via conversion.
+// The ConvertTo, ConvertFrom, MergeInto, and MergeFrom methods copy message
+// contents from a dynamic message to a generated message and vice versa.
+//
+// When copying from a generated message into a dynamic message, if the
+// generated message contains fields unknown to the dynamic message (e.g. not
+// present in the descriptor used to create the dynamic message), these fields
+// become known to the dynamic message (as per behavior described above in
+// "Unrecognized Fields"). If the generated message has unrecognized fields of
+// its own, including unrecognized extensions, they are preserved in the dynamic
+// message. It is possible that the dynamic message knows about fields that the
+// generated message did not, like if it has a different version of the
+// descriptor or its MessageFactory has an ExtensionRegistry that knows about
+// different extensions than were linked into the program. In this case, these
+// unrecognized fields in the generated message will be known fields in the
+// dynamic message.
+//
+// Similarly, when copying from a dynamic message into a generated message, if
+// the dynamic message has unrecognized fields they can be preserved in the
+// generated message (currently only for syntax proto2 since proto3 generated
+// messages do not preserve unrecognized fields). If the generated message knows
+// about fields that the dynamic message does not, these unrecognized fields may
+// become known fields in the generated message.
+//
+// # Registries
+//
+// This package also contains a couple of registries, for managing known types
+// and descriptors.
+//
+// The KnownTypeRegistry allows de-serialization of a dynamic message to use
+// generated message types, instead of dynamic messages, for some kinds of
+// nested message fields. This is particularly useful for working with proto
+// messages that have special encodings as JSON (e.g. the well-known types),
+// since the dynamic message does not try to handle these special cases in its
+// JSON marshaling facilities.
+//
+// The ExtensionRegistry allows for recognizing and parsing extensions fields
+// (for proto2 messages).
+//
+// Deprecated: This module was created for use with the older "v1" Protobuf API
+// in github.com/golang/protobuf. However, much of this module is no longer
+// necessary as the newer "v2" API in google.golang.org/protobuf provides similar
+// capabilities. Instead of using this github.com/jhump/protoreflect/dynamic package,
+// see [google.golang.org/protobuf/types/dynamicpb].
+//
+// [google.golang.org/protobuf/types/dynamicpb]: https://pkg.go.dev/google.golang.org/protobuf/types/dynamicpb
+package dynamic
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/dynamic_message.go b/vendor/github.com/jhump/protoreflect/dynamic/dynamic_message.go
new file mode 100644
index 0000000..ff136b0
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/dynamic_message.go
@@ -0,0 +1,2830 @@
+package dynamic
+
+import (
+ "bytes"
+ "compress/gzip"
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+ protov2 "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/types/descriptorpb"
+
+ "github.com/jhump/protoreflect/codec"
+ "github.com/jhump/protoreflect/desc"
+ "github.com/jhump/protoreflect/internal"
+)
+
+// Sentinel errors returned by dynamic message operations. Each Err* value
+// below has a legacy alias (without the "Err" prefix) kept only for
+// backward compatibility.
+
+// ErrUnknownTagNumber is an error that is returned when an operation refers
+// to an unknown tag number.
+var ErrUnknownTagNumber = errors.New("unknown tag number")
+
+// UnknownTagNumberError is the same as ErrUnknownTagNumber.
+// Deprecated: use ErrUnknownTagNumber
+var UnknownTagNumberError = ErrUnknownTagNumber
+
+// ErrUnknownFieldName is an error that is returned when an operation refers
+// to an unknown field name.
+var ErrUnknownFieldName = errors.New("unknown field name")
+
+// UnknownFieldNameError is the same as ErrUnknownFieldName.
+// Deprecated: use ErrUnknownFieldName
+var UnknownFieldNameError = ErrUnknownFieldName
+
+// ErrFieldIsNotMap is an error that is returned when map-related operations
+// are attempted with fields that are not maps.
+var ErrFieldIsNotMap = errors.New("field is not a map type")
+
+// FieldIsNotMapError is the same as ErrFieldIsNotMap.
+// Deprecated: use ErrFieldIsNotMap
+var FieldIsNotMapError = ErrFieldIsNotMap
+
+// ErrFieldIsNotRepeated is an error that is returned when repeated field
+// operations are attempted with fields that are not repeated.
+var ErrFieldIsNotRepeated = errors.New("field is not repeated")
+
+// FieldIsNotRepeatedError is the same as ErrFieldIsNotRepeated.
+// Deprecated: use ErrFieldIsNotRepeated
+var FieldIsNotRepeatedError = ErrFieldIsNotRepeated
+
+// ErrIndexOutOfRange is an error that is returned when an invalid index is
+// provided when accessing a single element of a repeated field.
+var ErrIndexOutOfRange = errors.New("index is out of range")
+
+// IndexOutOfRangeError is the same as ErrIndexOutOfRange.
+// Deprecated: use ErrIndexOutOfRange
+var IndexOutOfRangeError = ErrIndexOutOfRange
+
+// ErrNumericOverflow is an error returned by operations that encounter a
+// numeric value that is too large, for example de-serializing a value into an
+// int32 field when the value is larger than can fit into a 32-bit value.
+var ErrNumericOverflow = errors.New("numeric value is out of range")
+
+// NumericOverflowError is the same as ErrNumericOverflow.
+// Deprecated: use ErrNumericOverflow
+var NumericOverflowError = ErrNumericOverflow
+
+// Cached reflection types, used when validating and converting field values.
+var typeOfProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem()
+var typeOfDynamicMessage = reflect.TypeOf((*Message)(nil))
+var typeOfBytes = reflect.TypeOf(([]byte)(nil))
+
+// Message is a dynamic protobuf message. Instead of a generated struct,
+// like most protobuf messages, this is a map of field number to values and
+// a message descriptor, which is used to validate the field values and
+// also to de-serialize messages (from the standard binary format, as well
+// as from the text format and from JSON).
+type Message struct {
+	md *desc.MessageDescriptor // descriptor for this message's type
+	er *ExtensionRegistry      // used to recognize extension fields
+	mf *MessageFactory         // used to instantiate nested message values
+	// extraFields holds descriptors learned at runtime (via Set/Get with a
+	// descriptor not present in md), keyed by tag number.
+	extraFields map[int32]*desc.FieldDescriptor
+	// values holds the current field values, keyed by tag number.
+	values map[int32]interface{}
+	// unknownFields holds unrecognized wire data, keyed by tag number.
+	unknownFields map[int32][]UnknownField
+}
+
+// UnknownField represents a field that was parsed from the binary wire
+// format for a message, but was not a recognized field number. Enough
+// information is preserved so that re-serializing the message won't lose
+// any of the unrecognized data.
+type UnknownField struct {
+	// Encoding indicates how the unknown field was encoded on the wire. If it
+	// is proto.WireBytes or proto.WireGroupStart then Contents will be set to
+	// the raw bytes. If it is proto.WireTypeFixed32 then the data is in the least
+	// significant 32 bits of Value. Otherwise, the data is in all 64 bits of
+	// Value.
+	Encoding int8
+	// Contents holds raw bytes for length-delimited and group encodings.
+	Contents []byte
+	// Value holds the numeric payload for varint and fixed-width encodings.
+	Value uint64
+}
+
+// NewMessage creates a new dynamic message for the type represented by the given
+// message descriptor. During de-serialization, a default MessageFactory is used to
+// instantiate any nested message fields and no extension fields will be parsed. To
+// use a custom MessageFactory or ExtensionRegistry, use MessageFactory.NewMessage.
+func NewMessage(md *desc.MessageDescriptor) *Message {
+	// a nil factory means default behavior and no extension parsing
+	return NewMessageWithMessageFactory(md, nil)
+}
+
+// NewMessageWithExtensionRegistry creates a new dynamic message for the type
+// represented by the given message descriptor. During de-serialization, the given
+// ExtensionRegistry is used to parse extension fields and nested messages will be
+// instantiated using dynamic.NewMessageFactoryWithExtensionRegistry(er).
+func NewMessageWithExtensionRegistry(md *desc.MessageDescriptor, er *ExtensionRegistry) *Message {
+	// wrap the registry in a factory so nested messages inherit it
+	return NewMessageWithMessageFactory(md, NewMessageFactoryWithExtensionRegistry(er))
+}
+
+// NewMessageWithMessageFactory creates a new dynamic message for the type
+// represented by the given message descriptor. During de-serialization, the given
+// MessageFactory is used to instantiate nested messages.
+func NewMessageWithMessageFactory(md *desc.MessageDescriptor, mf *MessageFactory) *Message {
+ var er *ExtensionRegistry
+ if mf != nil {
+ er = mf.er
+ }
+ return &Message{
+ md: md,
+ mf: mf,
+ er: er,
+ }
+}
+
+// AsDynamicMessage converts the given message to a dynamic message. If the
+// given message is dynamic, it is returned. Otherwise, a dynamic message is
+// created using NewMessage.
+func AsDynamicMessage(msg proto.Message) (*Message, error) {
+	// a nil factory means default behavior and no extension parsing
+	return AsDynamicMessageWithMessageFactory(msg, nil)
+}
+
+// AsDynamicMessageWithExtensionRegistry converts the given message to a dynamic
+// message. If the given message is dynamic, it is returned. Otherwise, a
+// dynamic message is created using NewMessageWithExtensionRegistry.
+func AsDynamicMessageWithExtensionRegistry(msg proto.Message, er *ExtensionRegistry) (*Message, error) {
+	// wrap the registry in a factory so nested messages inherit it
+	return AsDynamicMessageWithMessageFactory(msg, NewMessageFactoryWithExtensionRegistry(er))
+}
+
+// AsDynamicMessageWithMessageFactory converts the given message to a dynamic
+// message. If the given message is dynamic, it is returned. Otherwise, a
+// dynamic message is created using NewMessageWithMessageFactory and populated
+// by copying all fields from the given message.
+func AsDynamicMessageWithMessageFactory(msg proto.Message, mf *MessageFactory) (*Message, error) {
+	if dm, ok := msg.(*Message); ok {
+		// already dynamic: return as-is
+		return dm, nil
+	}
+	md, err := desc.LoadMessageDescriptorForMessage(msg)
+	if err != nil {
+		return nil, err
+	}
+	dm := NewMessageWithMessageFactory(md, mf)
+	if err := dm.mergeFrom(msg); err != nil {
+		return nil, err
+	}
+	return dm, nil
+}
+
+// GetMessageDescriptor returns a descriptor for this message's type. This is
+// the descriptor supplied when the message was constructed.
+func (m *Message) GetMessageDescriptor() *desc.MessageDescriptor {
+	return m.md
+}
+
+// GetKnownFields returns a slice of descriptors for all known fields. The
+// fields will not be in any defined order.
+func (m *Message) GetKnownFields() []*desc.FieldDescriptor {
+	known := m.md.GetFields()
+	if len(m.extraFields) == 0 {
+		return known
+	}
+	// append any non-extension fields learned at runtime
+	result := make([]*desc.FieldDescriptor, len(known), len(known)+len(m.extraFields))
+	copy(result, known)
+	for _, fld := range m.extraFields {
+		if fld.IsExtension() {
+			continue
+		}
+		result = append(result, fld)
+	}
+	return result
+}
+
+// GetKnownExtensions returns a slice of descriptors for all extensions known by
+// the message's extension registry. The fields will not be in any defined order.
+func (m *Message) GetKnownExtensions() []*desc.FieldDescriptor {
+	if !m.md.IsExtendable() {
+		return nil
+	}
+	// NOTE(review): m.er may be nil here; this relies on AllExtensionsForType
+	// tolerating a nil receiver — confirm against ExtensionRegistry.
+	exts := m.er.AllExtensionsForType(m.md.GetFullyQualifiedName())
+	// include extension descriptors learned at runtime
+	for _, fld := range m.extraFields {
+		if fld.IsExtension() {
+			exts = append(exts, fld)
+		}
+	}
+	return exts
+}
+
+// GetUnknownFields returns a slice of tag numbers for all unknown fields that
+// this message contains. The tags will not be in any defined order.
+func (m *Message) GetUnknownFields() []int32 {
+	tags := make([]int32, 0, len(m.unknownFields))
+	for tag := range m.unknownFields {
+		tags = append(tags, tag)
+	}
+	return tags
+}
+
+// Descriptor returns the serialized form of the file descriptor in which the
+// message was defined and a path to the message type therein. This mimics the
+// method of the same name on message types generated by protoc. It panics if
+// the descriptor cannot be marshaled or the path cannot be computed.
+func (m *Message) Descriptor() ([]byte, []int) {
+	// get encoded file descriptor
+	b, err := proto.Marshal(m.md.GetFile().AsProto())
+	if err != nil {
+		panic(fmt.Sprintf("failed to get encoded descriptor for %s: %v", m.md.GetFile().GetName(), err))
+	}
+	// gzip it, matching the format used by protoc-generated Descriptor methods
+	var zippedBytes bytes.Buffer
+	w := gzip.NewWriter(&zippedBytes)
+	if _, err := w.Write(b); err != nil {
+		panic(fmt.Sprintf("failed to get encoded descriptor for %s: %v", m.md.GetFile().GetName(), err))
+	}
+	if err := w.Close(); err != nil {
+		panic(fmt.Sprintf("failed to get an encoded descriptor for %s: %v", m.md.GetFile().GetName(), err))
+	}
+
+	// and path to message
+	// Walk from this message up through its parents, recording each
+	// ancestor's index within its own parent; the walk produces the path
+	// leaf-first, so it is reversed afterwards.
+	path := []int{}
+	var d desc.Descriptor
+	name := m.md.GetFullyQualifiedName()
+	for d = m.md.GetParent(); d != nil; name, d = d.GetFullyQualifiedName(), d.GetParent() {
+		found := false
+		switch d := d.(type) {
+		case (*desc.FileDescriptor):
+			for i, md := range d.GetMessageTypes() {
+				if md.GetFullyQualifiedName() == name {
+					found = true
+					path = append(path, i)
+				}
+			}
+		case (*desc.MessageDescriptor):
+			for i, md := range d.GetNestedMessageTypes() {
+				if md.GetFullyQualifiedName() == name {
+					found = true
+					path = append(path, i)
+				}
+			}
+		}
+		if !found {
+			panic(fmt.Sprintf("failed to compute descriptor path for %s", m.md.GetFullyQualifiedName()))
+		}
+	}
+	// reverse the path
+	i := 0
+	j := len(path) - 1
+	for i < j {
+		path[i], path[j] = path[j], path[i]
+		i++
+		j--
+	}
+
+	return zippedBytes.Bytes(), path
+}
+
+// XXX_MessageName returns the fully qualified name of this message's type. This
+// allows dynamic messages to be used with proto.MessageName.
+func (m *Message) XXX_MessageName() string {
+	return m.md.GetFullyQualifiedName()
+}
+
+// FindFieldDescriptor returns a field descriptor for the given tag number. This
+// searches known fields in the descriptor, known fields discovered during calls
+// to GetField or SetField, and extension fields known by the message's extension
+// registry. It returns nil if the tag is unknown.
+func (m *Message) FindFieldDescriptor(tagNumber int32) *desc.FieldDescriptor {
+	if fd := m.md.FindFieldByNumber(tagNumber); fd != nil {
+		return fd
+	}
+	if fd := m.er.FindExtension(m.md.GetFullyQualifiedName(), tagNumber); fd != nil {
+		return fd
+	}
+	// fall back to fields learned at runtime
+	return m.extraFields[tagNumber]
+}
+
+// FindFieldDescriptorByName returns a field descriptor for the given field
+// name. This searches known fields in the descriptor, known fields discovered
+// during calls to GetField or SetField, and extension fields known by the
+// message's extension registry. It returns nil if the name is unknown. If the
+// given name refers to an extension, it should be fully qualified and may be
+// optionally enclosed in parentheses or brackets.
+func (m *Message) FindFieldDescriptorByName(name string) *desc.FieldDescriptor {
+	if name == "" {
+		return nil
+	}
+	fd := m.md.FindFieldByName(name)
+	if fd != nil {
+		return fd
+	}
+	// Strip enclosing parentheses or brackets; either wrapping marks the
+	// name as referring to an extension.
+	mustBeExt := false
+	if name[0] == '(' {
+		if name[len(name)-1] != ')' {
+			// malformed name
+			return nil
+		}
+		mustBeExt = true
+		name = name[1 : len(name)-1]
+	} else if name[0] == '[' {
+		if name[len(name)-1] != ']' {
+			// malformed name
+			return nil
+		}
+		mustBeExt = true
+		name = name[1 : len(name)-1]
+	}
+	fd = m.er.FindExtensionByName(m.md.GetFullyQualifiedName(), name)
+	if fd != nil {
+		return fd
+	}
+	// finally, search fields learned at runtime via GetField/SetField
+	for _, fd := range m.extraFields {
+		if fd.IsExtension() && name == fd.GetFullyQualifiedName() {
+			return fd
+		} else if !mustBeExt && !fd.IsExtension() && name == fd.GetName() {
+			return fd
+		}
+	}
+
+	return nil
+}
+
+// FindFieldDescriptorByJSONName returns a field descriptor for the given JSON
+// name. This searches known fields in the descriptor, known fields discovered
+// during calls to GetField or SetField, and extension fields known by the
+// message's extension registry. If no field matches the given JSON name, it
+// will fall back to searching field names (e.g. FindFieldDescriptorByName). If
+// this also yields no match, nil is returned.
+func (m *Message) FindFieldDescriptorByJSONName(name string) *desc.FieldDescriptor {
+	if name == "" {
+		return nil
+	}
+	fd := m.md.FindFieldByJSONName(name)
+	if fd != nil {
+		return fd
+	}
+	// Strip enclosing parentheses or brackets; either wrapping marks the
+	// name as referring to an extension.
+	mustBeExt := false
+	if name[0] == '(' {
+		if name[len(name)-1] != ')' {
+			// malformed name
+			return nil
+		}
+		mustBeExt = true
+		name = name[1 : len(name)-1]
+	} else if name[0] == '[' {
+		if name[len(name)-1] != ']' {
+			// malformed name
+			return nil
+		}
+		mustBeExt = true
+		name = name[1 : len(name)-1]
+	}
+	fd = m.er.FindExtensionByJSONName(m.md.GetFullyQualifiedName(), name)
+	if fd != nil {
+		return fd
+	}
+	// search fields learned at runtime via GetField/SetField
+	for _, fd := range m.extraFields {
+		if fd.IsExtension() && name == fd.GetFullyQualifiedJSONName() {
+			return fd
+		} else if !mustBeExt && !fd.IsExtension() && name == fd.GetJSONName() {
+			return fd
+		}
+	}
+
+	// try non-JSON names
+	return m.FindFieldDescriptorByName(name)
+}
+
+// checkField verifies that fd belongs to this message's type; see the
+// package-level checkField.
+func (m *Message) checkField(fd *desc.FieldDescriptor) error {
+	return checkField(fd, m.md)
+}
+
+// checkField returns an error unless fd is declared by the message type md
+// or, for an extension, falls within one of md's extension ranges.
+func checkField(fd *desc.FieldDescriptor, md *desc.MessageDescriptor) error {
+	if fd.GetOwner().GetFullyQualifiedName() != md.GetFullyQualifiedName() {
+		return fmt.Errorf("given field, %s, is for wrong message type: %s; expecting %s", fd.GetName(), fd.GetOwner().GetFullyQualifiedName(), md.GetFullyQualifiedName())
+	}
+	if fd.IsExtension() && !md.IsExtension(fd.GetNumber()) {
+		return fmt.Errorf("given field, %s, is an extension but is not in message extension range: %v", fd.GetFullyQualifiedName(), md.GetExtensionRanges())
+	}
+	return nil
+}
+
+// GetField returns the value for the given field descriptor. It panics if an
+// error is encountered. See TryGetField.
+func (m *Message) GetField(fd *desc.FieldDescriptor) interface{} {
+	v, err := m.TryGetField(fd)
+	if err != nil {
+		panic(err.Error())
+	}
+	return v
+}
+
+// TryGetField returns the value for the given field descriptor. An error is
+// returned if the given field descriptor does not belong to the right message
+// type.
+//
+// The Go type of the returned value, for scalar fields, is the same as protoc
+// would generate for the field (in a non-dynamic message). The table below
+// lists the scalar types and the corresponding Go types.
+//
+// +-------------------------+-----------+
+// | Declared Type           | Go Type   |
+// +-------------------------+-----------+
+// | int32, sint32, sfixed32 | int32     |
+// | int64, sint64, sfixed64 | int64     |
+// | uint32, fixed32         | uint32    |
+// | uint64, fixed64         | uint64    |
+// | float                   | float32   |
+// | double                  | float64   |
+// | bool                    | bool      |
+// | string                  | string    |
+// | bytes                   | []byte    |
+// +-------------------------+-----------+
+//
+// Values for enum fields will always be int32 values. You can use the enum
+// descriptor associated with the field to lookup value names with those values.
+// Values for message type fields may be an instance of the generated type *or*
+// may be another *dynamic.Message that represents the type.
+//
+// If the given field is a map field, the returned type will be
+// map[interface{}]interface{}. The actual concrete types of keys and values is
+// as described above. If the given field is a (non-map) repeated field, the
+// returned type is always []interface{}; the type of the actual elements is as
+// described above.
+//
+// If this message has no value for the given field, its default value is
+// returned. If the message is defined in a file with "proto3" syntax, the
+// default is always the zero value for the field. The default value for map and
+// repeated fields is a nil map or slice (respectively). For field's whose types
+// is a message, the default value is an empty message for "proto2" syntax or a
+// nil message for "proto3" syntax. Note that the in the latter case, a non-nil
+// interface with a nil pointer is returned, not a nil interface. Also note that
+// whether the returned value is an empty message or nil depends on if *this*
+// message was defined as "proto3" syntax, not the message type referred to by
+// the field's type.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) but corresponds to an unknown field, the unknown value will be
+// parsed and become known. The parsed value will be returned, or an error will
+// be returned if the unknown value cannot be parsed according to the field
+// descriptor's type information.
+func (m *Message) TryGetField(fd *desc.FieldDescriptor) (interface{}, error) {
+	// validate the descriptor belongs to this message type before reading
+	if err := m.checkField(fd); err != nil {
+		return nil, err
+	}
+	return m.getField(fd)
+}
+
+// GetFieldByName returns the value for the field with the given name. It panics
+// if an error is encountered. See TryGetFieldByName.
+func (m *Message) GetFieldByName(name string) interface{} {
+	v, err := m.TryGetFieldByName(name)
+	if err != nil {
+		panic(err.Error())
+	}
+	return v
+}
+
+// TryGetFieldByName returns the value for the field with the given name. An
+// error is returned if the given name is unknown. If the given name refers to
+// an extension field, it should be fully qualified and optionally enclosed in
+// parenthesis or brackets.
+//
+// If this message has no value for the given field, its default value is
+// returned. (See TryGetField for more info on types and default field values.)
+func (m *Message) TryGetFieldByName(name string) (interface{}, error) {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		// use the non-deprecated sentinel (same value as UnknownFieldNameError)
+		return nil, ErrUnknownFieldName
+	}
+	return m.getField(fd)
+}
+
+// GetFieldByNumber returns the value for the field with the given tag number.
+// It panics if an error is encountered. See TryGetFieldByNumber.
+func (m *Message) GetFieldByNumber(tagNumber int) interface{} {
+	v, err := m.TryGetFieldByNumber(tagNumber)
+	if err != nil {
+		panic(err.Error())
+	}
+	return v
+}
+
+// TryGetFieldByNumber returns the value for the field with the given tag
+// number. An error is returned if the given tag is unknown.
+//
+// If this message has no value for the given field, its default value is
+// returned. (See TryGetField for more info on types and default field values.)
+func (m *Message) TryGetFieldByNumber(tagNumber int) (interface{}, error) {
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		// use the non-deprecated sentinel (same value as UnknownTagNumberError)
+		return nil, ErrUnknownTagNumber
+	}
+	return m.getField(fd)
+}
+
+// getField returns the value for fd, returning the field's default value
+// (rather than nil) when the field is absent. See doGetField.
+func (m *Message) getField(fd *desc.FieldDescriptor) (interface{}, error) {
+	return m.doGetField(fd, false)
+}
+
+// doGetField returns the value for fd. If the message has no explicit value,
+// any unknown-field data for the tag is parsed first; failing that, it returns
+// nil (when nilIfAbsent) or the field's default value. Map and slice values
+// are returned as defensive copies so callers cannot corrupt internal state.
+func (m *Message) doGetField(fd *desc.FieldDescriptor, nilIfAbsent bool) (interface{}, error) {
+	res := m.values[fd.GetNumber()]
+	if res == nil {
+		var err error
+		// the field may exist as not-yet-parsed unknown-field data
+		if res, err = m.parseUnknownField(fd); err != nil {
+			return nil, err
+		}
+		if res == nil {
+			if nilIfAbsent {
+				return nil, nil
+			} else {
+				def := fd.GetDefaultValue()
+				if def != nil {
+					return def, nil
+				}
+				// GetDefaultValue only returns nil for message types
+				md := fd.GetMessageType()
+				if m.md.IsProto3() {
+					return nilMessage(md), nil
+				} else {
+					// for proto2, return default instance of message
+					return m.mf.NewMessage(md), nil
+				}
+			}
+		}
+	}
+	rt := reflect.TypeOf(res)
+	if rt.Kind() == reflect.Map {
+		// make defensive copies to prevent caller from storing illegal keys and values
+		// (note: m and res are deliberately shadowed in this branch)
+		m := res.(map[interface{}]interface{})
+		res := map[interface{}]interface{}{}
+		for k, v := range m {
+			res[k] = v
+		}
+		return res, nil
+	} else if rt.Kind() == reflect.Slice && rt != typeOfBytes {
+		// make defensive copies to prevent caller from storing illegal elements
+		sl := res.([]interface{})
+		res := make([]interface{}, len(sl))
+		copy(res, sl)
+		return res, nil
+	}
+	return res, nil
+}
+
+// nilMessage returns a typed nil message pointer for the given message type:
+// the registered generated type's nil pointer when one exists, otherwise a
+// nil *dynamic.Message. The result is a non-nil interface holding a nil pointer.
+func nilMessage(md *desc.MessageDescriptor) interface{} {
+	// try to return a proper nil pointer
+	msgType := proto.MessageType(md.GetFullyQualifiedName())
+	if msgType != nil && msgType.Implements(typeOfProtoMessage) {
+		return reflect.Zero(msgType).Interface().(proto.Message)
+	}
+	// fallback to nil dynamic message pointer
+	return (*Message)(nil)
+}
+
+// HasField returns true if this message has a value for the given field. If the
+// given field is not valid (e.g. belongs to a different message type), false is
+// returned. If this message is defined in a file with "proto3" syntax, this
+// will return false even if a field was explicitly assigned its zero value (the
+// zero values for a field are intentionally indistinguishable from absent).
+func (m *Message) HasField(fd *desc.FieldDescriptor) bool {
+	if m.checkField(fd) != nil {
+		return false
+	}
+	return m.HasFieldNumber(int(fd.GetNumber()))
+}
+
+// HasFieldName returns true if this message has a value for a field with the
+// given name. If the given name is unknown, this returns false.
+func (m *Message) HasFieldName(name string) bool {
+	if fd := m.FindFieldDescriptorByName(name); fd != nil {
+		return m.HasFieldNumber(int(fd.GetNumber()))
+	}
+	return false
+}
+
+// HasFieldNumber returns true if this message has a value for a field with the
+// given tag number. If the given tag is unknown, this returns false.
+func (m *Message) HasFieldNumber(tagNumber int) bool {
+	tag := int32(tagNumber)
+	if _, ok := m.values[tag]; ok {
+		return true
+	}
+	// the tag may be present only as unparsed unknown-field data
+	_, ok := m.unknownFields[tag]
+	return ok
+}
+
+// SetField sets the value for the given field descriptor to the given value. It
+// panics if an error is encountered. See TrySetField.
+func (m *Message) SetField(fd *desc.FieldDescriptor, val interface{}) {
+	err := m.TrySetField(fd, val)
+	if err != nil {
+		panic(err.Error())
+	}
+}
+
+// TrySetField sets the value for the given field descriptor to the given value.
+// An error is returned if the given field descriptor does not belong to the
+// right message type or if the given value is not a correct/compatible type for
+// the given field.
+//
+// The Go type expected for a field is the same as TryGetField would return for
+// the field. So message values can be supplied as either the correct generated
+// message type or as a *dynamic.Message.
+//
+// Since it is cumbersome to work with dynamic messages, some concessions are
+// made to simplify usage regarding types:
+//
+//  1. If a numeric type is provided that can be converted *without loss or
+//     overflow*, it is accepted. This allows for setting int64 fields using int
+//     or int32 values. Similarly for uint64 with uint and uint32 values and for
+//     float64 fields with float32 values.
+//  2. The value can be a named type, as long as its underlying type is correct.
+//  3. Map and repeated fields can be set using any kind of concrete map or
+//     slice type, as long as the values within are all of the correct type. So
+//     a field defined as a 'map<string, int32>` can be set using a
+//     map[string]int32, a map[string]interface{}, or even a
+//     map[interface{}]interface{}.
+//  4. Finally, dynamic code that chooses to not treat maps as a special-case
+//     find that they can set map fields using a slice where each element is a
+//     message that matches the implicit map-entry field message type.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) it will become known. Subsequent operations using tag numbers or
+// names will be able to resolve the newly-known type. If the message has a
+// value for the unknown value, it is cleared, replaced by the given known
+// value.
+func (m *Message) TrySetField(fd *desc.FieldDescriptor, val interface{}) error {
+	// validate ownership/extension range before mutating anything
+	if err := m.checkField(fd); err != nil {
+		return err
+	}
+	return m.setField(fd, val)
+}
+
+// SetFieldByName sets the value for the field with the given name to the given
+// value. It panics if an error is encountered. See TrySetFieldByName.
+func (m *Message) SetFieldByName(name string, val interface{}) {
+	err := m.TrySetFieldByName(name, val)
+	if err != nil {
+		panic(err.Error())
+	}
+}
+
+// TrySetFieldByName sets the value for the field with the given name to the
+// given value. An error is returned if the given name is unknown or if the
+// given value has an incorrect type. If the given name refers to an extension
+// field, it should be fully qualified and optionally enclosed in parenthesis or
+// brackets.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TrySetFieldByName(name string, val interface{}) error {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		// use the non-deprecated sentinel (same value as UnknownFieldNameError)
+		return ErrUnknownFieldName
+	}
+	return m.setField(fd, val)
+}
+
+// SetFieldByNumber sets the value for the field with the given tag number to
+// the given value. It panics if an error is encountered. See
+// TrySetFieldByNumber.
+func (m *Message) SetFieldByNumber(tagNumber int, val interface{}) {
+	err := m.TrySetFieldByNumber(tagNumber, val)
+	if err != nil {
+		panic(err.Error())
+	}
+}
+
+// TrySetFieldByNumber sets the value for the field with the given tag number to
+// the given value. An error is returned if the given tag is unknown or if the
+// given value has an incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TrySetFieldByNumber(tagNumber int, val interface{}) error {
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		// use the non-deprecated sentinel (same value as UnknownTagNumberError)
+		return ErrUnknownTagNumber
+	}
+	return m.setField(fd, val)
+}
+
+// setField validates (and possibly converts) val for the given field and, on
+// success, stores it via internalSetField.
+func (m *Message) setField(fd *desc.FieldDescriptor, val interface{}) error {
+	var err error
+	if val, err = validFieldValue(fd, val); err != nil {
+		return err
+	}
+	m.internalSetField(fd, val)
+	return nil
+}
+
+// internalSetField stores val for the given field without validating the
+// value's type (callers must have normalized it, e.g. via validFieldValue).
+// It maintains the message's invariants: empty repeated fields and proto3
+// scalar zero values are stored as "absent", setting a one-of member clears
+// its siblings, unknown-field data for the tag is dropped, and a previously
+// unknown descriptor is recorded.
+func (m *Message) internalSetField(fd *desc.FieldDescriptor, val interface{}) {
+	if fd.IsRepeated() {
+		// Unset fields and zero-length fields are indistinguishable, in both
+		// proto2 and proto3 syntax
+		if reflect.ValueOf(val).Len() == 0 {
+			if m.values != nil {
+				delete(m.values, fd.GetNumber())
+			}
+			return
+		}
+	} else if m.md.IsProto3() && fd.GetOneOf() == nil {
+		// proto3 considers fields that are set to their zero value as unset
+		// (we already handled repeated fields above)
+		var equal bool
+		if b, ok := val.([]byte); ok {
+			// can't compare slices, so we have to special-case []byte values
+			// (the redundant "ok &&" from the original is dropped: ok is
+			// necessarily true inside this branch)
+			equal = bytes.Equal(b, fd.GetDefaultValue().([]byte))
+		} else {
+			defVal := fd.GetDefaultValue()
+			equal = defVal == val
+			if !equal && defVal == nil {
+				// above just checks if value is the nil interface,
+				// but we should also test if the given value is a
+				// nil pointer
+				rv := reflect.ValueOf(val)
+				if rv.Kind() == reflect.Ptr && rv.IsNil() {
+					equal = true
+				}
+			}
+		}
+		if equal {
+			if m.values != nil {
+				delete(m.values, fd.GetNumber())
+			}
+			return
+		}
+	}
+	if m.values == nil {
+		m.values = map[int32]interface{}{}
+	}
+	m.values[fd.GetNumber()] = val
+	// if this field is part of a one-of, make sure all other one-of choices are cleared
+	od := fd.GetOneOf()
+	if od != nil {
+		for _, other := range od.GetChoices() {
+			if other.GetNumber() != fd.GetNumber() {
+				delete(m.values, other.GetNumber())
+			}
+		}
+	}
+	// also clear any unknown fields
+	if m.unknownFields != nil {
+		delete(m.unknownFields, fd.GetNumber())
+	}
+	// and add this field if it was previously unknown
+	if existing := m.FindFieldDescriptor(fd.GetNumber()); existing == nil {
+		m.addField(fd)
+	}
+}
+
+// addField records a previously-unknown field descriptor so that later
+// lookups by tag or name can resolve it (lazily allocating extraFields).
+func (m *Message) addField(fd *desc.FieldDescriptor) {
+	if m.extraFields == nil {
+		m.extraFields = map[int32]*desc.FieldDescriptor{}
+	}
+	m.extraFields[fd.GetNumber()] = fd
+}
+
+// ClearField removes any value for the given field. It panics if an error is
+// encountered. See TryClearField.
+func (m *Message) ClearField(fd *desc.FieldDescriptor) {
+	err := m.TryClearField(fd)
+	if err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryClearField removes any value for the given field. An error is returned if
+// the given field descriptor does not belong to the right message type.
+func (m *Message) TryClearField(fd *desc.FieldDescriptor) error {
+	err := m.checkField(fd)
+	if err == nil {
+		m.clearField(fd)
+	}
+	return err
+}
+
+// ClearFieldByName removes any value for the field with the given name. It
+// panics if an error is encountered. See TryClearFieldByName.
+func (m *Message) ClearFieldByName(name string) {
+	err := m.TryClearFieldByName(name)
+	if err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryClearFieldByName removes any value for the field with the given name. An
+// error is returned if the given name is unknown. If the given name refers to
+// an extension field, it should be fully qualified and optionally enclosed in
+// parenthesis or brackets.
+func (m *Message) TryClearFieldByName(name string) error {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		// use the non-deprecated sentinel (same value as UnknownFieldNameError)
+		return ErrUnknownFieldName
+	}
+	m.clearField(fd)
+	return nil
+}
+
+// ClearFieldByNumber removes any value for the field with the given tag number.
+// It panics if an error is encountered. See TryClearFieldByNumber.
+func (m *Message) ClearFieldByNumber(tagNumber int) {
+	err := m.TryClearFieldByNumber(tagNumber)
+	if err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryClearFieldByNumber removes any value for the field with the given tag
+// number. An error is returned if the given tag is unknown.
+func (m *Message) TryClearFieldByNumber(tagNumber int) error {
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		// use the non-deprecated sentinel (same value as UnknownTagNumberError)
+		return ErrUnknownTagNumber
+	}
+	m.clearField(fd)
+	return nil
+}
+
+// clearField removes any value and any unknown-field data for the given
+// field, and records the descriptor if it was not previously known.
+func (m *Message) clearField(fd *desc.FieldDescriptor) {
+	// clear value
+	if m.values != nil {
+		delete(m.values, fd.GetNumber())
+	}
+	// also clear any unknown fields
+	if m.unknownFields != nil {
+		delete(m.unknownFields, fd.GetNumber())
+	}
+	// and add this field if it was previously unknown
+	if existing := m.FindFieldDescriptor(fd.GetNumber()); existing == nil {
+		m.addField(fd)
+	}
+}
+
+// GetOneOfField returns which of the given one-of's fields is set and the
+// corresponding value. It panics if an error is encountered. See
+// TryGetOneOfField.
+func (m *Message) GetOneOfField(od *desc.OneOfDescriptor) (*desc.FieldDescriptor, interface{}) {
+	fd, val, err := m.TryGetOneOfField(od)
+	if err != nil {
+		panic(err.Error())
+	}
+	return fd, val
+}
+
+// TryGetOneOfField returns which of the given one-of's fields is set and the
+// corresponding value. An error is returned if the given one-of belongs to the
+// wrong message type. If the given one-of has no field set, this method will
+// return nil, nil.
+//
+// The type of the value, if one is set, is the same as would be returned by
+// TryGetField using the returned field descriptor.
+//
+// Like with TryGetField, if the given one-of contains any fields that are not
+// known (e.g. not present in this message's descriptor), they will become known
+// and any unknown value will be parsed (and become a known value on success).
+func (m *Message) TryGetOneOfField(od *desc.OneOfDescriptor) (*desc.FieldDescriptor, interface{}, error) {
+	if od.GetOwner().GetFullyQualifiedName() != m.md.GetFullyQualifiedName() {
+		return nil, nil, fmt.Errorf("given one-of, %s, is for wrong message type: %s; expecting %s", od.GetName(), od.GetOwner().GetFullyQualifiedName(), m.md.GetFullyQualifiedName())
+	}
+	// return the first choice that has a value (nilIfAbsent=true skips
+	// choices that are merely defaulted)
+	for _, fd := range od.GetChoices() {
+		val, err := m.doGetField(fd, true)
+		if err != nil {
+			return nil, nil, err
+		}
+		if val != nil {
+			return fd, val, nil
+		}
+	}
+	return nil, nil, nil
+}
+
+// ClearOneOfField removes any value for any of the given one-of's fields. It
+// panics if an error is encountered. See TryClearOneOfField.
+func (m *Message) ClearOneOfField(od *desc.OneOfDescriptor) {
+	err := m.TryClearOneOfField(od)
+	if err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryClearOneOfField removes any value for any of the given one-of's fields. An
+// error is returned if the given one-of descriptor does not belong to the right
+// message type.
+func (m *Message) TryClearOneOfField(od *desc.OneOfDescriptor) error {
+	if od.GetOwner().GetFullyQualifiedName() != m.md.GetFullyQualifiedName() {
+		return fmt.Errorf("given one-of, %s, is for wrong message type: %s; expecting %s", od.GetName(), od.GetOwner().GetFullyQualifiedName(), m.md.GetFullyQualifiedName())
+	}
+	// clear every choice; at most one can hold a value, but clearing all is
+	// harmless and also drops any related unknown-field data
+	for _, fd := range od.GetChoices() {
+		m.clearField(fd)
+	}
+	return nil
+}
+
+// GetMapField returns the value for the given map field descriptor and given
+// key. It panics if an error is encountered. See TryGetMapField.
+func (m *Message) GetMapField(fd *desc.FieldDescriptor, key interface{}) interface{} {
+ if v, err := m.TryGetMapField(fd, key); err != nil {
+ panic(err.Error())
+ } else {
+ return v
+ }
+}
+
+// TryGetMapField returns the value for the given map field descriptor and given
+// key. An error is returned if the given field descriptor does not belong to
+// the right message type or if it is not a map field.
+//
+// If the map field does not contain the requested key, this method returns
+// nil, nil. The Go type of the value returned mirrors the type that protoc
+// would generate for the field. (See TryGetField for more details on types).
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) but corresponds to an unknown field, the unknown value will be
+// parsed and become known. The parsed value will be searched for the requested
+// key and any value returned. An error will be returned if the unknown value
+// cannot be parsed according to the field descriptor's type information.
+func (m *Message) TryGetMapField(fd *desc.FieldDescriptor, key interface{}) (interface{}, error) {
+ if err := m.checkField(fd); err != nil {
+ return nil, err
+ }
+ return m.getMapField(fd, key)
+}
+
+// GetMapFieldByName returns the value for the map field with the given name and
+// given key. It panics if an error is encountered. See TryGetMapFieldByName.
+func (m *Message) GetMapFieldByName(name string, key interface{}) interface{} {
+ if v, err := m.TryGetMapFieldByName(name, key); err != nil {
+ panic(err.Error())
+ } else {
+ return v
+ }
+}
+
+// TryGetMapFieldByName returns the value for the map field with the given name
+// and given key. An error is returned if the given name is unknown or if it
+// names a field that is not a map field.
+//
+// If this message has no value for the given field or the value has no value
+// for the requested key, then this method returns nil, nil.
+//
+// (See TryGetField for more info on types.)
+func (m *Message) TryGetMapFieldByName(name string, key interface{}) (interface{}, error) {
+ fd := m.FindFieldDescriptorByName(name)
+ if fd == nil {
+ return nil, UnknownFieldNameError
+ }
+ return m.getMapField(fd, key)
+}
+
+// GetMapFieldByNumber returns the value for the map field with the given tag
+// number and given key. It panics if an error is encountered. See
+// TryGetMapFieldByNumber.
+func (m *Message) GetMapFieldByNumber(tagNumber int, key interface{}) interface{} {
+ if v, err := m.TryGetMapFieldByNumber(tagNumber, key); err != nil {
+ panic(err.Error())
+ } else {
+ return v
+ }
+}
+
+// TryGetMapFieldByNumber returns the value for the map field with the given tag
+// number and given key. An error is returned if the given tag is unknown or if
+// it indicates a field that is not a map field.
+//
+// If this message has no value for the given field or the value has no value
+// for the requested key, then this method returns nil, nil.
+//
+// (See TryGetField for more info on types.)
+func (m *Message) TryGetMapFieldByNumber(tagNumber int, key interface{}) (interface{}, error) {
+ fd := m.FindFieldDescriptor(int32(tagNumber))
+ if fd == nil {
+ return nil, UnknownTagNumberError
+ }
+ return m.getMapField(fd, key)
+}
+
+func (m *Message) getMapField(fd *desc.FieldDescriptor, key interface{}) (interface{}, error) {
+ if !fd.IsMap() {
+ return nil, FieldIsNotMapError
+ }
+ kfd := fd.GetMessageType().GetFields()[0]
+ ki, err := validElementFieldValue(kfd, key, false)
+ if err != nil {
+ return nil, err
+ }
+ mp := m.values[fd.GetNumber()]
+ if mp == nil {
+ if mp, err = m.parseUnknownField(fd); err != nil {
+ return nil, err
+ } else if mp == nil {
+ return nil, nil
+ }
+ }
+ return mp.(map[interface{}]interface{})[ki], nil
+}
+
+// ForEachMapFieldEntry executes the given function for each entry in the map
+// value for the given field descriptor. It stops iteration if the function
+// returns false. It panics if an error is encountered. See
+// TryForEachMapFieldEntry.
+func (m *Message) ForEachMapFieldEntry(fd *desc.FieldDescriptor, fn func(key, val interface{}) bool) {
+ if err := m.TryForEachMapFieldEntry(fd, fn); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryForEachMapFieldEntry executes the given function for each entry in the map
+// value for the given field descriptor. An error is returned if the given field
+// descriptor does not belong to the right message type or if it is not a map
+// field.
+//
+// Iteration ends either when all entries have been examined or when the given
+// function returns false. So the function is expected to return true for normal
+// iteration and false to break out. If this message has no value for the given
+// field, it returns without invoking the given function.
+//
+// The Go type of the key and value supplied to the function mirrors the type
+// that protoc would generate for the field. (See TryGetField for more details
+// on types).
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) but corresponds to an unknown field, the unknown value will be
+// parsed and become known. The parsed value will be searched for the requested
+// key and any value returned. An error will be returned if the unknown value
+// cannot be parsed according to the field descriptor's type information.
+func (m *Message) TryForEachMapFieldEntry(fd *desc.FieldDescriptor, fn func(key, val interface{}) bool) error {
+ if err := m.checkField(fd); err != nil {
+ return err
+ }
+ return m.forEachMapFieldEntry(fd, fn)
+}
+
+// ForEachMapFieldEntryByName executes the given function for each entry in the
+// map value for the field with the given name. It stops iteration if the
+// function returns false. It panics if an error is encountered. See
+// TryForEachMapFieldEntryByName.
+func (m *Message) ForEachMapFieldEntryByName(name string, fn func(key, val interface{}) bool) {
+ if err := m.TryForEachMapFieldEntryByName(name, fn); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryForEachMapFieldEntryByName executes the given function for each entry in
+// the map value for the field with the given name. It stops iteration if the
+// function returns false. An error is returned if the given name is unknown or
+// if it names a field that is not a map field.
+//
+// If this message has no value for the given field, it returns without ever
+// invoking the given function.
+//
+// (See TryGetField for more info on types supplied to the function.)
+func (m *Message) TryForEachMapFieldEntryByName(name string, fn func(key, val interface{}) bool) error {
+ fd := m.FindFieldDescriptorByName(name)
+ if fd == nil {
+ return UnknownFieldNameError
+ }
+ return m.forEachMapFieldEntry(fd, fn)
+}
+
+// ForEachMapFieldEntryByNumber executes the given function for each entry in
+// the map value for the field with the given tag number. It stops iteration if
+// the function returns false. It panics if an error is encountered. See
+// TryForEachMapFieldEntryByNumber.
+func (m *Message) ForEachMapFieldEntryByNumber(tagNumber int, fn func(key, val interface{}) bool) {
+ if err := m.TryForEachMapFieldEntryByNumber(tagNumber, fn); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryForEachMapFieldEntryByNumber executes the given function for each entry in
+// the map value for the field with the given tag number. It stops iteration if
+// the function returns false. An error is returned if the given tag is unknown
+// or if it indicates a field that is not a map field.
+//
+// If this message has no value for the given field, it returns without ever
+// invoking the given function.
+//
+// (See TryGetField for more info on types supplied to the function.)
+func (m *Message) TryForEachMapFieldEntryByNumber(tagNumber int, fn func(key, val interface{}) bool) error {
+ fd := m.FindFieldDescriptor(int32(tagNumber))
+ if fd == nil {
+ return UnknownTagNumberError
+ }
+ return m.forEachMapFieldEntry(fd, fn)
+}
+
+func (m *Message) forEachMapFieldEntry(fd *desc.FieldDescriptor, fn func(key, val interface{}) bool) error {
+ if !fd.IsMap() {
+ return FieldIsNotMapError
+ }
+ mp := m.values[fd.GetNumber()]
+ if mp == nil {
+ if mp, err := m.parseUnknownField(fd); err != nil {
+ return err
+ } else if mp == nil {
+ return nil
+ }
+ }
+ for k, v := range mp.(map[interface{}]interface{}) {
+ if !fn(k, v) {
+ break
+ }
+ }
+ return nil
+}
+
+// PutMapField sets the value for the given map field descriptor and given key
+// to the given value. It panics if an error is encountered. See TryPutMapField.
+func (m *Message) PutMapField(fd *desc.FieldDescriptor, key interface{}, val interface{}) {
+ if err := m.TryPutMapField(fd, key, val); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryPutMapField sets the value for the given map field descriptor and given
+// key to the given value. An error is returned if the given field descriptor
+// does not belong to the right message type, if the given field is not a map
+// field, or if the given value is not a correct/compatible type for the given
+// field.
+//
+// The Go type expected for a field is the same as required by TrySetField for
+// a field with the same type as the map's value type.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) it will become known. Subsequent operations using tag numbers or
+// names will be able to resolve the newly-known type. If the message has a
+// value for the unknown value, it is cleared, replaced by the given known
+// value.
+func (m *Message) TryPutMapField(fd *desc.FieldDescriptor, key interface{}, val interface{}) error {
+ if err := m.checkField(fd); err != nil {
+ return err
+ }
+ return m.putMapField(fd, key, val)
+}
+
+// PutMapFieldByName sets the value for the map field with the given name and
+// given key to the given value. It panics if an error is encountered. See
+// TryPutMapFieldByName.
+func (m *Message) PutMapFieldByName(name string, key interface{}, val interface{}) {
+ if err := m.TryPutMapFieldByName(name, key, val); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryPutMapFieldByName sets the value for the map field with the given name and
+// the given key to the given value. An error is returned if the given name is
+// unknown, if it names a field that is not a map, or if the given value has an
+// incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TryPutMapFieldByName(name string, key interface{}, val interface{}) error {
+ fd := m.FindFieldDescriptorByName(name)
+ if fd == nil {
+ return UnknownFieldNameError
+ }
+ return m.putMapField(fd, key, val)
+}
+
+// PutMapFieldByNumber sets the value for the map field with the given tag
+// number and given key to the given value. It panics if an error is
+// encountered. See TryPutMapFieldByNumber.
+func (m *Message) PutMapFieldByNumber(tagNumber int, key interface{}, val interface{}) {
+ if err := m.TryPutMapFieldByNumber(tagNumber, key, val); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryPutMapFieldByNumber sets the value for the map field with the given tag
+// number and the given key to the given value. An error is returned if the
+// given tag is unknown, if it indicates a field that is not a map, or if the
+// given value has an incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TryPutMapFieldByNumber(tagNumber int, key interface{}, val interface{}) error {
+ fd := m.FindFieldDescriptor(int32(tagNumber))
+ if fd == nil {
+ return UnknownTagNumberError
+ }
+ return m.putMapField(fd, key, val)
+}
+
+func (m *Message) putMapField(fd *desc.FieldDescriptor, key interface{}, val interface{}) error {
+ if !fd.IsMap() {
+ return FieldIsNotMapError
+ }
+ kfd := fd.GetMessageType().GetFields()[0]
+ ki, err := validElementFieldValue(kfd, key, false)
+ if err != nil {
+ return err
+ }
+ vfd := fd.GetMessageType().GetFields()[1]
+ vi, err := validElementFieldValue(vfd, val, true)
+ if err != nil {
+ return err
+ }
+ mp := m.values[fd.GetNumber()]
+ if mp == nil {
+ if mp, err = m.parseUnknownField(fd); err != nil {
+ return err
+ } else if mp == nil {
+ m.internalSetField(fd, map[interface{}]interface{}{ki: vi})
+ return nil
+ }
+ }
+ mp.(map[interface{}]interface{})[ki] = vi
+ return nil
+}
+
+// RemoveMapField changes the value for the given field descriptor by removing
+// any value associated with the given key. It panics if an error is
+// encountered. See TryRemoveMapField.
+func (m *Message) RemoveMapField(fd *desc.FieldDescriptor, key interface{}) {
+ if err := m.TryRemoveMapField(fd, key); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryRemoveMapField changes the value for the given field descriptor by
+// removing any value associated with the given key. An error is returned if the
+// given field descriptor does not belong to the right message type or if the
+// given field is not a map field.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) it will become known. Subsequent operations using tag numbers or
+// names will be able to resolve the newly-known type. If the message has a
+// value for the unknown value, it is parsed and any value for the given key
+// removed.
+func (m *Message) TryRemoveMapField(fd *desc.FieldDescriptor, key interface{}) error {
+ if err := m.checkField(fd); err != nil {
+ return err
+ }
+ return m.removeMapField(fd, key)
+}
+
+// RemoveMapFieldByName changes the value for the field with the given name by
+// removing any value associated with the given key. It panics if an error is
+// encountered. See TryRemoveMapFieldByName.
+func (m *Message) RemoveMapFieldByName(name string, key interface{}) {
+ if err := m.TryRemoveMapFieldByName(name, key); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryRemoveMapFieldByName changes the value for the field with the given name
+// by removing any value associated with the given key. An error is returned if
+// the given name is unknown or if it names a field that is not a map.
+func (m *Message) TryRemoveMapFieldByName(name string, key interface{}) error {
+ fd := m.FindFieldDescriptorByName(name)
+ if fd == nil {
+ return UnknownFieldNameError
+ }
+ return m.removeMapField(fd, key)
+}
+
+// RemoveMapFieldByNumber changes the value for the field with the given tag
+// number by removing any value associated with the given key. It panics if an
+// error is encountered. See TryRemoveMapFieldByNumber.
+func (m *Message) RemoveMapFieldByNumber(tagNumber int, key interface{}) {
+ if err := m.TryRemoveMapFieldByNumber(tagNumber, key); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryRemoveMapFieldByNumber changes the value for the field with the given tag
+// number by removing any value associated with the given key. An error is
+// returned if the given tag is unknown or if it indicates a field that is not
+// a map.
+func (m *Message) TryRemoveMapFieldByNumber(tagNumber int, key interface{}) error {
+ fd := m.FindFieldDescriptor(int32(tagNumber))
+ if fd == nil {
+ return UnknownTagNumberError
+ }
+ return m.removeMapField(fd, key)
+}
+
+func (m *Message) removeMapField(fd *desc.FieldDescriptor, key interface{}) error {
+ if !fd.IsMap() {
+ return FieldIsNotMapError
+ }
+ kfd := fd.GetMessageType().GetFields()[0]
+ ki, err := validElementFieldValue(kfd, key, false)
+ if err != nil {
+ return err
+ }
+ mp := m.values[fd.GetNumber()]
+ if mp == nil {
+ if mp, err = m.parseUnknownField(fd); err != nil {
+ return err
+ } else if mp == nil {
+ return nil
+ }
+ }
+ res := mp.(map[interface{}]interface{})
+ delete(res, ki)
+ if len(res) == 0 {
+ delete(m.values, fd.GetNumber())
+ }
+ return nil
+}
+
+// FieldLength returns the number of elements in this message for the given
+// field descriptor. It panics if an error is encountered. See TryFieldLength.
+func (m *Message) FieldLength(fd *desc.FieldDescriptor) int {
+ l, err := m.TryFieldLength(fd)
+ if err != nil {
+ panic(err.Error())
+ }
+ return l
+}
+
+// TryFieldLength returns the number of elements in this message for the given
+// field descriptor. An error is returned if the given field descriptor does not
+// belong to the right message type or if it is neither a map field nor a
+// repeated field.
+func (m *Message) TryFieldLength(fd *desc.FieldDescriptor) (int, error) {
+ if err := m.checkField(fd); err != nil {
+ return 0, err
+ }
+ return m.fieldLength(fd)
+}
+
+// FieldLengthByName returns the number of elements in this message for the
+// field with the given name. It panics if an error is encountered. See
+// TryFieldLengthByName.
+func (m *Message) FieldLengthByName(name string) int {
+ l, err := m.TryFieldLengthByName(name)
+ if err != nil {
+ panic(err.Error())
+ }
+ return l
+}
+
+// TryFieldLengthByName returns the number of elements in this message for the
+// field with the given name. An error is returned if the given name is unknown
+// or if the named field is neither a map field nor a repeated field.
+func (m *Message) TryFieldLengthByName(name string) (int, error) {
+ fd := m.FindFieldDescriptorByName(name)
+ if fd == nil {
+ return 0, UnknownFieldNameError
+ }
+ return m.fieldLength(fd)
+}
+
+// FieldLengthByNumber returns the number of elements in this message for the
+// field with the given tag number. It panics if an error is encountered. See
+// TryFieldLengthByNumber.
+func (m *Message) FieldLengthByNumber(tagNumber int32) int {
+ l, err := m.TryFieldLengthByNumber(tagNumber)
+ if err != nil {
+ panic(err.Error())
+ }
+ return l
+}
+
+// TryFieldLengthByNumber returns the number of elements in this message for the
+// field with the given tag number. An error is returned if the given tag is
+// unknown or if the named field is neither a map field nor a repeated field.
+func (m *Message) TryFieldLengthByNumber(tagNumber int32) (int, error) {
+ fd := m.FindFieldDescriptor(int32(tagNumber))
+ if fd == nil {
+ return 0, UnknownTagNumberError
+ }
+ return m.fieldLength(fd)
+}
+
+func (m *Message) fieldLength(fd *desc.FieldDescriptor) (int, error) {
+ if !fd.IsRepeated() {
+ return 0, FieldIsNotRepeatedError
+ }
+ val := m.values[fd.GetNumber()]
+ if val == nil {
+ var err error
+ if val, err = m.parseUnknownField(fd); err != nil {
+ return 0, err
+ } else if val == nil {
+ return 0, nil
+ }
+ }
+ if sl, ok := val.([]interface{}); ok {
+ return len(sl), nil
+ } else if mp, ok := val.(map[interface{}]interface{}); ok {
+ return len(mp), nil
+ }
+ return 0, nil
+}
+
+// GetRepeatedField returns the value for the given repeated field descriptor at
+// the given index. It panics if an error is encountered. See
+// TryGetRepeatedField.
+func (m *Message) GetRepeatedField(fd *desc.FieldDescriptor, index int) interface{} {
+ if v, err := m.TryGetRepeatedField(fd, index); err != nil {
+ panic(err.Error())
+ } else {
+ return v
+ }
+}
+
+// TryGetRepeatedField returns the value for the given repeated field descriptor
+// at the given index. An error is returned if the given field descriptor does
+// not belong to the right message type, if it is not a repeated field, or if
+// the given index is out of range (less than zero or greater than or equal to
+// the length of the repeated field). Also, even though map fields technically
+// are repeated fields, if the given field is a map field an error will result:
+// map representation does not lend itself to random access by index.
+//
+// The Go type of the value returned mirrors the type that protoc would generate
+// for the field's element type. (See TryGetField for more details on types).
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) but corresponds to an unknown field, the unknown value will be
+// parsed and become known. The value at the given index in the parsed value
+// will be returned. An error will be returned if the unknown value cannot be
+// parsed according to the field descriptor's type information.
+func (m *Message) TryGetRepeatedField(fd *desc.FieldDescriptor, index int) (interface{}, error) {
+ if index < 0 {
+ return nil, IndexOutOfRangeError
+ }
+ if err := m.checkField(fd); err != nil {
+ return nil, err
+ }
+ return m.getRepeatedField(fd, index)
+}
+
+// GetRepeatedFieldByName returns the value for the repeated field with the
+// given name at the given index. It panics if an error is encountered. See
+// TryGetRepeatedFieldByName.
+func (m *Message) GetRepeatedFieldByName(name string, index int) interface{} {
+ if v, err := m.TryGetRepeatedFieldByName(name, index); err != nil {
+ panic(err.Error())
+ } else {
+ return v
+ }
+}
+
+// TryGetRepeatedFieldByName returns the value for the repeated field with the
+// given name at the given index. An error is returned if the given name is
+// unknown, if it names a field that is not a repeated field (or is a map
+// field), or if the given index is out of range (less than zero or greater
+// than or equal to the length of the repeated field).
+//
+// (See TryGetField for more info on types.)
+func (m *Message) TryGetRepeatedFieldByName(name string, index int) (interface{}, error) {
+ if index < 0 {
+ return nil, IndexOutOfRangeError
+ }
+ fd := m.FindFieldDescriptorByName(name)
+ if fd == nil {
+ return nil, UnknownFieldNameError
+ }
+ return m.getRepeatedField(fd, index)
+}
+
+// GetRepeatedFieldByNumber returns the value for the repeated field with the
+// given tag number at the given index. It panics if an error is encountered.
+// See TryGetRepeatedFieldByNumber.
+func (m *Message) GetRepeatedFieldByNumber(tagNumber int, index int) interface{} {
+ if v, err := m.TryGetRepeatedFieldByNumber(tagNumber, index); err != nil {
+ panic(err.Error())
+ } else {
+ return v
+ }
+}
+
+// TryGetRepeatedFieldByNumber returns the value for the repeated field with the
+// given tag number at the given index. An error is returned if the given tag is
+// unknown, if it indicates a field that is not a repeated field (or is a map
+// field), or if the given index is out of range (less than zero or greater than
+// or equal to the length of the repeated field).
+//
+// (See TryGetField for more info on types.)
+func (m *Message) TryGetRepeatedFieldByNumber(tagNumber int, index int) (interface{}, error) {
+ if index < 0 {
+ return nil, IndexOutOfRangeError
+ }
+ fd := m.FindFieldDescriptor(int32(tagNumber))
+ if fd == nil {
+ return nil, UnknownTagNumberError
+ }
+ return m.getRepeatedField(fd, index)
+}
+
+func (m *Message) getRepeatedField(fd *desc.FieldDescriptor, index int) (interface{}, error) {
+ if fd.IsMap() || !fd.IsRepeated() {
+ return nil, FieldIsNotRepeatedError
+ }
+ sl := m.values[fd.GetNumber()]
+ if sl == nil {
+ var err error
+ if sl, err = m.parseUnknownField(fd); err != nil {
+ return nil, err
+ } else if sl == nil {
+ return nil, IndexOutOfRangeError
+ }
+ }
+ res := sl.([]interface{})
+ if index >= len(res) {
+ return nil, IndexOutOfRangeError
+ }
+ return res[index], nil
+}
+
+// AddRepeatedField appends the given value to the given repeated field. It
+// panics if an error is encountered. See TryAddRepeatedField.
+func (m *Message) AddRepeatedField(fd *desc.FieldDescriptor, val interface{}) {
+ if err := m.TryAddRepeatedField(fd, val); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryAddRepeatedField appends the given value to the given repeated field. An
+// error is returned if the given field descriptor does not belong to the right
+// message type, if the given field is not repeated, or if the given value is
+// not a correct/compatible type for the given field. If the given field is a
+// map field, the call will succeed if the given value is an instance of the
+// map's entry message type.
+//
+// The Go type expected for a field is the same as required by TrySetField for
+// a non-repeated field of the same type.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) it will become known. Subsequent operations using tag numbers or
+// names will be able to resolve the newly-known type. If the message has a
+// value for the unknown value, it is parsed and the given value is appended to
+// it.
+func (m *Message) TryAddRepeatedField(fd *desc.FieldDescriptor, val interface{}) error {
+ if err := m.checkField(fd); err != nil {
+ return err
+ }
+ return m.addRepeatedField(fd, val)
+}
+
+// AddRepeatedFieldByName appends the given value to the repeated field with the
+// given name. It panics if an error is encountered. See
+// TryAddRepeatedFieldByName.
+func (m *Message) AddRepeatedFieldByName(name string, val interface{}) {
+ if err := m.TryAddRepeatedFieldByName(name, val); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryAddRepeatedFieldByName appends the given value to the repeated field with
+// the given name. An error is returned if the given name is unknown, if it
+// names a field that is not repeated, or if the given value has an incorrect
+// type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TryAddRepeatedFieldByName(name string, val interface{}) error {
+ fd := m.FindFieldDescriptorByName(name)
+ if fd == nil {
+ return UnknownFieldNameError
+ }
+ return m.addRepeatedField(fd, val)
+}
+
+// AddRepeatedFieldByNumber appends the given value to the repeated field with
+// the given tag number. It panics if an error is encountered. See
+// TryAddRepeatedFieldByNumber.
+func (m *Message) AddRepeatedFieldByNumber(tagNumber int, val interface{}) {
+ if err := m.TryAddRepeatedFieldByNumber(tagNumber, val); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TryAddRepeatedFieldByNumber appends the given value to the repeated field
+// with the given tag number. An error is returned if the given tag is unknown,
+// if it indicates a field that is not repeated, or if the given value has an
+// incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TryAddRepeatedFieldByNumber(tagNumber int, val interface{}) error {
+ fd := m.FindFieldDescriptor(int32(tagNumber))
+ if fd == nil {
+ return UnknownTagNumberError
+ }
+ return m.addRepeatedField(fd, val)
+}
+
+func (m *Message) addRepeatedField(fd *desc.FieldDescriptor, val interface{}) error {
+ if !fd.IsRepeated() {
+ return FieldIsNotRepeatedError
+ }
+ val, err := validElementFieldValue(fd, val, false)
+ if err != nil {
+ return err
+ }
+
+ if fd.IsMap() {
+ // We're lenient. Just as we allow setting a map field to a slice of entry messages, we also allow
+ // adding entries one at a time (as if the field were a normal repeated field).
+ msg := val.(proto.Message)
+ dm, err := asDynamicMessage(msg, fd.GetMessageType(), m.mf)
+ if err != nil {
+ return err
+ }
+ k, err := dm.TryGetFieldByNumber(1)
+ if err != nil {
+ return err
+ }
+ v, err := dm.TryGetFieldByNumber(2)
+ if err != nil {
+ return err
+ }
+ return m.putMapField(fd, k, v)
+ }
+
+ sl := m.values[fd.GetNumber()]
+ if sl == nil {
+ if sl, err = m.parseUnknownField(fd); err != nil {
+ return err
+ } else if sl == nil {
+ sl = []interface{}{}
+ }
+ }
+ res := sl.([]interface{})
+ res = append(res, val)
+ m.internalSetField(fd, res)
+ return nil
+}
+
+// SetRepeatedField sets the value for the given repeated field descriptor and
+// given index to the given value. It panics if an error is encountered. See
+// SetRepeatedField.
+func (m *Message) SetRepeatedField(fd *desc.FieldDescriptor, index int, val interface{}) {
+ if err := m.TrySetRepeatedField(fd, index, val); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TrySetRepeatedField sets the value for the given repeated field descriptor
+// and given index to the given value. An error is returned if the given field
+// descriptor does not belong to the right message type, if the given field is
+// not repeated, or if the given value is not a correct/compatible type for the
+// given field. Also, even though map fields technically are repeated fields, if
+// the given field is a map field an error will result: map representation does
+// not lend itself to random access by index.
+//
+// The Go type expected for a field is the same as required by TrySetField for
+// a non-repeated field of the same type.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) it will become known. Subsequent operations using tag numbers or
+// names will be able to resolve the newly-known type. If the message has a
+// value for the unknown value, it is parsed and the element at the given index
+// is replaced with the given value.
+func (m *Message) TrySetRepeatedField(fd *desc.FieldDescriptor, index int, val interface{}) error {
+ if index < 0 {
+ return IndexOutOfRangeError
+ }
+ if err := m.checkField(fd); err != nil {
+ return err
+ }
+ return m.setRepeatedField(fd, index, val)
+}
+
+// SetRepeatedFieldByName sets the value for the repeated field with the given
+// name and given index to the given value. It panics if an error is
+// encountered. See TrySetRepeatedFieldByName.
+func (m *Message) SetRepeatedFieldByName(name string, index int, val interface{}) {
+ if err := m.TrySetRepeatedFieldByName(name, index, val); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TrySetRepeatedFieldByName sets the value for the repeated field with the
+// given name and the given index to the given value. An error is returned if
+// the given name is unknown, if it names a field that is not repeated (or is a
+// map field), or if the given value has an incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TrySetRepeatedFieldByName(name string, index int, val interface{}) error {
+ if index < 0 {
+ return IndexOutOfRangeError
+ }
+ fd := m.FindFieldDescriptorByName(name)
+ if fd == nil {
+ return UnknownFieldNameError
+ }
+ return m.setRepeatedField(fd, index, val)
+}
+
+// SetRepeatedFieldByNumber sets the value for the repeated field with the given
+// tag number and given index to the given value. It panics if an error is
+// encountered. See TrySetRepeatedFieldByNumber.
+func (m *Message) SetRepeatedFieldByNumber(tagNumber int, index int, val interface{}) {
+ if err := m.TrySetRepeatedFieldByNumber(tagNumber, index, val); err != nil {
+ panic(err.Error())
+ }
+}
+
+// TrySetRepeatedFieldByNumber sets the value for the repeated field with the
+// given tag number and the given index to the given value. An error is returned
+// if the given tag is unknown, if it indicates a field that is not repeated (or
+// is a map field), or if the given value has an incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TrySetRepeatedFieldByNumber(tagNumber int, index int, val interface{}) error {
+ if index < 0 {
+ return IndexOutOfRangeError
+ }
+ fd := m.FindFieldDescriptor(int32(tagNumber))
+ if fd == nil {
+ return UnknownTagNumberError
+ }
+ return m.setRepeatedField(fd, index, val)
+}
+
+// setRepeatedField validates val against fd's element type and stores it at
+// the given index of the repeated field, first materializing the slice from
+// any unknown-field data captured for fd's tag.
+func (m *Message) setRepeatedField(fd *desc.FieldDescriptor, index int, val interface{}) error {
+	if !fd.IsRepeated() || fd.IsMap() {
+		return FieldIsNotRepeatedError
+	}
+	checked, err := validElementFieldValue(fd, val, false)
+	if err != nil {
+		return err
+	}
+	existing := m.values[fd.GetNumber()]
+	if existing == nil {
+		existing, err = m.parseUnknownField(fd)
+		if err != nil {
+			return err
+		}
+		if existing == nil {
+			// field has no elements at all, so any index is out of range
+			return IndexOutOfRangeError
+		}
+	}
+	slice := existing.([]interface{})
+	if index >= len(slice) {
+		return IndexOutOfRangeError
+	}
+	slice[index] = checked
+	return nil
+}
+
+// GetUnknownField gets the value(s) for the given unknown tag number. If this
+// message has no unknown fields with the given tag, nil is returned.
+func (m *Message) GetUnknownField(tagNumber int32) []UnknownField {
+	// a lookup miss yields the zero value, which is already nil
+	return m.unknownFields[tagNumber]
+}
+
+// parseUnknownField tries to materialize a typed value for fd from raw
+// unknown-field entries previously captured for fd's tag number. On success
+// the decoded value is promoted into the known values (via internalSetField)
+// and returned. If there is no unknown data for the tag, (nil, nil) is
+// returned and the message is left unchanged.
+func (m *Message) parseUnknownField(fd *desc.FieldDescriptor) (interface{}, error) {
+	unks, ok := m.unknownFields[fd.GetNumber()]
+	if !ok {
+		return nil, nil
+	}
+	var v interface{}
+	var sl []interface{}
+	var mp map[interface{}]interface{}
+	if fd.IsMap() {
+		mp = map[interface{}]interface{}{}
+	}
+	var err error
+	for _, unk := range unks {
+		var val interface{}
+		// decode each raw entry according to the wire encoding it was
+		// captured with: length-delimited/group data vs. scalar varint/fixed
+		if unk.Encoding == proto.WireBytes || unk.Encoding == proto.WireStartGroup {
+			val, err = codec.DecodeLengthDelimitedField(fd, unk.Contents, m.mf)
+		} else {
+			val, err = codec.DecodeScalarField(fd, unk.Value)
+		}
+		if err != nil {
+			return nil, err
+		}
+		if fd.IsMap() {
+			// each entry decodes to an entry message: field 1 is the key,
+			// field 2 is the value
+			newEntry := val.(*Message)
+			kk, err := newEntry.TryGetFieldByNumber(1)
+			if err != nil {
+				return nil, err
+			}
+			vv, err := newEntry.TryGetFieldByNumber(2)
+			if err != nil {
+				return nil, err
+			}
+			mp[kk] = vv
+			v = mp
+		} else if fd.IsRepeated() {
+			t := reflect.TypeOf(val)
+			if t.Kind() == reflect.Slice && t != typeOfBytes {
+				// append slices if we unmarshalled a packed repeated field
+				newVals := val.([]interface{})
+				sl = append(sl, newVals...)
+			} else {
+				sl = append(sl, val)
+			}
+			v = sl
+		} else {
+			// non-repeated scalar/message: last entry wins
+			v = val
+		}
+	}
+	m.internalSetField(fd, v)
+	return v, nil
+}
+
+// validFieldValue checks and normalizes val for field fd, returning the
+// canonical representation used internally by dynamic messages.
+func validFieldValue(fd *desc.FieldDescriptor, val interface{}) (interface{}, error) {
+	rv := reflect.ValueOf(val)
+	return validFieldValueForRv(fd, rv)
+}
+
+// validFieldValueForRv checks and normalizes the reflect.Value val for field
+// fd. Map fields may be given either as a map (converted entry-wise) or as a
+// slice of entry messages; repeated fields must be slices/arrays and are
+// defensively copied into a []interface{}; scalar/message fields are delegated
+// to validElementFieldValueForRv.
+func validFieldValueForRv(fd *desc.FieldDescriptor, val reflect.Value) (interface{}, error) {
+	if fd.IsMap() && val.Kind() == reflect.Map {
+		return validFieldValueForMapField(fd, val)
+	}
+
+	if fd.IsRepeated() { // this will also catch map fields where given value was not a map
+		if val.Kind() != reflect.Array && val.Kind() != reflect.Slice {
+			if fd.IsMap() {
+				return nil, fmt.Errorf("value for map field must be a map; instead was %v", val.Type())
+			} else {
+				return nil, fmt.Errorf("value for repeated field must be a slice; instead was %v", val.Type())
+			}
+		}
+
+		if fd.IsMap() {
+			// value should be a slice of entry messages that we need convert into a map[interface{}]interface{}
+			m := map[interface{}]interface{}{}
+			for i := 0; i < val.Len(); i++ {
+				e, err := validElementFieldValue(fd, val.Index(i).Interface(), false)
+				if err != nil {
+					return nil, err
+				}
+				msg := e.(proto.Message)
+				// normalize the entry to a dynamic message so we can pull
+				// out the key (field 1) and value (field 2)
+				dm, err := asDynamicMessage(msg, fd.GetMessageType(), nil)
+				if err != nil {
+					return nil, err
+				}
+				k, err := dm.TryGetFieldByNumber(1)
+				if err != nil {
+					return nil, err
+				}
+				v, err := dm.TryGetFieldByNumber(2)
+				if err != nil {
+					return nil, err
+				}
+				m[k] = v
+			}
+			return m, nil
+		}
+
+		// make a defensive copy while checking contents (also converts to []interface{})
+		s := make([]interface{}, val.Len())
+		for i := 0; i < val.Len(); i++ {
+			ev := val.Index(i)
+			if ev.Kind() == reflect.Interface {
+				// unwrap it
+				ev = reflect.ValueOf(ev.Interface())
+			}
+			e, err := validElementFieldValueForRv(fd, ev, false)
+			if err != nil {
+				return nil, err
+			}
+			s[i] = e
+		}
+
+		return s, nil
+	}
+
+	return validElementFieldValueForRv(fd, val, false)
+}
+
+// asDynamicMessage returns m as a *Message: directly when it already is one,
+// otherwise by copying its contents into a fresh dynamic message built from
+// md with the given factory.
+func asDynamicMessage(m proto.Message, md *desc.MessageDescriptor, mf *MessageFactory) (*Message, error) {
+	if existing, ok := m.(*Message); ok {
+		return existing, nil
+	}
+	result := NewMessageWithMessageFactory(md, mf)
+	if err := result.mergeFrom(m); err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
+// validElementFieldValue checks and normalizes a single element value for
+// field fd (for repeated fields, one element of the slice).
+func validElementFieldValue(fd *desc.FieldDescriptor, val interface{}, allowNilMessage bool) (interface{}, error) {
+	rv := reflect.ValueOf(val)
+	return validElementFieldValueForRv(fd, rv, allowNilMessage)
+}
+
+// validElementFieldValueForRv checks a single element value against fd's
+// declared type, dispatching to the per-kind converters (toInt32, toString,
+// etc.). For message/group fields it additionally verifies that the value's
+// message type matches fd's message type. allowNilMessage permits a typed-nil
+// *Message (only used for map values, to represent an entry with no value).
+func validElementFieldValueForRv(fd *desc.FieldDescriptor, val reflect.Value, allowNilMessage bool) (interface{}, error) {
+	t := fd.GetType()
+	if !val.IsValid() {
+		// untyped nil has no reflect.Value; report as a type mismatch
+		return nil, typeError(fd, nil)
+	}
+
+	switch t {
+	case descriptorpb.FieldDescriptorProto_TYPE_SFIXED32,
+		descriptorpb.FieldDescriptorProto_TYPE_INT32,
+		descriptorpb.FieldDescriptorProto_TYPE_SINT32,
+		descriptorpb.FieldDescriptorProto_TYPE_ENUM:
+		return toInt32(reflect.Indirect(val), fd)
+
+	case descriptorpb.FieldDescriptorProto_TYPE_SFIXED64,
+		descriptorpb.FieldDescriptorProto_TYPE_INT64,
+		descriptorpb.FieldDescriptorProto_TYPE_SINT64:
+		return toInt64(reflect.Indirect(val), fd)
+
+	case descriptorpb.FieldDescriptorProto_TYPE_FIXED32,
+		descriptorpb.FieldDescriptorProto_TYPE_UINT32:
+		return toUint32(reflect.Indirect(val), fd)
+
+	case descriptorpb.FieldDescriptorProto_TYPE_FIXED64,
+		descriptorpb.FieldDescriptorProto_TYPE_UINT64:
+		return toUint64(reflect.Indirect(val), fd)
+
+	case descriptorpb.FieldDescriptorProto_TYPE_FLOAT:
+		return toFloat32(reflect.Indirect(val), fd)
+
+	case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE:
+		return toFloat64(reflect.Indirect(val), fd)
+
+	case descriptorpb.FieldDescriptorProto_TYPE_BOOL:
+		return toBool(reflect.Indirect(val), fd)
+
+	case descriptorpb.FieldDescriptorProto_TYPE_BYTES:
+		return toBytes(reflect.Indirect(val), fd)
+
+	case descriptorpb.FieldDescriptorProto_TYPE_STRING:
+		return toString(reflect.Indirect(val), fd)
+
+	case descriptorpb.FieldDescriptorProto_TYPE_MESSAGE,
+		descriptorpb.FieldDescriptorProto_TYPE_GROUP:
+		m, err := asMessage(val, fd.GetFullyQualifiedName())
+		// check that message is correct type
+		if err != nil {
+			return nil, err
+		}
+		var msgType string
+		if dm, ok := m.(*Message); ok {
+			if allowNilMessage && dm == nil {
+				// if dm == nil, we'll panic below, so early out if that is allowed
+				// (only allowed for map values, to indicate an entry w/ no value)
+				return m, nil
+			}
+			msgType = dm.GetMessageDescriptor().GetFullyQualifiedName()
+		} else {
+			msgType = proto.MessageName(m)
+		}
+		if msgType != fd.GetMessageType().GetFullyQualifiedName() {
+			return nil, fmt.Errorf("message field %s requires value of type %s; received %s", fd.GetFullyQualifiedName(), fd.GetMessageType().GetFullyQualifiedName(), msgType)
+		}
+		return m, nil
+
+	default:
+		return nil, fmt.Errorf("unable to handle unrecognized field type: %v", fd.GetType())
+	}
+}
+
+// toInt32 extracts an int32 from v, requiring v's kind to be exactly int32.
+func toInt32(v reflect.Value, fd *desc.FieldDescriptor) (int32, error) {
+	if v.Kind() != reflect.Int32 {
+		return 0, typeError(fd, v.Type())
+	}
+	return int32(v.Int()), nil
+}
+
+// toUint32 extracts a uint32 from v, requiring v's kind to be exactly uint32.
+func toUint32(v reflect.Value, fd *desc.FieldDescriptor) (uint32, error) {
+	if v.Kind() != reflect.Uint32 {
+		return 0, typeError(fd, v.Type())
+	}
+	return uint32(v.Uint()), nil
+}
+
+// toFloat32 extracts a float32 from v, requiring v's kind to be exactly float32.
+func toFloat32(v reflect.Value, fd *desc.FieldDescriptor) (float32, error) {
+	if v.Kind() != reflect.Float32 {
+		return 0, typeError(fd, v.Type())
+	}
+	return float32(v.Float()), nil
+}
+
+// toInt64 extracts an int64 from v; int64, int, and int32 kinds are accepted.
+func toInt64(v reflect.Value, fd *desc.FieldDescriptor) (int64, error) {
+	switch v.Kind() {
+	case reflect.Int64, reflect.Int, reflect.Int32:
+		return v.Int(), nil
+	}
+	return 0, typeError(fd, v.Type())
+}
+
+// toUint64 extracts a uint64 from v; uint64, uint, and uint32 kinds are accepted.
+func toUint64(v reflect.Value, fd *desc.FieldDescriptor) (uint64, error) {
+	switch v.Kind() {
+	case reflect.Uint64, reflect.Uint, reflect.Uint32:
+		return v.Uint(), nil
+	}
+	return 0, typeError(fd, v.Type())
+}
+
+// toFloat64 extracts a float64 from v; float64 and float32 kinds are accepted.
+func toFloat64(v reflect.Value, fd *desc.FieldDescriptor) (float64, error) {
+	switch v.Kind() {
+	case reflect.Float64, reflect.Float32:
+		return v.Float(), nil
+	}
+	return 0, typeError(fd, v.Type())
+}
+
+// toBool extracts a bool from v, requiring v's kind to be exactly bool.
+func toBool(v reflect.Value, fd *desc.FieldDescriptor) (bool, error) {
+	if v.Kind() != reflect.Bool {
+		return false, typeError(fd, v.Type())
+	}
+	return v.Bool(), nil
+}
+
+// toBytes extracts a []byte from v, requiring a slice whose element kind is uint8.
+func toBytes(v reflect.Value, fd *desc.FieldDescriptor) ([]byte, error) {
+	if v.Kind() != reflect.Slice || v.Type().Elem().Kind() != reflect.Uint8 {
+		return nil, typeError(fd, v.Type())
+	}
+	return v.Bytes(), nil
+}
+
+// toString extracts a string from v, requiring v's kind to be exactly string.
+func toString(v reflect.Value, fd *desc.FieldDescriptor) (string, error) {
+	if v.Kind() != reflect.String {
+		return "", typeError(fd, v.Type())
+	}
+	return v.String(), nil
+}
+
+// typeError reports that a value of type t cannot be assigned to field fd.
+func typeError(fd *desc.FieldDescriptor, t reflect.Type) error {
+	return fmt.Errorf("%s field %s is not compatible with value of type %v",
+		getTypeString(fd), fd.GetFullyQualifiedName(), t)
+}
+
+// getTypeString renders fd's declared descriptor type enum in lower case,
+// for use in error messages.
+func getTypeString(fd *desc.FieldDescriptor) string {
+	typeName := fd.GetType().String()
+	return strings.ToLower(typeName)
+}
+
+// asMessage converts v to a proto.Message. The value must be a pointer to a
+// struct that implements proto.Message; otherwise an error naming the
+// offending field is returned.
+func asMessage(v reflect.Value, fieldName string) (proto.Message, error) {
+	t := v.Type()
+	// we need a pointer to a struct that implements proto.Message
+	if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct || !t.Implements(typeOfProtoMessage) {
+		// NOTE: the previous message read "requires is not compatible", two
+		// phrasings fused together; report a single coherent sentence.
+		return nil, fmt.Errorf("message field %s is not compatible with value of type %v", fieldName, v.Type())
+	}
+	return v.Interface().(proto.Message), nil
+}
+
+// Reset resets this message to an empty message. It removes all values set in
+// the message, including any captured unknown fields, while keeping the
+// existing maps (and their storage) in place.
+func (m *Message) Reset() {
+	for tag := range m.values {
+		delete(m.values, tag)
+	}
+	for tag := range m.unknownFields {
+		delete(m.unknownFields, tag)
+	}
+}
+
+// String returns this message rendered in compact text format.
+func (m *Message) String() string {
+	b, err := m.MarshalText()
+	if err == nil {
+		return string(b)
+	}
+	panic(fmt.Sprintf("Failed to create string representation of message: %s", err.Error()))
+}
+
+// ProtoMessage is present to satisfy the proto.Message interface.
+// It is intentionally a no-op marker method.
+func (m *Message) ProtoMessage() {
+}
+
+// ConvertTo converts this dynamic message into the given message. This is
+// shorthand for resetting then merging:
+//
+//	target.Reset()
+//	m.MergeInto(target)
+func (m *Message) ConvertTo(target proto.Message) error {
+	err := m.checkType(target)
+	if err != nil {
+		return err
+	}
+	target.Reset()
+	return m.mergeInto(target, defaultDeterminism)
+}
+
+// ConvertToDeterministic converts this dynamic message into the given message.
+// It is just like ConvertTo, but it attempts to produce deterministic results.
+// That means that if the target is a generated message (not another dynamic
+// message) and the current runtime is unaware of any fields or extensions that
+// are present in m, they will be serialized into the target's unrecognized
+// fields deterministically.
+func (m *Message) ConvertToDeterministic(target proto.Message) error {
+	err := m.checkType(target)
+	if err != nil {
+		return err
+	}
+	target.Reset()
+	return m.mergeInto(target, true)
+}
+
+// ConvertFrom converts the given message into this dynamic message. This is
+// shorthand for resetting then merging:
+//
+//	m.Reset()
+//	m.MergeFrom(target)
+func (m *Message) ConvertFrom(target proto.Message) error {
+	err := m.checkType(target)
+	if err != nil {
+		return err
+	}
+	m.Reset()
+	return m.mergeFrom(target)
+}
+
+// MergeInto merges this dynamic message into the given message. All field
+// values in this message will be set on the given message. For map fields,
+// entries are added to the given message (if the given message has existing
+// values for like keys, they are overwritten). For slice fields, elements are
+// added.
+//
+// If the given message has a different set of known fields, it is possible for
+// some known fields in this message to be represented as unknown fields in the
+// given message after merging, and vice versa.
+func (m *Message) MergeInto(target proto.Message) error {
+	err := m.checkType(target)
+	if err != nil {
+		return err
+	}
+	return m.mergeInto(target, defaultDeterminism)
+}
+
+// MergeIntoDeterministic merges this dynamic message into the given message.
+// It is just like MergeInto, but it attempts to produce deterministic results.
+// That means that if the target is a generated message (not another dynamic
+// message) and the current runtime is unaware of any fields or extensions that
+// are present in m, they will be serialized into the target's unrecognized
+// fields deterministically.
+func (m *Message) MergeIntoDeterministic(target proto.Message) error {
+	err := m.checkType(target)
+	if err != nil {
+		return err
+	}
+	return m.mergeInto(target, true)
+}
+
+// MergeFrom merges the given message into this dynamic message. All field
+// values in the given message will be set on this message. For map fields,
+// entries are added to this message (if this message has existing values for
+// like keys, they are overwritten). For slice fields, elements are added.
+//
+// If the given message has a different set of known fields, it is possible for
+// some known fields in that message to be represented as unknown fields in this
+// message after merging, and vice versa.
+func (m *Message) MergeFrom(source proto.Message) error {
+	err := m.checkType(source)
+	if err != nil {
+		return err
+	}
+	return m.mergeFrom(source)
+}
+
+// Merge implements the proto.Merger interface so that dynamic messages are
+// compatible with the proto.Merge function. It delegates to MergeFrom but will
+// panic on error as the proto.Merger interface doesn't allow for returning an
+// error.
+//
+// Unlike nearly all other methods, this method can work if this message's type
+// is not defined (such as instantiating the message without using NewMessage).
+// This is strictly so that dynamic message's are compatible with the
+// proto.Clone function, which instantiates a new message via reflection (thus
+// its message descriptor will not be set) and than calls Merge.
+func (m *Message) Merge(source proto.Message) {
+ if m.md == nil {
+ // To support proto.Clone, initialize the descriptor from the source.
+ if dm, ok := source.(*Message); ok {
+ m.md = dm.md
+ // also make sure the clone uses the same message factory and
+ // extensions and also knows about the same extra fields (if any)
+ m.mf = dm.mf
+ m.er = dm.er
+ m.extraFields = dm.extraFields
+ } else if md, err := desc.LoadMessageDescriptorForMessage(source); err != nil {
+ panic(err.Error())
+ } else {
+ m.md = md
+ }
+ }
+
+ if err := m.MergeFrom(source); err != nil {
+ panic(err.Error())
+ }
+}
+
+func (m *Message) checkType(target proto.Message) error {
+ if dm, ok := target.(*Message); ok {
+ if dm.md.GetFullyQualifiedName() != m.md.GetFullyQualifiedName() {
+ return fmt.Errorf("given message has wrong type: %q; expecting %q", dm.md.GetFullyQualifiedName(), m.md.GetFullyQualifiedName())
+ }
+ return nil
+ }
+
+ msgName := proto.MessageName(target)
+ if msgName != m.md.GetFullyQualifiedName() {
+ return fmt.Errorf("given message has wrong type: %q; expecting %q", msgName, m.md.GetFullyQualifiedName())
+ }
+ return nil
+}
+
+// mergeInto merges this dynamic message's data into the generated message pm.
+// It runs in two phases: first every present value is checked for
+// convertibility to the corresponding target field (so the merge cannot fail
+// partway through), then regular fields, one-ofs, and registered extensions
+// are actually merged. Tags pm does not recognize are serialized into pm's
+// unrecognized bytes, and this message's own unknown fields are conveyed by
+// re-unmarshaling their bytes into pm.
+func (m *Message) mergeInto(pm proto.Message, deterministic bool) error {
+	if dm, ok := pm.(*Message); ok {
+		// target is also dynamic; use the dynamic-to-dynamic path instead
+		return dm.mergeFrom(m)
+	}
+
+	target := reflect.ValueOf(pm)
+	if target.Kind() == reflect.Ptr {
+		target = target.Elem()
+	}
+
+	// track tags for which the dynamic message has data but the given
+	// message doesn't know about it
+	unknownTags := map[int32]struct{}{}
+	for tag := range m.values {
+		unknownTags[tag] = struct{}{}
+	}
+
+	// check that we can successfully do the merge
+	structProps := proto.GetProperties(reflect.TypeOf(pm).Elem())
+	for _, prop := range structProps.Prop {
+		if prop.Tag == 0 {
+			continue // one-of or special field (such as XXX_unrecognized, etc.)
+		}
+		tag := int32(prop.Tag)
+		v, ok := m.values[tag]
+		if !ok {
+			continue
+		}
+		if unknownTags != nil {
+			delete(unknownTags, tag)
+		}
+		f := target.FieldByName(prop.Name)
+		ft := f.Type()
+		val := reflect.ValueOf(v)
+		if !canConvert(val, ft) {
+			return fmt.Errorf("cannot convert %v to %v", val.Type(), ft)
+		}
+	}
+	// check one-of fields
+	for _, oop := range structProps.OneofTypes {
+		prop := oop.Prop
+		tag := int32(prop.Tag)
+		v, ok := m.values[tag]
+		if !ok {
+			continue
+		}
+		if unknownTags != nil {
+			delete(unknownTags, tag)
+		}
+		stf, ok := oop.Type.Elem().FieldByName(prop.Name)
+		if !ok {
+			return fmt.Errorf("one-of field indicates struct field name %s, but type %v has no such field", prop.Name, oop.Type.Elem())
+		}
+		ft := stf.Type
+		val := reflect.ValueOf(v)
+		if !canConvert(val, ft) {
+			return fmt.Errorf("cannot convert %v to %v", val.Type(), ft)
+		}
+	}
+	// and check extensions, too
+	for tag, ext := range proto.RegisteredExtensions(pm) {
+		v, ok := m.values[tag]
+		if !ok {
+			continue
+		}
+		if unknownTags != nil {
+			delete(unknownTags, tag)
+		}
+		ft := reflect.TypeOf(ext.ExtensionType)
+		val := reflect.ValueOf(v)
+		if !canConvert(val, ft) {
+			return fmt.Errorf("cannot convert %v to %v", val.Type(), ft)
+		}
+	}
+
+	// now actually perform the merge
+	for _, prop := range structProps.Prop {
+		v, ok := m.values[int32(prop.Tag)]
+		if !ok {
+			continue
+		}
+		f := target.FieldByName(prop.Name)
+		if err := mergeVal(reflect.ValueOf(v), f, deterministic); err != nil {
+			return err
+		}
+	}
+	// merge one-ofs
+	for _, oop := range structProps.OneofTypes {
+		prop := oop.Prop
+		tag := int32(prop.Tag)
+		v, ok := m.values[tag]
+		if !ok {
+			continue
+		}
+		// one-of values live in a freshly allocated wrapper struct that is
+		// then stored into the interface-typed one-of struct field
+		oov := reflect.New(oop.Type.Elem())
+		f := oov.Elem().FieldByName(prop.Name)
+		if err := mergeVal(reflect.ValueOf(v), f, deterministic); err != nil {
+			return err
+		}
+		target.Field(oop.Field).Set(oov)
+	}
+	// merge extensions, too
+	for tag, ext := range proto.RegisteredExtensions(pm) {
+		v, ok := m.values[tag]
+		if !ok {
+			continue
+		}
+		e := reflect.New(reflect.TypeOf(ext.ExtensionType)).Elem()
+		if err := mergeVal(reflect.ValueOf(v), e, deterministic); err != nil {
+			return err
+		}
+		if err := proto.SetExtension(pm, ext, e.Interface()); err != nil {
+			// shouldn't happen since we already checked that the extension type was compatible above
+			return err
+		}
+	}
+
+	// if we have fields that the given message doesn't know about, add to its unknown fields
+	if len(unknownTags) > 0 {
+		var b codec.Buffer
+		b.SetDeterministic(deterministic)
+		if deterministic {
+			// if we need to emit things deterministically, sort the
+			// extensions by their tag number
+			sortedUnknownTags := make([]int32, 0, len(unknownTags))
+			for tag := range unknownTags {
+				sortedUnknownTags = append(sortedUnknownTags, tag)
+			}
+			sort.Slice(sortedUnknownTags, func(i, j int) bool {
+				return sortedUnknownTags[i] < sortedUnknownTags[j]
+			})
+			for _, tag := range sortedUnknownTags {
+				fd := m.FindFieldDescriptor(tag)
+				if err := b.EncodeFieldValue(fd, m.values[tag]); err != nil {
+					return err
+				}
+			}
+		} else {
+			for tag := range unknownTags {
+				fd := m.FindFieldDescriptor(tag)
+				if err := b.EncodeFieldValue(fd, m.values[tag]); err != nil {
+					return err
+				}
+			}
+		}
+
+		internal.SetUnrecognized(pm, b.Bytes())
+	}
+
+	// finally, convey unknown fields into the given message by letting it unmarshal them
+	// (this will append to its unknown fields if not known; if somehow the given message recognizes
+	// a field even though the dynamic message did not, it will get correctly unmarshalled)
+	if unknownTags != nil && len(m.unknownFields) > 0 {
+		var b codec.Buffer
+		// errors ignored: conveying unknown fields is best-effort
+		_ = m.marshalUnknownFields(&b)
+		_ = proto.UnmarshalMerge(b.Bytes(), pm)
+	}
+
+	return nil
+}
+
+// canConvert reports whether the value src can be converted (by mergeVal) to
+// a target field of the given type. Slices and maps are checked element-wise,
+// and a dynamic message is convertible to a generated message type when their
+// fully-qualified message names match.
+func canConvert(src reflect.Value, target reflect.Type) bool {
+	if src.Kind() == reflect.Interface {
+		// unwrap interface values to their dynamic type
+		src = reflect.ValueOf(src.Interface())
+	}
+	srcType := src.Type()
+	// we allow convertible types instead of requiring exact types so that calling
+	// code can, for example, assign an enum constant to an enum field. In that case,
+	// one type is the enum type (a sub-type of int32) and the other may be the int32
+	// type. So we automatically do the conversion in that case.
+	if srcType.ConvertibleTo(target) {
+		return true
+	} else if target.Kind() == reflect.Ptr && srcType.ConvertibleTo(target.Elem()) {
+		return true
+	} else if target.Kind() == reflect.Slice {
+		if srcType.Kind() != reflect.Slice {
+			return false
+		}
+		et := target.Elem()
+		for i := 0; i < src.Len(); i++ {
+			if !canConvert(src.Index(i), et) {
+				return false
+			}
+		}
+		return true
+	} else if target.Kind() == reflect.Map {
+		if srcType.Kind() != reflect.Map {
+			return false
+		}
+		return canConvertMap(src, target)
+	} else if srcType == typeOfDynamicMessage && target.Implements(typeOfProtoMessage) {
+		// a dynamic message converts to a generated message of the same name
+		z := reflect.Zero(target).Interface()
+		msgType := proto.MessageName(z.(proto.Message))
+		return msgType == src.Interface().(*Message).GetMessageDescriptor().GetFullyQualifiedName()
+	} else {
+		return false
+	}
+}
+
+// mergeVal merges the value src into the settable reflect.Value target,
+// performing any type conversions that canConvert approved: direct
+// conversion, pointer wrapping, element-wise slice append, map merge, and
+// dynamic-message-to-generated-message merges.
+func mergeVal(src, target reflect.Value, deterministic bool) error {
+	if src.Kind() == reflect.Interface && !src.IsNil() {
+		src = src.Elem()
+	}
+	srcType := src.Type()
+	targetType := target.Type()
+	if srcType.ConvertibleTo(targetType) {
+		if targetType.Implements(typeOfProtoMessage) && !target.IsNil() {
+			// target already holds a message: merge into it rather than replace
+			Merge(target.Interface().(proto.Message), src.Convert(targetType).Interface().(proto.Message))
+		} else {
+			target.Set(src.Convert(targetType))
+		}
+	} else if targetType.Kind() == reflect.Ptr && srcType.ConvertibleTo(targetType.Elem()) {
+		if !src.CanAddr() {
+			target.Set(reflect.New(targetType.Elem()))
+			target.Elem().Set(src.Convert(targetType.Elem()))
+		} else {
+			target.Set(src.Addr().Convert(targetType))
+		}
+	} else if targetType.Kind() == reflect.Slice {
+		// append src's elements after target's existing ones
+		l := target.Len()
+		newL := l + src.Len()
+		if target.Cap() < newL {
+			// expand capacity of the slice and copy
+			newSl := reflect.MakeSlice(targetType, newL, newL)
+			for i := 0; i < target.Len(); i++ {
+				newSl.Index(i).Set(target.Index(i))
+			}
+			target.Set(newSl)
+		} else {
+			target.SetLen(newL)
+		}
+		for i := 0; i < src.Len(); i++ {
+			dest := target.Index(l + i)
+			if dest.Kind() == reflect.Ptr {
+				dest.Set(reflect.New(dest.Type().Elem()))
+			}
+			if err := mergeVal(src.Index(i), dest, deterministic); err != nil {
+				return err
+			}
+		}
+	} else if targetType.Kind() == reflect.Map {
+		return mergeMapVal(src, target, targetType, deterministic)
+	} else if srcType == typeOfDynamicMessage && targetType.Implements(typeOfProtoMessage) {
+		dm := src.Interface().(*Message)
+		if target.IsNil() {
+			target.Set(reflect.New(targetType.Elem()))
+		}
+		m := target.Interface().(proto.Message)
+		if err := dm.mergeInto(m, deterministic); err != nil {
+			return err
+		}
+	} else {
+		return fmt.Errorf("cannot convert %v to %v", srcType, targetType)
+	}
+	return nil
+}
+
+// mergeFrom merges the given message pm into this dynamic message. For a
+// dynamic source, values are merged tag by tag. For a generated source, the
+// struct is reflected over to collect regular fields, one-ofs, and
+// extensions into a validated value set before any mutation happens; API-v2
+// extensions that the v1 APIs cannot surface are marshaled to bytes and
+// re-parsed, and any unrecognized bytes on pm are unmarshaled best-effort.
+func (m *Message) mergeFrom(pm proto.Message) error {
+	if dm, ok := pm.(*Message); ok {
+		// if given message is also a dynamic message, we merge differently
+		for tag, v := range dm.values {
+			fd := m.FindFieldDescriptor(tag)
+			if fd == nil {
+				// fall back to the source's descriptor for tags we don't know
+				fd = dm.FindFieldDescriptor(tag)
+			}
+			if err := mergeField(m, fd, v); err != nil {
+				return err
+			}
+		}
+		return nil
+	}
+
+	pmrv := reflect.ValueOf(pm)
+	if pmrv.IsNil() {
+		// nil is an empty message, so nothing to do
+		return nil
+	}
+
+	// check that we can successfully do the merge
+	src := pmrv.Elem()
+	values := map[*desc.FieldDescriptor]interface{}{}
+	props := proto.GetProperties(reflect.TypeOf(pm).Elem())
+	if props == nil {
+		return fmt.Errorf("could not determine message properties to merge for %v", reflect.TypeOf(pm).Elem())
+	}
+
+	// regular fields
+	for _, prop := range props.Prop {
+		if prop.Tag == 0 {
+			continue // one-of or special field (such as XXX_unrecognized, etc.)
+		}
+		fd := m.FindFieldDescriptor(int32(prop.Tag))
+		if fd == nil {
+			// Our descriptor has different fields than this message object. So
+			// try to reflect on the message object's fields.
+			md, err := desc.LoadMessageDescriptorForMessage(pm)
+			if err != nil {
+				return err
+			}
+			fd = md.FindFieldByNumber(int32(prop.Tag))
+			if fd == nil {
+				return fmt.Errorf("message descriptor %q did not contain field for tag %d (%q)", md.GetFullyQualifiedName(), prop.Tag, prop.Name)
+			}
+		}
+		rv := src.FieldByName(prop.Name)
+		if (rv.Kind() == reflect.Ptr || rv.Kind() == reflect.Slice) && rv.IsNil() {
+			// unset field; skip
+			continue
+		}
+		if v, err := validFieldValueForRv(fd, rv); err != nil {
+			return err
+		} else {
+			values[fd] = v
+		}
+	}
+
+	// one-of fields
+	for _, oop := range props.OneofTypes {
+		oov := src.Field(oop.Field).Elem()
+		if !oov.IsValid() || oov.Type() != oop.Type {
+			// this field is unset (in other words, one-of message field is not currently set to this option)
+			continue
+		}
+		prop := oop.Prop
+		rv := oov.Elem().FieldByName(prop.Name)
+		fd := m.FindFieldDescriptor(int32(prop.Tag))
+		if fd == nil {
+			// Our descriptor has different fields than this message object. So
+			// try to reflect on the message object's fields.
+			md, err := desc.LoadMessageDescriptorForMessage(pm)
+			if err != nil {
+				return err
+			}
+			fd = md.FindFieldByNumber(int32(prop.Tag))
+			if fd == nil {
+				return fmt.Errorf("message descriptor %q did not contain field for tag %d (%q in one-of %q)", md.GetFullyQualifiedName(), prop.Tag, prop.Name, src.Type().Field(oop.Field).Name)
+			}
+		}
+		if v, err := validFieldValueForRv(fd, rv); err != nil {
+			return err
+		} else {
+			values[fd] = v
+		}
+	}
+
+	// extension fields
+	rexts, _ := proto.ExtensionDescs(pm)
+	for _, ed := range rexts {
+		v, _ := proto.GetExtension(pm, ed)
+		if v == nil {
+			continue
+		}
+		if ed.ExtensionType == nil {
+			// unrecognized extension: we'll handle that below when we
+			// handle other unrecognized fields
+			continue
+		}
+		fd := m.er.FindExtension(m.md.GetFullyQualifiedName(), ed.Field)
+		if fd == nil {
+			var err error
+			if fd, err = desc.LoadFieldDescriptorForExtension(ed); err != nil {
+				return err
+			}
+		}
+		if v, err := validFieldValue(fd, v); err != nil {
+			return err
+		} else {
+			values[fd] = v
+		}
+	}
+
+	// With API v2, it is possible that the new protoreflect interfaces
+	// were used to store an extension, which means it can't be returned
+	// by proto.ExtensionDescs and it's also not in the unrecognized data.
+	// So we have a separate loop to trawl through it...
+	var err error
+	proto.MessageReflect(pm).Range(func(fld protoreflect.FieldDescriptor, val protoreflect.Value) bool {
+		if !fld.IsExtension() {
+			// normal field... we already got it above
+			return true
+		}
+		xt := fld.(protoreflect.ExtensionTypeDescriptor)
+		if _, ok := xt.Type().(*proto.ExtensionDesc); ok {
+			// known extension... we already got it above
+			return true
+		}
+		var fd *desc.FieldDescriptor
+		fd, err = desc.WrapField(fld)
+		if err != nil {
+			return false
+		}
+		v := convertProtoReflectValue(val)
+		if v, err = validFieldValue(fd, v); err != nil {
+			return false
+		}
+		values[fd] = v
+		return true
+	})
+	if err != nil {
+		return err
+	}
+
+	// unrecognized extensions fields:
+	// In API v2 of proto, some extensions may NEITHER be included in ExtensionDescs
+	// above NOR included in unrecognized fields below. These are extensions that use
+	// a custom extension type (not a generated one -- i.e. not a linked in extension).
+	mr := proto.MessageReflect(pm)
+	var extBytes []byte
+	var retErr error
+	mr.Range(func(fld protoreflect.FieldDescriptor, val protoreflect.Value) bool {
+		if !fld.IsExtension() {
+			// normal field, already processed above
+			return true
+		}
+		if extd, ok := fld.(protoreflect.ExtensionTypeDescriptor); ok {
+			if _, ok := extd.Type().(*proto.ExtensionDesc); ok {
+				// normal known extension, already processed above
+				return true
+			}
+		}
+
+		// marshal the extension to bytes and then handle as unknown field below
+		mr.New()
+		mr.Set(fld, val)
+		extBytes, retErr = protov2.MarshalOptions{}.MarshalAppend(extBytes, mr.Interface())
+		return retErr == nil
+	})
+	if retErr != nil {
+		return retErr
+	}
+
+	// now actually perform the merge
+	for fd, v := range values {
+		if err := mergeField(m, fd, v); err != nil {
+			return err
+		}
+	}
+
+	if len(extBytes) > 0 {
+		// treating unrecognized extensions like unknown fields: best-effort
+		// ignore any error returned: pulling in unknown fields is best-effort
+		_ = m.UnmarshalMerge(extBytes)
+	}
+
+	data := internal.GetUnrecognized(pm)
+	if len(data) > 0 {
+		// ignore any error returned: pulling in unknown fields is best-effort
+		_ = m.UnmarshalMerge(data)
+	}
+
+	return nil
+}
+
+// convertProtoReflectValue translates a protoreflect.Value into this
+// package's internal representation: messages become proto.Message values,
+// maps and lists are converted recursively, enum numbers become int32, and
+// everything else passes through unchanged.
+func convertProtoReflectValue(v protoreflect.Value) interface{} {
+	switch val := v.Interface().(type) {
+	case protoreflect.Message:
+		return val.Interface()
+	case protoreflect.Map:
+		result := make(map[interface{}]interface{}, val.Len())
+		val.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
+			result[convertProtoReflectValue(k.Value())] = convertProtoReflectValue(v)
+			return true
+		})
+		return result
+	case protoreflect.List:
+		result := make([]interface{}, val.Len())
+		for i := range result {
+			result[i] = convertProtoReflectValue(val.Get(i))
+		}
+		return result
+	case protoreflect.EnumNumber:
+		return int32(val)
+	default:
+		return val
+	}
+}
+
+// Validate checks that all required fields are present. It returns an error if any are absent.
+func (m *Message) Validate() error {
+	missing := m.findMissingFields()
+	if len(missing) > 0 {
+		return fmt.Errorf("some required fields missing: %v", strings.Join(missing, ", "))
+	}
+	return nil
+}
+
+// findMissingFields returns the names of required fields that currently have
+// no value. Proto3 has no required fields, so nil is returned for proto3.
+func (m *Message) findMissingFields() []string {
+	if m.md.IsProto3() {
+		// proto3 does not allow required fields
+		return nil
+	}
+	var missing []string
+	for _, fd := range m.md.GetFields() {
+		if !fd.IsRequired() {
+			continue
+		}
+		if _, present := m.values[fd.GetNumber()]; !present {
+			missing = append(missing, fd.GetName())
+		}
+	}
+	return missing
+}
+
+// ValidateRecursive checks that all required fields are present and also
+// recursively validates all fields who are also messages. It returns an error
+// if any required fields, in this message or nested within, are absent.
+func (m *Message) ValidateRecursive() error {
+	// the empty prefix means reported field paths start at this root message
+	return m.validateRecursive("")
+}
+
+// validateRecursive checks this message's required fields and then descends
+// into every message-typed value (including map values and repeated
+// elements), prepending prefix plus the field's name/index to any reported
+// missing-field paths.
+func (m *Message) validateRecursive(prefix string) error {
+	if missingFields := m.findMissingFields(); len(missingFields) > 0 {
+		for i := range missingFields {
+			missingFields[i] = fmt.Sprintf("%s%s", prefix, missingFields[i])
+		}
+		return fmt.Errorf("some required fields missing: %v", strings.Join(missingFields, ", "))
+	}
+
+	for tag, fld := range m.values {
+		fd := m.FindFieldDescriptor(tag)
+		var chprefix string
+		var md *desc.MessageDescriptor
+		// checkMsg validates one nested message value, converting a generated
+		// message to a dynamic one first; chprefix/md are set by the caller
+		// before each invocation
+		checkMsg := func(pm proto.Message) error {
+			var dm *Message
+			if d, ok := pm.(*Message); ok {
+				dm = d
+			} else if pm != nil {
+				dm = m.mf.NewDynamicMessage(md)
+				if err := dm.ConvertFrom(pm); err != nil {
+					// NOTE(review): the conversion error is discarded and the
+					// nested message is treated as valid — presumably a
+					// deliberate best-effort choice, but confirm this should
+					// not be `return err`
+					return nil
+				}
+			}
+			if dm == nil {
+				return nil
+			}
+			if err := dm.validateRecursive(chprefix); err != nil {
+				return err
+			}
+			return nil
+		}
+		isMap := fd.IsMap()
+		if isMap && fd.GetMapValueType().GetMessageType() != nil {
+			md = fd.GetMapValueType().GetMessageType()
+			mp := fld.(map[interface{}]interface{})
+			for k, v := range mp {
+				chprefix = fmt.Sprintf("%s%s[%v].", prefix, getName(fd), k)
+				if err := checkMsg(v.(proto.Message)); err != nil {
+					return err
+				}
+			}
+		} else if !isMap && fd.GetMessageType() != nil {
+			md = fd.GetMessageType()
+			if fd.IsRepeated() {
+				sl := fld.([]interface{})
+				for i, v := range sl {
+					chprefix = fmt.Sprintf("%s%s[%d].", prefix, getName(fd), i)
+					if err := checkMsg(v.(proto.Message)); err != nil {
+						return err
+					}
+				}
+			} else {
+				chprefix = fmt.Sprintf("%s%s.", prefix, getName(fd))
+				if err := checkMsg(fld.(proto.Message)); err != nil {
+					return err
+				}
+			}
+		}
+	}
+
+	return nil
+}
+
+// getName renders fd's name for use in error paths: extensions are shown as
+// their parenthesized fully-qualified name, regular fields by simple name.
+func getName(fd *desc.FieldDescriptor) string {
+	if !fd.IsExtension() {
+		return fd.GetName()
+	}
+	return fmt.Sprintf("(%s)", fd.GetFullyQualifiedName())
+}
+
+// knownFieldTags return tags of present and recognized fields, in sorted order.
+func (m *Message) knownFieldTags() []int {
+	if len(m.values) == 0 {
+		return []int(nil)
+	}
+
+	tags := make([]int, 0, len(m.values))
+	for tag := range m.values {
+		tags = append(tags, int(tag))
+	}
+
+	sort.Ints(tags)
+	return tags
+}
+
+// allKnownFieldTags return tags of present and recognized fields, including
+// those that are unset, in sorted order. This only includes extensions that are
+// present. Known but not-present extensions are not included in the returned
+// set of tags.
+func (m *Message) allKnownFieldTags() []int {
+	fds := m.md.GetFields()
+	tags := make([]int, 0, len(fds)+len(m.extraFields))
+
+	for tag := range m.values {
+		tags = append(tags, int(tag))
+	}
+
+	// also include known fields that are not present
+	for _, fd := range fds {
+		if _, present := m.values[fd.GetNumber()]; !present {
+			tags = append(tags, int(fd.GetNumber()))
+		}
+	}
+	for _, fd := range m.extraFields {
+		if fd.IsExtension() {
+			// skip extensions that are not present
+			continue
+		}
+		if _, present := m.values[fd.GetNumber()]; !present {
+			tags = append(tags, int(fd.GetNumber()))
+		}
+	}
+
+	sort.Ints(tags)
+	return tags
+}
+
+// unknownFieldTags return tags of present but unrecognized fields, in sorted order.
+func (m *Message) unknownFieldTags() []int {
+	if len(m.unknownFields) == 0 {
+		return []int(nil)
+	}
+
+	tags := make([]int, 0, len(m.unknownFields))
+	for tag := range m.unknownFields {
+		tags = append(tags, int(tag))
+	}
+
+	sort.Ints(tags)
+	return tags
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/equal.go b/vendor/github.com/jhump/protoreflect/dynamic/equal.go
new file mode 100644
index 0000000..e44c6c5
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/equal.go
@@ -0,0 +1,157 @@
+package dynamic
+
+import (
+ "bytes"
+ "reflect"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/jhump/protoreflect/desc"
+)
+
// Equal returns true if the given two dynamic messages are equal. Two messages are equal when they
// have the same message type and same fields set to equal values. For proto3 messages, fields set
// to their zero value are considered unset.
func Equal(a, b *Message) bool {
	// identical pointers (also covers both-nil)
	if a == b {
		return true
	}
	// exactly one nil
	if (a == nil) != (b == nil) {
		return false
	}
	// must be the same message type
	if a.md.GetFullyQualifiedName() != b.md.GetFullyQualifiedName() {
		return false
	}
	// size checks up front let the loops below only verify one direction
	if len(a.values) != len(b.values) {
		return false
	}
	if len(a.unknownFields) != len(b.unknownFields) {
		return false
	}
	// compare known field values
	for tag, aval := range a.values {
		bval, ok := b.values[tag]
		if !ok {
			return false
		}
		if !fieldsEqual(aval, bval) {
			return false
		}
	}
	// compare unknown fields: same tags, and per tag the same sequence of
	// wire-encoded values (order within a tag is significant)
	for tag, au := range a.unknownFields {
		bu, ok := b.unknownFields[tag]
		if !ok {
			return false
		}
		if len(au) != len(bu) {
			return false
		}
		for i, aval := range au {
			bval := bu[i]
			if aval.Encoding != bval.Encoding {
				return false
			}
			// length-delimited and group values carry raw bytes; others carry a varint/fixed value
			if aval.Encoding == proto.WireBytes || aval.Encoding == proto.WireStartGroup {
				if !bytes.Equal(aval.Contents, bval.Contents) {
					return false
				}
			} else if aval.Value != bval.Value {
				return false
			}
		}
	}
	// all checks pass!
	return true
}
+
+func fieldsEqual(aval, bval interface{}) bool {
+ arv := reflect.ValueOf(aval)
+ brv := reflect.ValueOf(bval)
+ if arv.Type() != brv.Type() {
+ // it is possible that one is a dynamic message and one is not
+ apm, ok := aval.(proto.Message)
+ if !ok {
+ return false
+ }
+ bpm, ok := bval.(proto.Message)
+ if !ok {
+ return false
+ }
+ return MessagesEqual(apm, bpm)
+
+ } else {
+ switch arv.Kind() {
+ case reflect.Ptr:
+ apm, ok := aval.(proto.Message)
+ if !ok {
+ // Don't know how to compare pointer values that aren't messages!
+ // Maybe this should panic?
+ return false
+ }
+ bpm := bval.(proto.Message) // we know it will succeed because we know a and b have same type
+ return MessagesEqual(apm, bpm)
+
+ case reflect.Map:
+ return mapsEqual(arv, brv)
+
+ case reflect.Slice:
+ if arv.Type() == typeOfBytes {
+ return bytes.Equal(aval.([]byte), bval.([]byte))
+ } else {
+ return slicesEqual(arv, brv)
+ }
+
+ default:
+ return aval == bval
+ }
+ }
+}
+
+func slicesEqual(a, b reflect.Value) bool {
+ if a.Len() != b.Len() {
+ return false
+ }
+ for i := 0; i < a.Len(); i++ {
+ ai := a.Index(i)
+ bi := b.Index(i)
+ if !fieldsEqual(ai.Interface(), bi.Interface()) {
+ return false
+ }
+ }
+ return true
+}
+
// MessagesEqual returns true if the given two messages are equal. Use this instead of proto.Equal
// when one or both of the messages might be a dynamic message.
func MessagesEqual(a, b proto.Message) bool {
	da, aok := a.(*Message)
	db, bok := b.(*Message)
	// Both dynamic messages
	if aok && bok {
		return Equal(da, db)
	}
	// Neither dynamic messages
	if !aok && !bok {
		return proto.Equal(a, b)
	}
	// Mixed
	if bok {
		// we want a to be the dynamic one; after this swap, da is the dynamic
		// message and b is the non-dynamic one (a and db are no longer used)
		b, da = a, db
	}

	// Instead of panic'ing below if we have a nil dynamic message, check
	// now and return false if the input message is not also nil.
	if da == nil {
		return isNil(b)
	}

	// convert the non-dynamic message to a dynamic one and compare dynamically
	md, err := desc.LoadMessageDescriptorForMessage(b)
	if err != nil {
		return false
	}
	db = NewMessageWithMessageFactory(md, da.mf)
	if db.ConvertFrom(b) != nil {
		return false
	}
	return Equal(da, db)
}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/extension.go b/vendor/github.com/jhump/protoreflect/dynamic/extension.go
new file mode 100644
index 0000000..1d38161
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/extension.go
@@ -0,0 +1,46 @@
+package dynamic
+
+import (
+ "fmt"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/jhump/protoreflect/codec"
+ "github.com/jhump/protoreflect/desc"
+)
+
+// SetExtension sets the given extension value. If the given message is not a
+// dynamic message, the given extension may not be recognized (or may differ
+// from the compiled and linked in version of the extension. So in that case,
+// this function will serialize the given value to bytes and then use
+// proto.SetRawExtension to set the value.
+func SetExtension(msg proto.Message, extd *desc.FieldDescriptor, val interface{}) error {
+ if !extd.IsExtension() {
+ return fmt.Errorf("given field %s is not an extension", extd.GetFullyQualifiedName())
+ }
+
+ if dm, ok := msg.(*Message); ok {
+ return dm.TrySetField(extd, val)
+ }
+
+ md, err := desc.LoadMessageDescriptorForMessage(msg)
+ if err != nil {
+ return err
+ }
+ if err := checkField(extd, md); err != nil {
+ return err
+ }
+
+ val, err = validFieldValue(extd, val)
+ if err != nil {
+ return err
+ }
+
+ var b codec.Buffer
+ b.SetDeterministic(defaultDeterminism)
+ if err := b.EncodeFieldValue(extd, val); err != nil {
+ return err
+ }
+ proto.SetRawExtension(msg, extd.GetNumber(), b.Bytes())
+ return nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/extension_registry.go b/vendor/github.com/jhump/protoreflect/dynamic/extension_registry.go
new file mode 100644
index 0000000..6876827
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/extension_registry.go
@@ -0,0 +1,241 @@
+package dynamic
+
+import (
+ "fmt"
+ "reflect"
+ "sync"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/jhump/protoreflect/desc"
+)
+
// ExtensionRegistry is a registry of known extension fields. This is used to parse
// extension fields encountered when de-serializing a dynamic message.
type ExtensionRegistry struct {
	// includeDefault, when true, makes lookups fall back to extensions
	// statically registered with the proto package (see getDefaultExtensions).
	includeDefault bool
	// mu guards exts for concurrent use.
	mu sync.RWMutex
	// exts maps extendee fully-qualified message name -> tag number -> extension descriptor.
	exts map[string]map[int32]*desc.FieldDescriptor
}
+
+// NewExtensionRegistryWithDefaults is a registry that includes all "default" extensions,
+// which are those that are statically linked into the current program (e.g. registered by
+// protoc-generated code via proto.RegisterExtension). Extensions explicitly added to the
+// registry will override any default extensions that are for the same extendee and have the
+// same tag number and/or name.
+func NewExtensionRegistryWithDefaults() *ExtensionRegistry {
+ return &ExtensionRegistry{includeDefault: true}
+}
+
+// AddExtensionDesc adds the given extensions to the registry.
+func (r *ExtensionRegistry) AddExtensionDesc(exts ...*proto.ExtensionDesc) error {
+ flds := make([]*desc.FieldDescriptor, len(exts))
+ for i, ext := range exts {
+ fd, err := desc.LoadFieldDescriptorForExtension(ext)
+ if err != nil {
+ return err
+ }
+ flds[i] = fd
+ }
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ if r.exts == nil {
+ r.exts = map[string]map[int32]*desc.FieldDescriptor{}
+ }
+ for _, fd := range flds {
+ r.putExtensionLocked(fd)
+ }
+ return nil
+}
+
+// AddExtension adds the given extensions to the registry. The given extensions
+// will overwrite any previously added extensions that are for the same extendee
+// message and same extension tag number.
+func (r *ExtensionRegistry) AddExtension(exts ...*desc.FieldDescriptor) error {
+ for _, ext := range exts {
+ if !ext.IsExtension() {
+ return fmt.Errorf("given field is not an extension: %s", ext.GetFullyQualifiedName())
+ }
+ }
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ if r.exts == nil {
+ r.exts = map[string]map[int32]*desc.FieldDescriptor{}
+ }
+ for _, ext := range exts {
+ r.putExtensionLocked(ext)
+ }
+ return nil
+}
+
// AddExtensionsFromFile adds to the registry all extension fields defined in the given file descriptor.
// Unlike AddExtensionsFromFileRecursively, dependencies of the file are not visited.
func (r *ExtensionRegistry) AddExtensionsFromFile(fd *desc.FileDescriptor) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.addExtensionsFromFileLocked(fd, false, nil)
}
+
+// AddExtensionsFromFileRecursively adds to the registry all extension fields defined in the give file
+// descriptor and also recursively adds all extensions defined in that file's dependencies. This adds
+// extensions from the entire transitive closure for the given file.
+func (r *ExtensionRegistry) AddExtensionsFromFileRecursively(fd *desc.FileDescriptor) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ already := map[*desc.FileDescriptor]struct{}{}
+ r.addExtensionsFromFileLocked(fd, true, already)
+}
+
+func (r *ExtensionRegistry) addExtensionsFromFileLocked(fd *desc.FileDescriptor, recursive bool, alreadySeen map[*desc.FileDescriptor]struct{}) {
+ if _, ok := alreadySeen[fd]; ok {
+ return
+ }
+
+ if r.exts == nil {
+ r.exts = map[string]map[int32]*desc.FieldDescriptor{}
+ }
+ for _, ext := range fd.GetExtensions() {
+ r.putExtensionLocked(ext)
+ }
+ for _, msg := range fd.GetMessageTypes() {
+ r.addExtensionsFromMessageLocked(msg)
+ }
+
+ if recursive {
+ alreadySeen[fd] = struct{}{}
+ for _, dep := range fd.GetDependencies() {
+ r.addExtensionsFromFileLocked(dep, recursive, alreadySeen)
+ }
+ }
+}
+
+func (r *ExtensionRegistry) addExtensionsFromMessageLocked(md *desc.MessageDescriptor) {
+ for _, ext := range md.GetNestedExtensions() {
+ r.putExtensionLocked(ext)
+ }
+ for _, msg := range md.GetNestedMessageTypes() {
+ r.addExtensionsFromMessageLocked(msg)
+ }
+}
+
+func (r *ExtensionRegistry) putExtensionLocked(fd *desc.FieldDescriptor) {
+ msgName := fd.GetOwner().GetFullyQualifiedName()
+ m := r.exts[msgName]
+ if m == nil {
+ m = map[int32]*desc.FieldDescriptor{}
+ r.exts[msgName] = m
+ }
+ m[fd.GetNumber()] = fd
+}
+
+// FindExtension queries for the extension field with the given extendee name (must be a fully-qualified
+// message name) and tag number. If no extension is known, nil is returned.
+func (r *ExtensionRegistry) FindExtension(messageName string, tagNumber int32) *desc.FieldDescriptor {
+ if r == nil {
+ return nil
+ }
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ fd := r.exts[messageName][tagNumber]
+ if fd == nil && r.includeDefault {
+ ext := getDefaultExtensions(messageName)[tagNumber]
+ if ext != nil {
+ fd, _ = desc.LoadFieldDescriptorForExtension(ext)
+ }
+ }
+ return fd
+}
+
+// FindExtensionByName queries for the extension field with the given extendee name (must be a fully-qualified
+// message name) and field name (must also be a fully-qualified extension name). If no extension is known, nil
+// is returned.
+func (r *ExtensionRegistry) FindExtensionByName(messageName string, fieldName string) *desc.FieldDescriptor {
+ if r == nil {
+ return nil
+ }
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ for _, fd := range r.exts[messageName] {
+ if fd.GetFullyQualifiedName() == fieldName {
+ return fd
+ }
+ }
+ if r.includeDefault {
+ for _, ext := range getDefaultExtensions(messageName) {
+ fd, _ := desc.LoadFieldDescriptorForExtension(ext)
+ if fd.GetFullyQualifiedName() == fieldName {
+ return fd
+ }
+ }
+ }
+ return nil
+}
+
+// FindExtensionByJSONName queries for the extension field with the given extendee name (must be a fully-qualified
+// message name) and JSON field name (must also be a fully-qualified name). If no extension is known, nil is returned.
+// The fully-qualified JSON name is the same as the extension's normal fully-qualified name except that the last
+// component uses the field's JSON name (if present).
+func (r *ExtensionRegistry) FindExtensionByJSONName(messageName string, fieldName string) *desc.FieldDescriptor {
+ if r == nil {
+ return nil
+ }
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ for _, fd := range r.exts[messageName] {
+ if fd.GetFullyQualifiedJSONName() == fieldName {
+ return fd
+ }
+ }
+ if r.includeDefault {
+ for _, ext := range getDefaultExtensions(messageName) {
+ fd, _ := desc.LoadFieldDescriptorForExtension(ext)
+ if fd.GetFullyQualifiedJSONName() == fieldName {
+ return fd
+ }
+ }
+ }
+ return nil
+}
+
+func getDefaultExtensions(messageName string) map[int32]*proto.ExtensionDesc {
+ t := proto.MessageType(messageName)
+ if t != nil {
+ msg := reflect.Zero(t).Interface().(proto.Message)
+ return proto.RegisteredExtensions(msg)
+ }
+ return nil
+}
+
+// AllExtensionsForType returns all known extension fields for the given extendee name (must be a
+// fully-qualified message name).
+func (r *ExtensionRegistry) AllExtensionsForType(messageName string) []*desc.FieldDescriptor {
+ if r == nil {
+ return []*desc.FieldDescriptor(nil)
+ }
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ flds := r.exts[messageName]
+ var ret []*desc.FieldDescriptor
+ if r.includeDefault {
+ exts := getDefaultExtensions(messageName)
+ if len(exts) > 0 || len(flds) > 0 {
+ ret = make([]*desc.FieldDescriptor, 0, len(exts)+len(flds))
+ }
+ for tag, ext := range exts {
+ if _, ok := flds[tag]; ok {
+ // skip default extension and use the one explicitly registered instead
+ continue
+ }
+ fd, _ := desc.LoadFieldDescriptorForExtension(ext)
+ if fd != nil {
+ ret = append(ret, fd)
+ }
+ }
+ } else if len(flds) > 0 {
+ ret = make([]*desc.FieldDescriptor, 0, len(flds))
+ }
+
+ for _, ext := range flds {
+ ret = append(ret, ext)
+ }
+ return ret
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/grpcdynamic/stub.go b/vendor/github.com/jhump/protoreflect/dynamic/grpcdynamic/stub.go
new file mode 100644
index 0000000..6fca393
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/grpcdynamic/stub.go
@@ -0,0 +1,310 @@
+// Package grpcdynamic provides a dynamic RPC stub. It can be used to invoke RPC
+// method where only method descriptors are known. The actual request and response
+// messages may be dynamic messages.
+package grpcdynamic
+
+import (
+ "context"
+ "fmt"
+ "io"
+
+ "github.com/golang/protobuf/proto"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/metadata"
+
+ "github.com/jhump/protoreflect/desc"
+ "github.com/jhump/protoreflect/dynamic"
+)
+
// Stub is an RPC client stub, used for dynamically dispatching RPCs to a server.
type Stub struct {
	// channel carries the actual RPC transport (typically a *grpc.ClientConn).
	channel Channel
	// mf creates response messages; a nil factory's zero-value behavior is used otherwise.
	mf *dynamic.MessageFactory
}
+
// Channel represents the operations necessary to issue RPCs via gRPC. The
// *grpc.ClientConn type provides this interface and will typically be the
// concrete type used to construct Stubs. But the use of this interface allows
// construction of stubs that use alternate concrete types as the transport for
// RPC operations.
type Channel = grpc.ClientConnInterface
+
+// NewStub creates a new RPC stub that uses the given channel for dispatching RPCs.
+func NewStub(channel Channel) Stub {
+ return NewStubWithMessageFactory(channel, nil)
+}
+
+// NewStubWithMessageFactory creates a new RPC stub that uses the given channel for
+// dispatching RPCs and the given MessageFactory for creating response messages.
+func NewStubWithMessageFactory(channel Channel, mf *dynamic.MessageFactory) Stub {
+ return Stub{channel: channel, mf: mf}
+}
+
+func requestMethod(md *desc.MethodDescriptor) string {
+ return fmt.Sprintf("/%s/%s", md.GetService().GetFullyQualifiedName(), md.GetName())
+}
+
+// InvokeRpc sends a unary RPC and returns the response. Use this for unary methods.
+func (s Stub) InvokeRpc(ctx context.Context, method *desc.MethodDescriptor, request proto.Message, opts ...grpc.CallOption) (proto.Message, error) {
+ if method.IsClientStreaming() || method.IsServerStreaming() {
+ return nil, fmt.Errorf("InvokeRpc is for unary methods; %q is %s", method.GetFullyQualifiedName(), methodType(method))
+ }
+ if err := checkMessageType(method.GetInputType(), request); err != nil {
+ return nil, err
+ }
+ resp := s.mf.NewMessage(method.GetOutputType())
+ if err := s.channel.Invoke(ctx, requestMethod(method), request, resp, opts...); err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+// InvokeRpcServerStream sends a unary RPC and returns the response stream. Use this for server-streaming methods.
+func (s Stub) InvokeRpcServerStream(ctx context.Context, method *desc.MethodDescriptor, request proto.Message, opts ...grpc.CallOption) (*ServerStream, error) {
+ if method.IsClientStreaming() || !method.IsServerStreaming() {
+ return nil, fmt.Errorf("InvokeRpcServerStream is for server-streaming methods; %q is %s", method.GetFullyQualifiedName(), methodType(method))
+ }
+ if err := checkMessageType(method.GetInputType(), request); err != nil {
+ return nil, err
+ }
+ ctx, cancel := context.WithCancel(ctx)
+ sd := grpc.StreamDesc{
+ StreamName: method.GetName(),
+ ServerStreams: method.IsServerStreaming(),
+ ClientStreams: method.IsClientStreaming(),
+ }
+ if cs, err := s.channel.NewStream(ctx, &sd, requestMethod(method), opts...); err != nil {
+ cancel()
+ return nil, err
+ } else {
+ err = cs.SendMsg(request)
+ if err != nil {
+ cancel()
+ return nil, err
+ }
+ err = cs.CloseSend()
+ if err != nil {
+ cancel()
+ return nil, err
+ }
+ go func() {
+ // when the new stream is finished, also cleanup the parent context
+ <-cs.Context().Done()
+ cancel()
+ }()
+ return &ServerStream{cs, method.GetOutputType(), s.mf}, nil
+ }
+}
+
+// InvokeRpcClientStream creates a new stream that is used to send request messages and, at the end,
+// receive the response message. Use this for client-streaming methods.
+func (s Stub) InvokeRpcClientStream(ctx context.Context, method *desc.MethodDescriptor, opts ...grpc.CallOption) (*ClientStream, error) {
+ if !method.IsClientStreaming() || method.IsServerStreaming() {
+ return nil, fmt.Errorf("InvokeRpcClientStream is for client-streaming methods; %q is %s", method.GetFullyQualifiedName(), methodType(method))
+ }
+ ctx, cancel := context.WithCancel(ctx)
+ sd := grpc.StreamDesc{
+ StreamName: method.GetName(),
+ ServerStreams: method.IsServerStreaming(),
+ ClientStreams: method.IsClientStreaming(),
+ }
+ if cs, err := s.channel.NewStream(ctx, &sd, requestMethod(method), opts...); err != nil {
+ cancel()
+ return nil, err
+ } else {
+ go func() {
+ // when the new stream is finished, also cleanup the parent context
+ <-cs.Context().Done()
+ cancel()
+ }()
+ return &ClientStream{cs, method, s.mf, cancel}, nil
+ }
+}
+
+// InvokeRpcBidiStream creates a new stream that is used to both send request messages and receive response
+// messages. Use this for bidi-streaming methods.
+func (s Stub) InvokeRpcBidiStream(ctx context.Context, method *desc.MethodDescriptor, opts ...grpc.CallOption) (*BidiStream, error) {
+ if !method.IsClientStreaming() || !method.IsServerStreaming() {
+ return nil, fmt.Errorf("InvokeRpcBidiStream is for bidi-streaming methods; %q is %s", method.GetFullyQualifiedName(), methodType(method))
+ }
+ sd := grpc.StreamDesc{
+ StreamName: method.GetName(),
+ ServerStreams: method.IsServerStreaming(),
+ ClientStreams: method.IsClientStreaming(),
+ }
+ if cs, err := s.channel.NewStream(ctx, &sd, requestMethod(method), opts...); err != nil {
+ return nil, err
+ } else {
+ return &BidiStream{cs, method.GetInputType(), method.GetOutputType(), s.mf}, nil
+ }
+}
+
+func methodType(md *desc.MethodDescriptor) string {
+ if md.IsClientStreaming() && md.IsServerStreaming() {
+ return "bidi-streaming"
+ } else if md.IsClientStreaming() {
+ return "client-streaming"
+ } else if md.IsServerStreaming() {
+ return "server-streaming"
+ } else {
+ return "unary"
+ }
+}
+
+func checkMessageType(md *desc.MessageDescriptor, msg proto.Message) error {
+ var typeName string
+ if dm, ok := msg.(*dynamic.Message); ok {
+ typeName = dm.GetMessageDescriptor().GetFullyQualifiedName()
+ } else {
+ typeName = proto.MessageName(msg)
+ }
+ if typeName != md.GetFullyQualifiedName() {
+ return fmt.Errorf("expecting message of type %s; got %s", md.GetFullyQualifiedName(), typeName)
+ }
+ return nil
+}
+
// ServerStream represents a response stream from a server. Messages in the stream can be queried
// as can header and trailer metadata sent by the server.
type ServerStream struct {
	// stream is the underlying gRPC client stream.
	stream grpc.ClientStream
	// respType describes the response message type, used to instantiate received messages.
	respType *desc.MessageDescriptor
	// mf creates response message instances.
	mf *dynamic.MessageFactory
}
+
// Header returns any header metadata sent by the server (blocks if necessary until headers are
// received). This delegates directly to the underlying gRPC client stream.
func (s *ServerStream) Header() (metadata.MD, error) {
	return s.stream.Header()
}
+
// Trailer returns the trailer metadata sent by the server. It must only be called after
// RecvMsg returns a non-nil error (which may be EOF for normal completion of stream).
// This delegates directly to the underlying gRPC client stream.
func (s *ServerStream) Trailer() metadata.MD {
	return s.stream.Trailer()
}
+
// Context returns the context associated with this streaming operation.
// This delegates directly to the underlying gRPC client stream.
func (s *ServerStream) Context() context.Context {
	return s.stream.Context()
}
+
+// RecvMsg returns the next message in the response stream or an error. If the stream
+// has completed normally, the error is io.EOF. Otherwise, the error indicates the
+// nature of the abnormal termination of the stream.
+func (s *ServerStream) RecvMsg() (proto.Message, error) {
+ resp := s.mf.NewMessage(s.respType)
+ if err := s.stream.RecvMsg(resp); err != nil {
+ return nil, err
+ } else {
+ return resp, nil
+ }
+}
+
// ClientStream represents a response stream from a client. Messages in the stream can be sent
// and, when done, the unary server message and header and trailer metadata can be queried.
type ClientStream struct {
	// stream is the underlying gRPC client stream.
	stream grpc.ClientStream
	// method describes the RPC; its input/output types drive message creation and checks.
	method *desc.MethodDescriptor
	// mf creates response message instances.
	mf *dynamic.MessageFactory
	// cancel tears down the stream's derived context (invoked on protocol violations).
	cancel context.CancelFunc
}
+
// Header returns any header metadata sent by the server (blocks if necessary until headers are
// received). This delegates directly to the underlying gRPC client stream.
func (s *ClientStream) Header() (metadata.MD, error) {
	return s.stream.Header()
}
+
// Trailer returns the trailer metadata sent by the server. It must only be called after
// RecvMsg returns a non-nil error (which may be EOF for normal completion of stream).
// This delegates directly to the underlying gRPC client stream.
func (s *ClientStream) Trailer() metadata.MD {
	return s.stream.Trailer()
}
+
// Context returns the context associated with this streaming operation.
// This delegates directly to the underlying gRPC client stream.
func (s *ClientStream) Context() context.Context {
	return s.stream.Context()
}
+
+// SendMsg sends a request message to the server.
+func (s *ClientStream) SendMsg(m proto.Message) error {
+ if err := checkMessageType(s.method.GetInputType(), m); err != nil {
+ return err
+ }
+ return s.stream.SendMsg(m)
+}
+
+// CloseAndReceive closes the outgoing request stream and then blocks for the server's response.
+func (s *ClientStream) CloseAndReceive() (proto.Message, error) {
+ if err := s.stream.CloseSend(); err != nil {
+ return nil, err
+ }
+ resp := s.mf.NewMessage(s.method.GetOutputType())
+ if err := s.stream.RecvMsg(resp); err != nil {
+ return nil, err
+ }
+ // make sure we get EOF for a second message
+ if err := s.stream.RecvMsg(resp); err != io.EOF {
+ if err == nil {
+ s.cancel()
+ return nil, fmt.Errorf("client-streaming method %q returned more than one response message", s.method.GetFullyQualifiedName())
+ } else {
+ return nil, err
+ }
+ }
+ return resp, nil
+}
+
// BidiStream represents a bi-directional stream for sending messages to and receiving
// messages from a server. The header and trailer metadata sent by the server can also be
// queried.
type BidiStream struct {
	// stream is the underlying gRPC client stream.
	stream grpc.ClientStream
	// reqType describes the request message type, used to validate outgoing messages.
	reqType *desc.MessageDescriptor
	// respType describes the response message type, used to instantiate received messages.
	respType *desc.MessageDescriptor
	// mf creates response message instances.
	mf *dynamic.MessageFactory
}
+
// Header returns any header metadata sent by the server (blocks if necessary until headers are
// received). This delegates directly to the underlying gRPC client stream.
func (s *BidiStream) Header() (metadata.MD, error) {
	return s.stream.Header()
}
+
// Trailer returns the trailer metadata sent by the server. It must only be called after
// RecvMsg returns a non-nil error (which may be EOF for normal completion of stream).
// This delegates directly to the underlying gRPC client stream.
func (s *BidiStream) Trailer() metadata.MD {
	return s.stream.Trailer()
}
+
// Context returns the context associated with this streaming operation.
// This delegates directly to the underlying gRPC client stream.
func (s *BidiStream) Context() context.Context {
	return s.stream.Context()
}
+
+// SendMsg sends a request message to the server.
+func (s *BidiStream) SendMsg(m proto.Message) error {
+ if err := checkMessageType(s.reqType, m); err != nil {
+ return err
+ }
+ return s.stream.SendMsg(m)
+}
+
// CloseSend indicates the request stream has ended. Invoke this after all request messages
// are sent (even if there are zero such messages). This delegates directly to the
// underlying gRPC client stream.
func (s *BidiStream) CloseSend() error {
	return s.stream.CloseSend()
}
+
+// RecvMsg returns the next message in the response stream or an error. If the stream
+// has completed normally, the error is io.EOF. Otherwise, the error indicates the
+// nature of the abnormal termination of the stream.
+func (s *BidiStream) RecvMsg() (proto.Message, error) {
+ resp := s.mf.NewMessage(s.respType)
+ if err := s.stream.RecvMsg(resp); err != nil {
+ return nil, err
+ } else {
+ return resp, nil
+ }
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/indent.go b/vendor/github.com/jhump/protoreflect/dynamic/indent.go
new file mode 100644
index 0000000..bd7fcaa
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/indent.go
@@ -0,0 +1,76 @@
+package dynamic
+
+import "bytes"
+
// indentBuffer accumulates textual output with optional pretty-printing.
// A negative indentCount selects compact mode (no newlines or indentation).
type indentBuffer struct {
	bytes.Buffer
	// indent is the string written once per indentation level at line starts.
	indent string
	// indentCount is the current nesting depth; negative disables pretty-printing.
	indentCount int
	// comma controls whether element separators include a comma.
	comma bool
}
+
+func (b *indentBuffer) start() error {
+ if b.indentCount >= 0 {
+ b.indentCount++
+ return b.newLine(false)
+ }
+ return nil
+}
+
+func (b *indentBuffer) sep() error {
+ if b.indentCount >= 0 {
+ _, err := b.WriteString(": ")
+ return err
+ } else {
+ return b.WriteByte(':')
+ }
+}
+
+func (b *indentBuffer) end() error {
+ if b.indentCount >= 0 {
+ b.indentCount--
+ return b.newLine(false)
+ }
+ return nil
+}
+
+func (b *indentBuffer) maybeNext(first *bool) error {
+ if *first {
+ *first = false
+ return nil
+ } else {
+ return b.next()
+ }
+}
+
+func (b *indentBuffer) next() error {
+ if b.indentCount >= 0 {
+ return b.newLine(b.comma)
+ } else if b.comma {
+ return b.WriteByte(',')
+ } else {
+ return b.WriteByte(' ')
+ }
+}
+
+func (b *indentBuffer) newLine(comma bool) error {
+ if comma {
+ err := b.WriteByte(',')
+ if err != nil {
+ return err
+ }
+ }
+
+ err := b.WriteByte('\n')
+ if err != nil {
+ return err
+ }
+
+ for i := 0; i < b.indentCount; i++ {
+ _, err := b.WriteString(b.indent)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/json.go b/vendor/github.com/jhump/protoreflect/dynamic/json.go
new file mode 100644
index 0000000..9081965
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/json.go
@@ -0,0 +1,1256 @@
+package dynamic
+
+// JSON marshalling and unmarshalling for dynamic messages
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/golang/protobuf/jsonpb"
+ "github.com/golang/protobuf/proto"
+ "google.golang.org/protobuf/types/descriptorpb"
+ // link in the well-known-types that have a special JSON format
+ _ "google.golang.org/protobuf/types/known/anypb"
+ _ "google.golang.org/protobuf/types/known/durationpb"
+ _ "google.golang.org/protobuf/types/known/emptypb"
+ _ "google.golang.org/protobuf/types/known/structpb"
+ _ "google.golang.org/protobuf/types/known/timestamppb"
+ _ "google.golang.org/protobuf/types/known/wrapperspb"
+
+ "github.com/jhump/protoreflect/desc"
+)
+
// wellKnownTypeNames is the set of fully-qualified message names that have a
// special JSON representation; for these, (un)marshaling is delegated to
// jsonpb using the corresponding generated types.
var wellKnownTypeNames = map[string]struct{}{
	"google.protobuf.Any":       {},
	"google.protobuf.Empty":     {},
	"google.protobuf.Duration":  {},
	"google.protobuf.Timestamp": {},
	// struct.proto
	"google.protobuf.Struct":    {},
	"google.protobuf.Value":     {},
	"google.protobuf.ListValue": {},
	// wrappers.proto
	"google.protobuf.DoubleValue": {},
	"google.protobuf.FloatValue":  {},
	"google.protobuf.Int64Value":  {},
	"google.protobuf.UInt64Value": {},
	"google.protobuf.Int32Value":  {},
	"google.protobuf.UInt32Value": {},
	"google.protobuf.BoolValue":   {},
	"google.protobuf.StringValue": {},
	"google.protobuf.BytesValue":  {},
}
+
+// MarshalJSON serializes this message to bytes in JSON format, returning an
+// error if the operation fails. The resulting bytes will be a valid UTF8
+// string.
+//
+// This method uses a compact form: no newlines, and spaces between fields and
+// between field identifiers and values are elided.
+//
+// This method is convenient shorthand for invoking MarshalJSONPB with a default
+// (zero value) marshaler:
+//
+// m.MarshalJSONPB(&jsonpb.Marshaler{})
+//
+// So enums are serialized using enum value name strings, and values that are
+// not present (including those with default/zero value for messages defined in
+// "proto3" syntax) are omitted.
+func (m *Message) MarshalJSON() ([]byte, error) {
+ return m.MarshalJSONPB(&jsonpb.Marshaler{})
+}
+
+// MarshalJSONIndent serializes this message to bytes in JSON format, returning
+// an error if the operation fails. The resulting bytes will be a valid UTF8
+// string.
+//
+// This method uses a "pretty-printed" form, with each field on its own line and
+// spaces between field identifiers and values. Indentation of two spaces is
+// used.
+//
+// This method is convenient shorthand for invoking MarshalJSONPB with a default
+// (zero value) marshaler:
+//
+// m.MarshalJSONPB(&jsonpb.Marshaler{Indent: " "})
+//
+// So enums are serialized using enum value name strings, and values that are
+// not present (including those with default/zero value for messages defined in
+// "proto3" syntax) are omitted.
+func (m *Message) MarshalJSONIndent() ([]byte, error) {
+ return m.MarshalJSONPB(&jsonpb.Marshaler{Indent: " "})
+}
+
+// MarshalJSONPB serializes this message to bytes in JSON format, returning an
+// error if the operation fails. The resulting bytes will be a valid UTF8
+// string. The given marshaler is used to convey options used during marshaling.
+//
+// If this message contains nested messages that are generated message types (as
+// opposed to dynamic messages), the given marshaler is used to marshal it.
+//
+// When marshaling any nested messages, any jsonpb.AnyResolver configured in the
+// given marshaler is augmented with knowledge of message types known to this
+// message's descriptor (and its enclosing file and set of transitive
+// dependencies).
+func (m *Message) MarshalJSONPB(opts *jsonpb.Marshaler) ([]byte, error) {
+ var b indentBuffer
+ b.indent = opts.Indent
+ if len(opts.Indent) == 0 {
+ b.indentCount = -1
+ }
+ b.comma = true
+ if err := m.marshalJSON(&b, opts); err != nil {
+ return nil, err
+ }
+ return b.Bytes(), nil
+}
+
// marshalJSON writes the JSON form of m to b. A nil message is rendered as
// the JSON literal "null", and well-known types are delegated to jsonpb so
// they use their special JSON formats; everything else becomes a JSON object
// of the message's known fields.
func (m *Message) marshalJSON(b *indentBuffer, opts *jsonpb.Marshaler) error {
	if m == nil {
		_, err := b.WriteString("null")
		return err
	}
	// augment any caller-supplied AnyResolver with knowledge of this
	// message's file and its transitive dependencies
	if r, changed := wrapResolver(opts.AnyResolver, m.mf, m.md.GetFile()); changed {
		newOpts := *opts
		newOpts.AnyResolver = r
		opts = &newOpts
	}

	if ok, err := marshalWellKnownType(m, b, opts); ok {
		return err
	}

	err := b.WriteByte('{')
	if err != nil {
		return err
	}
	err = b.start()
	if err != nil {
		return err
	}

	// with EmitDefaults we also visit known fields that have no set value
	var tags []int
	if opts.EmitDefaults {
		tags = m.allKnownFieldTags()
	} else {
		tags = m.knownFieldTags()
	}

	first := true

	for _, tag := range tags {
		itag := int32(tag)
		fd := m.FindFieldDescriptor(itag)

		v, ok := m.values[itag]
		if !ok {
			if fd.GetOneOf() != nil {
				// don't print defaults for fields in a oneof
				continue
			}
			v = fd.GetDefaultValue()
		}

		err := b.maybeNext(&first)
		if err != nil {
			return err
		}
		err = marshalKnownFieldJSON(b, fd, v, opts)
		if err != nil {
			return err
		}
	}

	err = b.end()
	if err != nil {
		return err
	}
	err = b.WriteByte('}')
	if err != nil {
		return err
	}

	return nil
}
+
// marshalWellKnownType checks whether m is one of the well-known types that
// have a special JSON format. If so, it converts the dynamic message to its
// registered generated counterpart, marshals it via jsonpb, and returns true;
// otherwise it returns (false, nil) and the caller performs normal marshaling.
func marshalWellKnownType(m *Message, b *indentBuffer, opts *jsonpb.Marshaler) (bool, error) {
	fqn := m.md.GetFullyQualifiedName()
	if _, ok := wellKnownTypeNames[fqn]; !ok {
		return false, nil
	}

	msgType := proto.MessageType(fqn)
	if msgType == nil {
		// the well-known types are linked in by this package's imports, so a
		// missing registration indicates a badly broken environment
		panic(fmt.Sprintf("could not find registered message type for %q", fqn))
	}

	// convert dynamic message to well-known type and let jsonpb marshal it
	msg := reflect.New(msgType.Elem()).Interface().(proto.Message)
	if err := m.MergeInto(msg); err != nil {
		return true, err
	}
	return true, opts.Marshal(b, msg)
}
+
// marshalKnownFieldJSON writes one field of a message: its JSON name (or
// bracketed extension name), a separator, and the value. Map fields are
// rendered as JSON objects with deterministically sorted keys, repeated
// fields as JSON arrays, and everything else as a single value.
func marshalKnownFieldJSON(b *indentBuffer, fd *desc.FieldDescriptor, v interface{}, opts *jsonpb.Marshaler) error {
	var jsonName string
	if opts.OrigName {
		jsonName = fd.GetName()
	} else {
		// prefer the descriptor's declared JSON name, falling back to the
		// proto field name when none is present
		jsonName = fd.AsFieldDescriptorProto().GetJsonName()
		if jsonName == "" {
			jsonName = fd.GetName()
		}
	}
	if fd.IsExtension() {
		// extensions are written with the bracketed "[scope.name]" convention
		var scope string
		switch parent := fd.GetParent().(type) {
		case *desc.FileDescriptor:
			scope = parent.GetPackage()
		default:
			scope = parent.GetFullyQualifiedName()
		}
		if scope == "" {
			jsonName = fmt.Sprintf("[%s]", jsonName)
		} else {
			jsonName = fmt.Sprintf("[%s.%s]", scope, jsonName)
		}
	}
	err := writeJsonString(b, jsonName)
	if err != nil {
		return err
	}
	err = b.sep()
	if err != nil {
		return err
	}

	if isNil(v) {
		_, err := b.WriteString("null")
		return err
	}

	if fd.IsMap() {
		err = b.WriteByte('{')
		if err != nil {
			return err
		}
		err = b.start()
		if err != nil {
			return err
		}

		// field 2 of the synthetic map-entry message describes the map value
		md := fd.GetMessageType()
		vfd := md.FindFieldByNumber(2)

		// sort the keys so output is deterministic
		mp := v.(map[interface{}]interface{})
		keys := make([]interface{}, 0, len(mp))
		for k := range mp {
			keys = append(keys, k)
		}
		sort.Sort(sortable(keys))
		first := true
		for _, mk := range keys {
			mv := mp[mk]
			err := b.maybeNext(&first)
			if err != nil {
				return err
			}

			err = marshalKnownFieldMapEntryJSON(b, mk, vfd, mv, opts)
			if err != nil {
				return err
			}
		}

		err = b.end()
		if err != nil {
			return err
		}
		return b.WriteByte('}')

	} else if fd.IsRepeated() {
		err = b.WriteByte('[')
		if err != nil {
			return err
		}
		err = b.start()
		if err != nil {
			return err
		}

		sl := v.([]interface{})
		first := true
		for _, slv := range sl {
			err := b.maybeNext(&first)
			if err != nil {
				return err
			}
			err = marshalKnownFieldValueJSON(b, fd, slv, opts)
			if err != nil {
				return err
			}
		}

		err = b.end()
		if err != nil {
			return err
		}
		return b.WriteByte(']')

	} else {
		return marshalKnownFieldValueJSON(b, fd, v, opts)
	}
}
+
// sortable implements sort.Interface over map keys. Elements must be integers
// (int32, int64, uint32, or uint64), bools, or strings; any other element
// type causes Less to panic.
type sortable []interface{}

func (s sortable) Len() int { return len(s) }

func (s sortable) Less(i, j int) bool {
	a, b := s[i], s[j]
	switch reflect.TypeOf(a).Kind() {
	case reflect.Int32:
		return a.(int32) < b.(int32)
	case reflect.Int64:
		return a.(int64) < b.(int64)
	case reflect.Uint32:
		return a.(uint32) < b.(uint32)
	case reflect.Uint64:
		return a.(uint64) < b.(uint64)
	case reflect.String:
		return a.(string) < b.(string)
	case reflect.Bool:
		// false sorts before true
		return !a.(bool) && b.(bool)
	default:
		panic(fmt.Sprintf("cannot compare keys of type %v", reflect.TypeOf(a)))
	}
}

func (s sortable) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
// isNil reports whether v is nil, either as an untyped nil interface or as a
// typed nil pointer boxed in a non-nil interface.
func isNil(v interface{}) bool {
	if v == nil {
		return true
	}
	rv := reflect.ValueOf(v)
	if rv.Kind() != reflect.Ptr {
		return false
	}
	return rv.IsNil()
}
+
+func marshalKnownFieldMapEntryJSON(b *indentBuffer, mk interface{}, vfd *desc.FieldDescriptor, mv interface{}, opts *jsonpb.Marshaler) error {
+ rk := reflect.ValueOf(mk)
+ var strkey string
+ switch rk.Kind() {
+ case reflect.Bool:
+ strkey = strconv.FormatBool(rk.Bool())
+ case reflect.Int32, reflect.Int64:
+ strkey = strconv.FormatInt(rk.Int(), 10)
+ case reflect.Uint32, reflect.Uint64:
+ strkey = strconv.FormatUint(rk.Uint(), 10)
+ case reflect.String:
+ strkey = rk.String()
+ default:
+ return fmt.Errorf("invalid map key value: %v (%v)", mk, rk.Type())
+ }
+ err := writeJsonString(b, strkey)
+ if err != nil {
+ return err
+ }
+ err = b.sep()
+ if err != nil {
+ return err
+ }
+ return marshalKnownFieldValueJSON(b, vfd, mv, opts)
+}
+
// marshalKnownFieldValueJSON writes a single value in JSON form. Per the
// proto JSON mapping used here: 64-bit integers are quoted as strings,
// non-finite floats become the strings "NaN"/"Infinity"/"-Infinity", bytes
// are base64-encoded, and enums are written by value name unless
// opts.EnumsAsInts is set. Message values recurse (dynamic messages) or are
// delegated to jsonpb (generated messages).
func marshalKnownFieldValueJSON(b *indentBuffer, fd *desc.FieldDescriptor, v interface{}, opts *jsonpb.Marshaler) error {
	rv := reflect.ValueOf(v)
	switch rv.Kind() {
	case reflect.Int64:
		// int64 is quoted to avoid precision loss in JavaScript consumers
		return writeJsonString(b, strconv.FormatInt(rv.Int(), 10))
	case reflect.Int32:
		// an int32 may be an enum value; prefer the value name when allowed
		ed := fd.GetEnumType()
		if !opts.EnumsAsInts && ed != nil {
			n := int32(rv.Int())
			vd := ed.FindValueByNumber(n)
			if vd == nil {
				// unknown enum number: fall back to numeric form
				_, err := b.WriteString(strconv.FormatInt(rv.Int(), 10))
				return err
			} else {
				return writeJsonString(b, vd.GetName())
			}
		} else {
			_, err := b.WriteString(strconv.FormatInt(rv.Int(), 10))
			return err
		}
	case reflect.Uint64:
		// uint64 is quoted, like int64
		return writeJsonString(b, strconv.FormatUint(rv.Uint(), 10))
	case reflect.Uint32:
		_, err := b.WriteString(strconv.FormatUint(rv.Uint(), 10))
		return err
	case reflect.Float32, reflect.Float64:
		f := rv.Float()
		var str string
		if math.IsNaN(f) {
			str = `"NaN"`
		} else if math.IsInf(f, 1) {
			str = `"Infinity"`
		} else if math.IsInf(f, -1) {
			str = `"-Infinity"`
		} else {
			var bits int
			if rv.Kind() == reflect.Float32 {
				bits = 32
			} else {
				bits = 64
			}
			str = strconv.FormatFloat(rv.Float(), 'g', -1, bits)
		}
		_, err := b.WriteString(str)
		return err
	case reflect.Bool:
		_, err := b.WriteString(strconv.FormatBool(rv.Bool()))
		return err
	case reflect.Slice:
		// []byte is rendered as standard base64
		bstr := base64.StdEncoding.EncodeToString(rv.Bytes())
		return writeJsonString(b, bstr)
	case reflect.String:
		return writeJsonString(b, rv.String())
	default:
		// must be a message
		if isNil(v) {
			_, err := b.WriteString("null")
			return err
		}

		if dm, ok := v.(*Message); ok {
			return dm.marshalJSON(b, opts)
		}

		// generated message: jsonpb does not know our current indentation
		// level, so when indenting we marshal to a string and re-indent each
		// line ourselves.
		// NOTE: the err declared by ":=" in the else branch shadows this one,
		// but every non-nil inner error returns immediately, so no error is
		// silently dropped.
		var err error
		if b.indentCount <= 0 || len(b.indent) == 0 {
			err = opts.Marshal(b, v.(proto.Message))
		} else {
			str, err := opts.MarshalToString(v.(proto.Message))
			if err != nil {
				return err
			}
			indent := strings.Repeat(b.indent, b.indentCount)
			pos := 0
			// add indention prefix to each line
			for pos < len(str) {
				start := pos
				nextPos := strings.Index(str[pos:], "\n")
				if nextPos == -1 {
					nextPos = len(str)
				} else {
					nextPos = pos + nextPos + 1 // include newline
				}
				line := str[start:nextPos]
				if pos > 0 {
					_, err = b.WriteString(indent)
					if err != nil {
						return err
					}
				}
				_, err = b.WriteString(line)
				if err != nil {
					return err
				}
				pos = nextPos
			}
		}
		return err
	}
}
+
+func writeJsonString(b *indentBuffer, s string) error {
+ if sbytes, err := json.Marshal(s); err != nil {
+ return err
+ } else {
+ _, err := b.Write(sbytes)
+ return err
+ }
+}
+
+// UnmarshalJSON de-serializes the message that is present, in JSON format, in
+// the given bytes into this message. It first resets the current message. It
+// returns an error if the given bytes do not contain a valid encoding of this
+// message type in JSON format.
+//
+// This method is shorthand for invoking UnmarshalJSONPB with a default (zero
+// value) unmarshaler:
+//
+// m.UnmarshalMergeJSONPB(&jsonpb.Unmarshaler{}, js)
+//
+// So unknown fields will result in an error, and no provided jsonpb.AnyResolver
+// will be used when parsing google.protobuf.Any messages.
+func (m *Message) UnmarshalJSON(js []byte) error {
+ return m.UnmarshalJSONPB(&jsonpb.Unmarshaler{}, js)
+}
+
+// UnmarshalMergeJSON de-serializes the message that is present, in JSON format,
+// in the given bytes into this message. Unlike UnmarshalJSON, it does not first
+// reset the message, instead merging the data in the given bytes into the
+// existing data in this message.
+func (m *Message) UnmarshalMergeJSON(js []byte) error {
+ return m.UnmarshalMergeJSONPB(&jsonpb.Unmarshaler{}, js)
+}
+
+// UnmarshalJSONPB de-serializes the message that is present, in JSON format, in
+// the given bytes into this message. The given unmarshaler conveys options used
+// when parsing the JSON. This function first resets the current message. It
+// returns an error if the given bytes do not contain a valid encoding of this
+// message type in JSON format.
+//
+// The decoding is lenient:
+// 1. The JSON can refer to fields either by their JSON name or by their
+// declared name.
+// 2. The JSON can use either numeric values or string names for enum values.
+//
+// When instantiating nested messages, if this message's associated factory
+// returns a generated message type (as opposed to a dynamic message), the given
+// unmarshaler is used to unmarshal it.
+//
+// When unmarshaling any nested messages, any jsonpb.AnyResolver configured in
+// the given unmarshaler is augmented with knowledge of message types known to
+// this message's descriptor (and its enclosing file and set of transitive
+// dependencies).
+func (m *Message) UnmarshalJSONPB(opts *jsonpb.Unmarshaler, js []byte) error {
+ m.Reset()
+ if err := m.UnmarshalMergeJSONPB(opts, js); err != nil {
+ return err
+ }
+ return m.Validate()
+}
+
// UnmarshalMergeJSONPB de-serializes the message that is present, in JSON
// format, in the given bytes into this message. The given unmarshaler conveys
// options used when parsing the JSON. Unlike UnmarshalJSONPB, it does not
// first reset the message, instead merging the data in the given bytes into
// the existing data in this message.
func (m *Message) UnmarshalMergeJSONPB(opts *jsonpb.Unmarshaler, js []byte) error {
	r := newJsReader(js)
	err := m.unmarshalJson(r, opts)
	if err != nil {
		return err
	}
	// the JSON value must be the only content in js; anything left over after
	// the message is an error (poll should report io.EOF here)
	if t, err := r.poll(); err != io.EOF {
		b, _ := ioutil.ReadAll(r.unread())
		s := fmt.Sprintf("%v%s", t, string(b))
		return fmt.Errorf("superfluous data found after JSON object: %q", s)
	}
	return nil
}
+
// unmarshalWellKnownType checks whether m is one of the well-known types with
// a special JSON format. If so, it decodes the next JSON value into the
// registered generated counterpart, merges the result into m, and returns
// true; otherwise it returns (false, nil) and the caller parses normally.
func unmarshalWellKnownType(m *Message, r *jsReader, opts *jsonpb.Unmarshaler) (bool, error) {
	fqn := m.md.GetFullyQualifiedName()
	if _, ok := wellKnownTypeNames[fqn]; !ok {
		return false, nil
	}

	msgType := proto.MessageType(fqn)
	if msgType == nil {
		// the well-known types are linked in by this package's imports, so a
		// missing registration indicates a badly broken environment
		panic(fmt.Sprintf("could not find registered message type for %q", fqn))
	}

	// extract the raw JSON value from r, then consume the tokens it spans
	var js json.RawMessage
	if err := json.NewDecoder(r.unread()).Decode(&js); err != nil {
		return true, err
	}
	if err := r.skip(); err != nil {
		return true, err
	}

	// unmarshal into well-known type and then convert to dynamic message
	msg := reflect.New(msgType.Elem()).Interface().(proto.Message)
	if err := opts.Unmarshal(bytes.NewReader(js), msg); err != nil {
		return true, err
	}
	return true, m.MergeFrom(msg)
}
+
// unmarshalJson parses one JSON value from r into m. A bare JSON null leaves
// the message untouched. Well-known types are delegated to jsonpb; otherwise
// the value must be a JSON object whose keys are field (or extension) names.
func (m *Message) unmarshalJson(r *jsReader, opts *jsonpb.Unmarshaler) error {
	// augment any caller-supplied AnyResolver with knowledge of this
	// message's file and its transitive dependencies
	if r, changed := wrapResolver(opts.AnyResolver, m.mf, m.md.GetFile()); changed {
		newOpts := *opts
		newOpts.AnyResolver = r
		opts = &newOpts
	}

	if ok, err := unmarshalWellKnownType(m, r, opts); ok {
		return err
	}

	t, err := r.peek()
	if err != nil {
		return err
	}
	if t == nil {
		// if json is simply "null" we do nothing
		r.poll()
		return nil
	}

	if err := r.beginObject(); err != nil {
		return err
	}

	for r.hasNext() {
		f, err := r.nextObjectKey()
		if err != nil {
			return err
		}
		fd := m.FindFieldDescriptorByJSONName(f)
		if fd == nil {
			if opts.AllowUnknownFields {
				r.skip()
				continue
			}
			return fmt.Errorf("message type %s has no known field named %s", m.md.GetFullyQualifiedName(), f)
		}
		v, err := unmarshalJsField(fd, r, m.mf, opts)
		if err != nil {
			return err
		}
		if v != nil {
			if err := mergeField(m, fd, v); err != nil {
				return err
			}
		} else if fd.GetOneOf() != nil {
			// preserve explicit null for oneof fields (this is a little odd but
			// mimics the behavior of jsonpb with oneofs in generated message types)
			if fd.GetMessageType() != nil {
				typ := m.mf.GetKnownTypeRegistry().GetKnownType(fd.GetMessageType().GetFullyQualifiedName())
				if typ != nil {
					// typed nil
					if typ.Kind() != reflect.Ptr {
						typ = reflect.PtrTo(typ)
					}
					v = reflect.Zero(typ).Interface()
				} else {
					// can't use nil dynamic message, so we just use empty one instead
					v = m.mf.NewDynamicMessage(fd.GetMessageType())
				}
				if err := m.setField(fd, v); err != nil {
					return err
				}
			} else {
				// not a message... explicit null makes no sense
				return fmt.Errorf("message type %s cannot set field %s to null: it is not a message type", m.md.GetFullyQualifiedName(), f)
			}
		} else {
			// explicit null for a regular field clears it
			m.clearField(fd)
		}
	}

	if err := r.endObject(); err != nil {
		return err
	}

	return nil
}
+
+func isWellKnownValue(fd *desc.FieldDescriptor) bool {
+ return !fd.IsRepeated() && fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_MESSAGE &&
+ fd.GetMessageType().GetFullyQualifiedName() == "google.protobuf.Value"
+}
+
+func isWellKnownListValue(fd *desc.FieldDescriptor) bool {
+ // we look for ListValue; but we also look for Value, which can be assigned a ListValue
+ return !fd.IsRepeated() && fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_MESSAGE &&
+ (fd.GetMessageType().GetFullyQualifiedName() == "google.protobuf.ListValue" ||
+ fd.GetMessageType().GetFullyQualifiedName() == "google.protobuf.Value")
+}
+
// unmarshalJsField parses the JSON value for field fd from r. It returns nil
// (with nil error) when the value is an explicit JSON null. Map fields accept
// either a JSON object or an array of entry objects. To mimic the binary wire
// format's tolerance of optional<->repeated transitions, an array is accepted
// for a non-repeated field (last element wins) and a single value is accepted
// for a repeated field (stored as a one-element slice).
func unmarshalJsField(fd *desc.FieldDescriptor, r *jsReader, mf *MessageFactory, opts *jsonpb.Unmarshaler) (interface{}, error) {
	t, err := r.peek()
	if err != nil {
		return nil, err
	}
	if t == nil && !isWellKnownValue(fd) {
		// if value is null, just return nil
		// (unless field is google.protobuf.Value, in which case
		// we fall through to parse it as an instance where its
		// underlying value is set to a NullValue)
		r.poll()
		return nil, nil
	}

	if t == json.Delim('{') && fd.IsMap() {
		entryType := fd.GetMessageType()
		keyType := entryType.FindFieldByNumber(1)
		valueType := entryType.FindFieldByNumber(2)
		mp := map[interface{}]interface{}{}

		// TODO: if there are just two map keys "key" and "value" and they have the right type of values,
		// treat this JSON object as a single map entry message. (In keeping with support of map fields as
		// if they were normal repeated field of entry messages as well as supporting a transition from
		// optional to repeated...)

		if err := r.beginObject(); err != nil {
			return nil, err
		}
		for r.hasNext() {
			kk, err := unmarshalJsFieldElement(keyType, r, mf, opts, false)
			if err != nil {
				return nil, err
			}
			vv, err := unmarshalJsFieldElement(valueType, r, mf, opts, true)
			if err != nil {
				return nil, err
			}
			mp[kk] = vv
		}
		if err := r.endObject(); err != nil {
			return nil, err
		}

		return mp, nil
	} else if t == json.Delim('[') && !isWellKnownListValue(fd) {
		// We support parsing an array, even if field is not repeated, to mimic support in proto
		// binary wire format that supports changing an optional field to repeated and vice versa.
		// If the field is not repeated, we only keep the last value in the array.

		if err := r.beginArray(); err != nil {
			return nil, err
		}
		var sl []interface{}
		var v interface{}
		for r.hasNext() {
			var err error
			v, err = unmarshalJsFieldElement(fd, r, mf, opts, false)
			if err != nil {
				return nil, err
			}
			if fd.IsRepeated() && v != nil {
				sl = append(sl, v)
			}
		}
		if err := r.endArray(); err != nil {
			return nil, err
		}
		if fd.IsMap() {
			// array-of-entries form of a map: convert entry messages to a map
			mp := map[interface{}]interface{}{}
			for _, m := range sl {
				msg := m.(*Message)
				kk, err := msg.TryGetFieldByNumber(1)
				if err != nil {
					return nil, err
				}
				vv, err := msg.TryGetFieldByNumber(2)
				if err != nil {
					return nil, err
				}
				mp[kk] = vv
			}
			return mp, nil
		} else if fd.IsRepeated() {
			return sl, nil
		} else {
			return v, nil
		}
	} else {
		// We support parsing a singular value, even if field is repeated, to mimic support in proto
		// binary wire format that supports changing an optional field to repeated and vice versa.
		// If the field is repeated, we store value as singleton slice of that one value.

		v, err := unmarshalJsFieldElement(fd, r, mf, opts, false)
		if err != nil {
			return nil, err
		}
		if v == nil {
			return nil, nil
		}
		if fd.IsRepeated() {
			return []interface{}{v}, nil
		} else {
			return v, nil
		}
	}
}
+
// unmarshalJsFieldElement parses one element value for fd: a nested message,
// an enum (accepted as either number or value name), a numeric scalar (with
// overflow checks for 32-bit types), a bool (also accepted as the strings
// "true"/"false"), a float, base64 bytes, or a string. When allowNilMessage
// is true — used for map values — a JSON null yields a typed-nil message.
func unmarshalJsFieldElement(fd *desc.FieldDescriptor, r *jsReader, mf *MessageFactory, opts *jsonpb.Unmarshaler, allowNilMessage bool) (interface{}, error) {
	t, err := r.peek()
	if err != nil {
		return nil, err
	}

	switch fd.GetType() {
	case descriptorpb.FieldDescriptorProto_TYPE_MESSAGE,
		descriptorpb.FieldDescriptorProto_TYPE_GROUP:

		if t == nil && allowNilMessage {
			// if json is simply "null" return a nil pointer
			r.poll()
			return nilMessage(fd.GetMessageType()), nil
		}

		m := mf.NewMessage(fd.GetMessageType())
		if dm, ok := m.(*Message); ok {
			if err := dm.unmarshalJson(r, opts); err != nil {
				return nil, err
			}
		} else {
			// generated message type: capture the raw JSON, consume the
			// corresponding tokens, and let jsonpb do the parsing
			var msg json.RawMessage
			if err := json.NewDecoder(r.unread()).Decode(&msg); err != nil {
				return nil, err
			}
			if err := r.skip(); err != nil {
				return nil, err
			}
			if err := opts.Unmarshal(bytes.NewReader([]byte(msg)), m); err != nil {
				return nil, err
			}
		}
		return m, nil

	case descriptorpb.FieldDescriptorProto_TYPE_ENUM:
		if e, err := r.nextNumber(); err != nil {
			return nil, err
		} else {
			// value could be string or number
			if i, err := e.Int64(); err != nil {
				// number cannot be parsed, so see if it's an enum value name
				vd := fd.GetEnumType().FindValueByName(string(e))
				if vd != nil {
					return vd.GetNumber(), nil
				} else {
					return nil, fmt.Errorf("enum %q does not have value named %q", fd.GetEnumType().GetFullyQualifiedName(), e)
				}
			} else if i > math.MaxInt32 || i < math.MinInt32 {
				return nil, NumericOverflowError
			} else {
				return int32(i), err
			}
		}

	case descriptorpb.FieldDescriptorProto_TYPE_INT32,
		descriptorpb.FieldDescriptorProto_TYPE_SINT32,
		descriptorpb.FieldDescriptorProto_TYPE_SFIXED32:
		if i, err := r.nextInt(); err != nil {
			return nil, err
		} else if i > math.MaxInt32 || i < math.MinInt32 {
			return nil, NumericOverflowError
		} else {
			return int32(i), err
		}

	case descriptorpb.FieldDescriptorProto_TYPE_INT64,
		descriptorpb.FieldDescriptorProto_TYPE_SINT64,
		descriptorpb.FieldDescriptorProto_TYPE_SFIXED64:
		return r.nextInt()

	case descriptorpb.FieldDescriptorProto_TYPE_UINT32,
		descriptorpb.FieldDescriptorProto_TYPE_FIXED32:
		if i, err := r.nextUint(); err != nil {
			return nil, err
		} else if i > math.MaxUint32 {
			return nil, NumericOverflowError
		} else {
			return uint32(i), err
		}

	case descriptorpb.FieldDescriptorProto_TYPE_UINT64,
		descriptorpb.FieldDescriptorProto_TYPE_FIXED64:
		return r.nextUint()

	case descriptorpb.FieldDescriptorProto_TYPE_BOOL:
		// jsonpb accepts the strings "true" and "false" for bool fields, so
		// mirror that leniency here
		if str, ok := t.(string); ok {
			if str == "true" {
				r.poll() // consume token
				return true, err
			} else if str == "false" {
				r.poll() // consume token
				return false, err
			}
		}
		return r.nextBool()

	case descriptorpb.FieldDescriptorProto_TYPE_FLOAT:
		if f, err := r.nextFloat(); err != nil {
			return nil, err
		} else {
			return float32(f), nil
		}

	case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE:
		return r.nextFloat()

	case descriptorpb.FieldDescriptorProto_TYPE_BYTES:
		return r.nextBytes()

	case descriptorpb.FieldDescriptorProto_TYPE_STRING:
		return r.nextString()

	default:
		return nil, fmt.Errorf("unknown field type: %v", fd.GetType())
	}
}
+
// jsReader is a token-oriented JSON reader with single-token lookahead,
// built on encoding/json's streaming Decoder.
type jsReader struct {
	reader  *bytes.Reader // underlying input, retained so unread() can rewind
	dec     *json.Decoder
	current json.Token // token held by a pending peek
	peeked  bool       // whether current holds a valid peeked token
}

// newJsReader returns a jsReader over b. Numbers are decoded as json.Number
// to avoid float64 precision loss.
func newJsReader(b []byte) *jsReader {
	src := bytes.NewReader(b)
	dec := json.NewDecoder(src)
	dec.UseNumber()
	return &jsReader{reader: src, dec: dec}
}
+
// unread returns a reader over all input not yet consumed: any
// peeked-but-unpolled token, data buffered inside the JSON decoder, and the
// remainder of the underlying byte stream, in that order. The decoder and
// reader are copied by value so reading the result does not disturb r's own
// position.
func (r *jsReader) unread() io.Reader {
	bufs := make([]io.Reader, 3)
	var peeked []byte
	if r.peeked {
		if _, ok := r.current.(json.Delim); ok {
			// a Delim's textual form is its single delimiter character
			peeked = []byte(fmt.Sprintf("%v", r.current))
		} else {
			peeked, _ = json.Marshal(r.current)
		}
	}
	readerCopy := *r.reader
	decCopy := *r.dec

	bufs[0] = bytes.NewReader(peeked)
	bufs[1] = decCopy.Buffered()
	bufs[2] = &readerCopy
	return &concatReader{bufs: bufs}
}
+
// hasNext reports whether the array or object currently being decoded has
// another element.
func (r *jsReader) hasNext() bool {
	return r.dec.More()
}
+
+func (r *jsReader) peek() (json.Token, error) {
+ if r.peeked {
+ return r.current, nil
+ }
+ t, err := r.dec.Token()
+ if err != nil {
+ return nil, err
+ }
+ r.peeked = true
+ r.current = t
+ return t, nil
+}
+
+func (r *jsReader) poll() (json.Token, error) {
+ if r.peeked {
+ ret := r.current
+ r.current = nil
+ r.peeked = false
+ return ret, nil
+ }
+ return r.dec.Token()
+}
+
+func (r *jsReader) beginObject() error {
+ _, err := r.expect(func(t json.Token) bool { return t == json.Delim('{') }, nil, "start of JSON object: '{'")
+ return err
+}
+
+func (r *jsReader) endObject() error {
+ _, err := r.expect(func(t json.Token) bool { return t == json.Delim('}') }, nil, "end of JSON object: '}'")
+ return err
+}
+
+func (r *jsReader) beginArray() error {
+ _, err := r.expect(func(t json.Token) bool { return t == json.Delim('[') }, nil, "start of array: '['")
+ return err
+}
+
+func (r *jsReader) endArray() error {
+ _, err := r.expect(func(t json.Token) bool { return t == json.Delim(']') }, nil, "end of array: ']'")
+ return err
+}
+
+func (r *jsReader) nextObjectKey() (string, error) {
+ return r.nextString()
+}
+
+func (r *jsReader) nextString() (string, error) {
+ t, err := r.expect(func(t json.Token) bool { _, ok := t.(string); return ok }, "", "string")
+ if err != nil {
+ return "", err
+ }
+ return t.(string), nil
+}
+
+func (r *jsReader) nextBytes() ([]byte, error) {
+ str, err := r.nextString()
+ if err != nil {
+ return nil, err
+ }
+ return base64.StdEncoding.DecodeString(str)
+}
+
+func (r *jsReader) nextBool() (bool, error) {
+ t, err := r.expect(func(t json.Token) bool { _, ok := t.(bool); return ok }, false, "boolean")
+ if err != nil {
+ return false, err
+ }
+ return t.(bool), nil
+}
+
+func (r *jsReader) nextInt() (int64, error) {
+ n, err := r.nextNumber()
+ if err != nil {
+ return 0, err
+ }
+ return n.Int64()
+}
+
+func (r *jsReader) nextUint() (uint64, error) {
+ n, err := r.nextNumber()
+ if err != nil {
+ return 0, err
+ }
+ return strconv.ParseUint(string(n), 10, 64)
+}
+
+func (r *jsReader) nextFloat() (float64, error) {
+ n, err := r.nextNumber()
+ if err != nil {
+ return 0, err
+ }
+ return n.Float64()
+}
+
+func (r *jsReader) nextNumber() (json.Number, error) {
+ t, err := r.expect(func(t json.Token) bool { return reflect.TypeOf(t).Kind() == reflect.String }, "0", "number")
+ if err != nil {
+ return "", err
+ }
+ switch t := t.(type) {
+ case json.Number:
+ return t, nil
+ case string:
+ return json.Number(t), nil
+ }
+ return "", fmt.Errorf("expecting a number but got %v", t)
+}
+
+func (r *jsReader) skip() error {
+ t, err := r.poll()
+ if err != nil {
+ return err
+ }
+ if t == json.Delim('[') {
+ if err := r.skipArray(); err != nil {
+ return err
+ }
+ } else if t == json.Delim('{') {
+ if err := r.skipObject(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (r *jsReader) skipArray() error {
+ for r.hasNext() {
+ if err := r.skip(); err != nil {
+ return err
+ }
+ }
+ if err := r.endArray(); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (r *jsReader) skipObject() error {
+ for r.hasNext() {
+ // skip object key
+ if err := r.skip(); err != nil {
+ return err
+ }
+ // and value
+ if err := r.skip(); err != nil {
+ return err
+ }
+ }
+ if err := r.endObject(); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (r *jsReader) expect(predicate func(json.Token) bool, ifNil interface{}, expected string) (interface{}, error) {
+ t, err := r.poll()
+ if err != nil {
+ return nil, err
+ }
+ if t == nil && ifNil != nil {
+ return ifNil, nil
+ }
+ if !predicate(t) {
+ return t, fmt.Errorf("bad input: expecting %s ; instead got %v", expected, t)
+ }
+ return t, nil
+}
+
// concatReader presents a sequence of readers as one logical stream,
// advancing to the next reader whenever the current one reports io.EOF.
type concatReader struct {
	bufs []io.Reader
	curr int // index of the reader currently being drained
}

// Read fills p from the current reader, moving on to subsequent readers on
// EOF, and reports io.EOF only once every reader is exhausted.
func (r *concatReader) Read(p []byte) (int, error) {
	total := 0
	for r.curr < len(r.bufs) {
		c, err := r.bufs[r.curr].Read(p)
		total += c
		if err != io.EOF {
			return total, err
		}
		r.curr++
		p = p[c:]
	}
	return total, io.EOF
}
+
// AnyResolver returns a jsonpb.AnyResolver that uses the given file descriptors
// to resolve message names. It uses the given factory, which may be nil, to
// instantiate messages. The messages that it returns when resolving a type name
// may often be dynamic messages.
func AnyResolver(mf *MessageFactory, files ...*desc.FileDescriptor) jsonpb.AnyResolver {
	return &anyResolver{mf: mf, files: files}
}

// anyResolver resolves message type names against a set of file descriptors
// (and their transitive imports), optionally delegating to another resolver
// first.
type anyResolver struct {
	mf    *MessageFactory
	files []*desc.FileDescriptor
	// ignored holds files already covered by a wrapped upstream resolver, so
	// searches here skip them
	ignored map[*desc.FileDescriptor]struct{}
	// other, if non-nil, is consulted before this resolver's own files
	other jsonpb.AnyResolver
}
+
// wrapResolver ensures that resolver r is able to resolve types defined in
// file f. It returns r unchanged (and false) when r already covers f;
// otherwise it returns a new anyResolver for f that delegates to r, and true.
func wrapResolver(r jsonpb.AnyResolver, mf *MessageFactory, f *desc.FileDescriptor) (jsonpb.AnyResolver, bool) {
	if r, ok := r.(*anyResolver); ok {
		if _, ok := r.ignored[f]; ok {
			// if the current resolver is ignoring this file, it's because another
			// (upstream) resolver is already handling it, so nothing to do
			return r, false
		}
		for _, file := range r.files {
			if file == f {
				// no need to wrap!
				return r, false
			}
		}
		// ignore files that will be checked by the resolver we're wrapping
		// (we'll just delegate and let it search those files)
		ignored := map[*desc.FileDescriptor]struct{}{}
		for i := range r.ignored {
			ignored[i] = struct{}{}
		}
		ignore(r.files, ignored)
		return &anyResolver{mf: mf, files: []*desc.FileDescriptor{f}, ignored: ignored, other: r}, true
	}
	return &anyResolver{mf: mf, files: []*desc.FileDescriptor{f}, other: r}, true
}
+
+func ignore(files []*desc.FileDescriptor, ignored map[*desc.FileDescriptor]struct{}) {
+ for _, f := range files {
+ if _, ok := ignored[f]; ok {
+ continue
+ }
+ ignored[f] = struct{}{}
+ ignore(f.GetDependencies(), ignored)
+ }
+}
+
// Resolve implements jsonpb.AnyResolver. It strips any URL prefix from
// typeUrl, then tries, in order: the wrapped resolver (if any), the
// configured file descriptors and their imports, the factory's known-type
// registry, and finally the global proto registry.
func (r *anyResolver) Resolve(typeUrl string) (proto.Message, error) {
	// the message name is everything after the last slash of the type URL
	mname := typeUrl
	if slash := strings.LastIndex(mname, "/"); slash >= 0 {
		mname = mname[slash+1:]
	}

	// see if the user-specified resolver is able to do the job
	if r.other != nil {
		msg, err := r.other.Resolve(typeUrl)
		if err == nil {
			return msg, nil
		}
	}

	// try to find the message in our known set of files
	checked := map[*desc.FileDescriptor]struct{}{}
	for _, f := range r.files {
		md := r.findMessage(f, mname, checked)
		if md != nil {
			return r.mf.NewMessage(md), nil
		}
	}
	// failing that, see if the message factory knows about this type
	// (CreateIfKnown is presumably safe on a nil *KnownTypeRegistry receiver,
	// since that is what gets passed when there is no factory)
	var ktr *KnownTypeRegistry
	if r.mf != nil {
		ktr = r.mf.ktr
	} else {
		ktr = (*KnownTypeRegistry)(nil)
	}
	m := ktr.CreateIfKnown(mname)
	if m != nil {
		return m, nil
	}

	// no other resolver to fallback to? mimic default behavior
	mt := proto.MessageType(mname)
	if mt == nil {
		return nil, fmt.Errorf("unknown message type %q", mname)
	}
	return reflect.New(mt.Elem()).Interface().(proto.Message), nil
}
+
+func (r *anyResolver) findMessage(fd *desc.FileDescriptor, msgName string, checked map[*desc.FileDescriptor]struct{}) *desc.MessageDescriptor {
+ // if this is an ignored descriptor, skip
+ if _, ok := r.ignored[fd]; ok {
+ return nil
+ }
+
+ // bail if we've already checked this file
+ if _, ok := checked[fd]; ok {
+ return nil
+ }
+ checked[fd] = struct{}{}
+
+ // see if this file has the message
+ md := fd.FindMessage(msgName)
+ if md != nil {
+ return md
+ }
+
+ // if not, recursively search the file's imports
+ for _, dep := range fd.GetDependencies() {
+ md = r.findMessage(dep, msgName, checked)
+ if md != nil {
+ return md
+ }
+ }
+ return nil
+}
+
+var _ jsonpb.AnyResolver = (*anyResolver)(nil)
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go
new file mode 100644
index 0000000..69969fc
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go
@@ -0,0 +1,131 @@
+//go:build !go1.12
+// +build !go1.12
+
+package dynamic
+
+import (
+ "reflect"
+
+ "github.com/jhump/protoreflect/desc"
+)
+
+// Pre-Go-1.12, we must use reflect.Value.MapKeys to reflectively
+// iterate a map. (We can be more efficient in Go 1.12 and up...)
+
+func mapsEqual(a, b reflect.Value) bool {
+ if a.Len() != b.Len() {
+ return false
+ }
+ if a.Len() == 0 && b.Len() == 0 {
+ // Optimize the case where maps are frequently empty because MapKeys()
+ // function allocates heavily.
+ return true
+ }
+
+ for _, k := range a.MapKeys() {
+ av := a.MapIndex(k)
+ bv := b.MapIndex(k)
+ if !bv.IsValid() {
+ return false
+ }
+ if !fieldsEqual(av.Interface(), bv.Interface()) {
+ return false
+ }
+ }
+ return true
+}
+
+func validFieldValueForMapField(fd *desc.FieldDescriptor, val reflect.Value) (interface{}, error) {
+ // make a defensive copy while we check the contents
+ // (also converts to map[interface{}]interface{} if it's some other type)
+ keyField := fd.GetMessageType().GetFields()[0]
+ valField := fd.GetMessageType().GetFields()[1]
+ m := map[interface{}]interface{}{}
+ for _, k := range val.MapKeys() {
+ if k.Kind() == reflect.Interface {
+ // unwrap it
+ k = reflect.ValueOf(k.Interface())
+ }
+ kk, err := validElementFieldValueForRv(keyField, k, false)
+ if err != nil {
+ return nil, err
+ }
+ v := val.MapIndex(k)
+ if v.Kind() == reflect.Interface {
+ // unwrap it
+ v = reflect.ValueOf(v.Interface())
+ }
+ vv, err := validElementFieldValueForRv(valField, v, true)
+ if err != nil {
+ return nil, err
+ }
+ m[kk] = vv
+ }
+ return m, nil
+}
+
+func canConvertMap(src reflect.Value, target reflect.Type) bool {
+ kt := target.Key()
+ vt := target.Elem()
+ for _, k := range src.MapKeys() {
+ if !canConvert(k, kt) {
+ return false
+ }
+ if !canConvert(src.MapIndex(k), vt) {
+ return false
+ }
+ }
+ return true
+}
+
+func mergeMapVal(src, target reflect.Value, targetType reflect.Type, deterministic bool) error {
+ tkt := targetType.Key()
+ tvt := targetType.Elem()
+ for _, k := range src.MapKeys() {
+ v := src.MapIndex(k)
+ skt := k.Type()
+ svt := v.Type()
+ var nk, nv reflect.Value
+ if tkt == skt {
+ nk = k
+ } else if tkt.Kind() == reflect.Ptr && tkt.Elem() == skt {
+ nk = k.Addr()
+ } else {
+ nk = reflect.New(tkt).Elem()
+ if err := mergeVal(k, nk, deterministic); err != nil {
+ return err
+ }
+ }
+ if tvt == svt {
+ nv = v
+ } else if tvt.Kind() == reflect.Ptr && tvt.Elem() == svt {
+ nv = v.Addr()
+ } else {
+ nv = reflect.New(tvt).Elem()
+ if err := mergeVal(v, nv, deterministic); err != nil {
+ return err
+ }
+ }
+ if target.IsNil() {
+ target.Set(reflect.MakeMap(targetType))
+ }
+ target.SetMapIndex(nk, nv)
+ }
+ return nil
+}
+
+func mergeMapField(m *Message, fd *desc.FieldDescriptor, rv reflect.Value) error {
+ for _, k := range rv.MapKeys() {
+ if k.Kind() == reflect.Interface && !k.IsNil() {
+ k = k.Elem()
+ }
+ v := rv.MapIndex(k)
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ v = v.Elem()
+ }
+ if err := m.putMapField(fd, k.Interface(), v.Interface()); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go
new file mode 100644
index 0000000..fb353cf
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go
@@ -0,0 +1,139 @@
+//go:build go1.12
+// +build go1.12
+
+package dynamic
+
+import (
+ "reflect"
+
+ "github.com/jhump/protoreflect/desc"
+)
+
+// With Go 1.12 and above, we can use reflect.Value.MapRange to iterate
+// over maps more efficiently than using reflect.Value.MapKeys.
+
+func mapsEqual(a, b reflect.Value) bool {
+ if a.Len() != b.Len() {
+ return false
+ }
+ if a.Len() == 0 && b.Len() == 0 {
+ // Optimize the case where maps are frequently empty
+ return true
+ }
+
+ iter := a.MapRange()
+ for iter.Next() {
+ k := iter.Key()
+ av := iter.Value()
+ bv := b.MapIndex(k)
+ if !bv.IsValid() {
+ return false
+ }
+ if !fieldsEqual(av.Interface(), bv.Interface()) {
+ return false
+ }
+ }
+ return true
+}
+
+func validFieldValueForMapField(fd *desc.FieldDescriptor, val reflect.Value) (interface{}, error) {
+ // make a defensive copy while we check the contents
+ // (also converts to map[interface{}]interface{} if it's some other type)
+ keyField := fd.GetMessageType().GetFields()[0]
+ valField := fd.GetMessageType().GetFields()[1]
+ m := map[interface{}]interface{}{}
+ iter := val.MapRange()
+ for iter.Next() {
+ k := iter.Key()
+ if k.Kind() == reflect.Interface {
+ // unwrap it
+ k = reflect.ValueOf(k.Interface())
+ }
+ kk, err := validElementFieldValueForRv(keyField, k, false)
+ if err != nil {
+ return nil, err
+ }
+ v := iter.Value()
+ if v.Kind() == reflect.Interface {
+ // unwrap it
+ v = reflect.ValueOf(v.Interface())
+ }
+ vv, err := validElementFieldValueForRv(valField, v, true)
+ if err != nil {
+ return nil, err
+ }
+ m[kk] = vv
+ }
+ return m, nil
+}
+
+func canConvertMap(src reflect.Value, target reflect.Type) bool {
+ kt := target.Key()
+ vt := target.Elem()
+ iter := src.MapRange()
+ for iter.Next() {
+ if !canConvert(iter.Key(), kt) {
+ return false
+ }
+ if !canConvert(iter.Value(), vt) {
+ return false
+ }
+ }
+ return true
+}
+
+func mergeMapVal(src, target reflect.Value, targetType reflect.Type, deterministic bool) error {
+ tkt := targetType.Key()
+ tvt := targetType.Elem()
+ iter := src.MapRange()
+ for iter.Next() {
+ k := iter.Key()
+ v := iter.Value()
+ skt := k.Type()
+ svt := v.Type()
+ var nk, nv reflect.Value
+ if tkt == skt {
+ nk = k
+ } else if tkt.Kind() == reflect.Ptr && tkt.Elem() == skt {
+ nk = k.Addr()
+ } else {
+ nk = reflect.New(tkt).Elem()
+ if err := mergeVal(k, nk, deterministic); err != nil {
+ return err
+ }
+ }
+ if tvt == svt {
+ nv = v
+ } else if tvt.Kind() == reflect.Ptr && tvt.Elem() == svt {
+ nv = v.Addr()
+ } else {
+ nv = reflect.New(tvt).Elem()
+ if err := mergeVal(v, nv, deterministic); err != nil {
+ return err
+ }
+ }
+ if target.IsNil() {
+ target.Set(reflect.MakeMap(targetType))
+ }
+ target.SetMapIndex(nk, nv)
+ }
+ return nil
+}
+
+func mergeMapField(m *Message, fd *desc.FieldDescriptor, rv reflect.Value) error {
+ iter := rv.MapRange()
+ for iter.Next() {
+ k := iter.Key()
+ v := iter.Value()
+ if k.Kind() == reflect.Interface && !k.IsNil() {
+ k = k.Elem()
+ }
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ v = v.Elem()
+ }
+ if err := m.putMapField(fd, k.Interface(), v.Interface()); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/merge.go b/vendor/github.com/jhump/protoreflect/dynamic/merge.go
new file mode 100644
index 0000000..ce727fd
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/merge.go
@@ -0,0 +1,100 @@
+package dynamic
+
+import (
+ "errors"
+ "reflect"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/jhump/protoreflect/desc"
+)
+
+// Merge merges the given source message into the given destination message. Use
+// use this instead of proto.Merge when one or both of the messages might be a
+// a dynamic message. If there is a problem merging the messages, such as the
+// two messages having different types, then this method will panic (just as
+// proto.Merges does).
+func Merge(dst, src proto.Message) {
+ if dm, ok := dst.(*Message); ok {
+ if err := dm.MergeFrom(src); err != nil {
+ panic(err.Error())
+ }
+ } else if dm, ok := src.(*Message); ok {
+ if err := dm.MergeInto(dst); err != nil {
+ panic(err.Error())
+ }
+ } else {
+ proto.Merge(dst, src)
+ }
+}
+
+// TryMerge merges the given source message into the given destination message.
+// You can use this instead of proto.Merge when one or both of the messages
+// might be a dynamic message. Unlike proto.Merge, this method will return an
+// error on failure instead of panic'ing.
+func TryMerge(dst, src proto.Message) error {
+ if dm, ok := dst.(*Message); ok {
+ if err := dm.MergeFrom(src); err != nil {
+ return err
+ }
+ } else if dm, ok := src.(*Message); ok {
+ if err := dm.MergeInto(dst); err != nil {
+ return err
+ }
+ } else {
+ // proto.Merge panics on bad input, so we first verify
+ // inputs and return error instead of panic
+ out := reflect.ValueOf(dst)
+ if out.IsNil() {
+ return errors.New("proto: nil destination")
+ }
+ in := reflect.ValueOf(src)
+ if in.Type() != out.Type() {
+ return errors.New("proto: type mismatch")
+ }
+ proto.Merge(dst, src)
+ }
+ return nil
+}
+
+func mergeField(m *Message, fd *desc.FieldDescriptor, val interface{}) error {
+ rv := reflect.ValueOf(val)
+
+ if fd.IsMap() && rv.Kind() == reflect.Map {
+ return mergeMapField(m, fd, rv)
+ }
+
+ if fd.IsRepeated() && rv.Kind() == reflect.Slice && rv.Type() != typeOfBytes {
+ for i := 0; i < rv.Len(); i++ {
+ e := rv.Index(i)
+ if e.Kind() == reflect.Interface && !e.IsNil() {
+ e = e.Elem()
+ }
+ if err := m.addRepeatedField(fd, e.Interface()); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ if fd.IsRepeated() {
+ return m.addRepeatedField(fd, val)
+ } else if fd.GetMessageType() == nil {
+ return m.setField(fd, val)
+ }
+
+ // it's a message type, so we want to merge contents
+ var err error
+ if val, err = validFieldValue(fd, val); err != nil {
+ return err
+ }
+
+ existing, _ := m.doGetField(fd, true)
+ if existing != nil && !reflect.ValueOf(existing).IsNil() {
+ return TryMerge(existing.(proto.Message), val.(proto.Message))
+ }
+
+ // no existing message, so just set field
+ m.internalSetField(fd, val)
+ return nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/message_factory.go b/vendor/github.com/jhump/protoreflect/dynamic/message_factory.go
new file mode 100644
index 0000000..683e7b3
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/message_factory.go
@@ -0,0 +1,207 @@
+package dynamic
+
+import (
+ "reflect"
+ "sync"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/jhump/protoreflect/desc"
+)
+
+// MessageFactory can be used to create new empty message objects. A default instance
+// (without extension registry or known-type registry specified) will always return
+// dynamic messages (e.g. type will be *dynamic.Message) except for "well-known" types.
+// The well-known types include primitive wrapper types and a handful of other special
+// types defined in standard protobuf definitions, like Any, Duration, and Timestamp.
+type MessageFactory struct {
+ er *ExtensionRegistry
+ ktr *KnownTypeRegistry
+}
+
+// NewMessageFactoryWithExtensionRegistry creates a new message factory where any
+// dynamic messages produced will use the given extension registry to recognize and
+// parse extension fields.
+func NewMessageFactoryWithExtensionRegistry(er *ExtensionRegistry) *MessageFactory {
+ return NewMessageFactoryWithRegistries(er, nil)
+}
+
+// NewMessageFactoryWithKnownTypeRegistry creates a new message factory where the
+// known types, per the given registry, will be returned as normal protobuf messages
+// (e.g. generated structs, instead of dynamic messages).
+func NewMessageFactoryWithKnownTypeRegistry(ktr *KnownTypeRegistry) *MessageFactory {
+ return NewMessageFactoryWithRegistries(nil, ktr)
+}
+
+// NewMessageFactoryWithDefaults creates a new message factory where all "default" types
+// (those for which protoc-generated code is statically linked into the Go program) are
+// known types. If any dynamic messages are produced, they will recognize and parse all
+// "default" extension fields. This is the equivalent of:
+//
+// NewMessageFactoryWithRegistries(
+// NewExtensionRegistryWithDefaults(),
+// NewKnownTypeRegistryWithDefaults())
+func NewMessageFactoryWithDefaults() *MessageFactory {
+ return NewMessageFactoryWithRegistries(NewExtensionRegistryWithDefaults(), NewKnownTypeRegistryWithDefaults())
+}
+
+// NewMessageFactoryWithRegistries creates a new message factory with the given extension
+// and known type registries.
+func NewMessageFactoryWithRegistries(er *ExtensionRegistry, ktr *KnownTypeRegistry) *MessageFactory {
+ return &MessageFactory{
+ er: er,
+ ktr: ktr,
+ }
+}
+
+// NewMessage creates a new empty message that corresponds to the given descriptor.
+// If the given descriptor describes a "known type" then that type is instantiated.
+// Otherwise, an empty dynamic message is returned.
+func (f *MessageFactory) NewMessage(md *desc.MessageDescriptor) proto.Message {
+ var ktr *KnownTypeRegistry
+ if f != nil {
+ ktr = f.ktr
+ }
+ if m := ktr.CreateIfKnown(md.GetFullyQualifiedName()); m != nil {
+ return m
+ }
+ return NewMessageWithMessageFactory(md, f)
+}
+
+// NewDynamicMessage creates a new empty dynamic message that corresponds to the given
+// descriptor. This is like f.NewMessage(md) except the known type registry is not
+// consulted so the return value is always a dynamic message.
+//
+// This is also like dynamic.NewMessage(md) except that the returned message will use
+// this factory when creating other messages, like during de-serialization of fields
+// that are themselves message types.
+func (f *MessageFactory) NewDynamicMessage(md *desc.MessageDescriptor) *Message {
+ return NewMessageWithMessageFactory(md, f)
+}
+
+// GetKnownTypeRegistry returns the known type registry that this factory uses to
+// instantiate known (e.g. generated) message types.
+func (f *MessageFactory) GetKnownTypeRegistry() *KnownTypeRegistry {
+ if f == nil {
+ return nil
+ }
+ return f.ktr
+}
+
+// GetExtensionRegistry returns the extension registry that this factory uses to
+// create dynamic messages. The registry is used by dynamic messages to recognize
+// and parse extension fields during de-serialization.
+func (f *MessageFactory) GetExtensionRegistry() *ExtensionRegistry {
+ if f == nil {
+ return nil
+ }
+ return f.er
+}
+
+type wkt interface {
+ XXX_WellKnownType() string
+}
+
+var typeOfWkt = reflect.TypeOf((*wkt)(nil)).Elem()
+
+// KnownTypeRegistry is a registry of known message types, as identified by their
+// fully-qualified name. A known message type is one for which a protoc-generated
+// struct exists, so a dynamic message is not necessary to represent it. A
+// MessageFactory uses a KnownTypeRegistry to decide whether to create a generated
+// struct or a dynamic message. The zero-value registry (including the behavior of
+// a nil pointer) only knows about the "well-known types" in protobuf. These
+// include only the wrapper types and a handful of other special types like Any,
+// Duration, and Timestamp.
+type KnownTypeRegistry struct {
+ excludeWkt bool
+ includeDefault bool
+ mu sync.RWMutex
+ types map[string]reflect.Type
+}
+
+// NewKnownTypeRegistryWithDefaults creates a new registry that knows about all
+// "default" types (those for which protoc-generated code is statically linked
+// into the Go program).
+func NewKnownTypeRegistryWithDefaults() *KnownTypeRegistry {
+ return &KnownTypeRegistry{includeDefault: true}
+}
+
+// NewKnownTypeRegistryWithoutWellKnownTypes creates a new registry that does *not*
+// include the "well-known types" in protobuf. So even well-known types would be
+// represented by a dynamic message.
+func NewKnownTypeRegistryWithoutWellKnownTypes() *KnownTypeRegistry {
+ return &KnownTypeRegistry{excludeWkt: true}
+}
+
+// AddKnownType adds the types of the given messages as known types.
+func (r *KnownTypeRegistry) AddKnownType(kts ...proto.Message) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ if r.types == nil {
+ r.types = map[string]reflect.Type{}
+ }
+ for _, kt := range kts {
+ r.types[proto.MessageName(kt)] = reflect.TypeOf(kt)
+ }
+}
+
+// CreateIfKnown will construct an instance of the given message if it is a known type.
+// If the given name is unknown, nil is returned.
+func (r *KnownTypeRegistry) CreateIfKnown(messageName string) proto.Message {
+ msgType := r.GetKnownType(messageName)
+ if msgType == nil {
+ return nil
+ }
+
+ if msgType.Kind() == reflect.Ptr {
+ return reflect.New(msgType.Elem()).Interface().(proto.Message)
+ } else {
+ return reflect.New(msgType).Elem().Interface().(proto.Message)
+ }
+}
+
+func isWellKnownType(t reflect.Type) bool {
+ if t.Implements(typeOfWkt) {
+ return true
+ }
+ if msg, ok := reflect.Zero(t).Interface().(proto.Message); ok {
+ name := proto.MessageName(msg)
+ _, ok := wellKnownTypeNames[name]
+ return ok
+ }
+ return false
+}
+
+// GetKnownType will return the reflect.Type for the given message name if it is
+// known. If it is not known, nil is returned.
+func (r *KnownTypeRegistry) GetKnownType(messageName string) reflect.Type {
+ if r == nil {
+ // a nil registry behaves the same as zero value instance: only know of well-known types
+ t := proto.MessageType(messageName)
+ if t != nil && isWellKnownType(t) {
+ return t
+ }
+ return nil
+ }
+
+ if r.includeDefault {
+ t := proto.MessageType(messageName)
+ if t != nil && isMessage(t) {
+ return t
+ }
+ } else if !r.excludeWkt {
+ t := proto.MessageType(messageName)
+ if t != nil && isWellKnownType(t) {
+ return t
+ }
+ }
+
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ return r.types[messageName]
+}
+
+func isMessage(t reflect.Type) bool {
+ _, ok := reflect.Zero(t).Interface().(proto.Message)
+ return ok
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/text.go b/vendor/github.com/jhump/protoreflect/dynamic/text.go
new file mode 100644
index 0000000..5680dc2
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/text.go
@@ -0,0 +1,1177 @@
+package dynamic
+
+// Marshalling and unmarshalling of dynamic messages to/from proto's standard text format
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "text/scanner"
+ "unicode"
+
+ "github.com/golang/protobuf/proto"
+ "google.golang.org/protobuf/types/descriptorpb"
+
+ "github.com/jhump/protoreflect/codec"
+ "github.com/jhump/protoreflect/desc"
+)
+
+// MarshalText serializes this message to bytes in the standard text format,
+// returning an error if the operation fails. The resulting bytes will be a
+// valid UTF8 string.
+//
+// This method uses a compact form: no newlines, and spaces between field
+// identifiers and values are elided.
+func (m *Message) MarshalText() ([]byte, error) {
+ var b indentBuffer
+ b.indentCount = -1 // no indentation
+ if err := m.marshalText(&b); err != nil {
+ return nil, err
+ }
+ return b.Bytes(), nil
+}
+
+// MarshalTextIndent serializes this message to bytes in the standard text
+// format, returning an error if the operation fails. The resulting bytes will
+// be a valid UTF8 string.
+//
+// This method uses a "pretty-printed" form, with each field on its own line and
+// spaces between field identifiers and values.
+func (m *Message) MarshalTextIndent() ([]byte, error) {
+ var b indentBuffer
+ b.indent = " " // TODO: option for indent?
+ if err := m.marshalText(&b); err != nil {
+ return nil, err
+ }
+ return b.Bytes(), nil
+}
+
+func (m *Message) marshalText(b *indentBuffer) error {
+ // TODO: option for emitting extended Any format?
+ first := true
+ // first the known fields
+ for _, tag := range m.knownFieldTags() {
+ itag := int32(tag)
+ v := m.values[itag]
+ fd := m.FindFieldDescriptor(itag)
+ if fd.IsMap() {
+ md := fd.GetMessageType()
+ kfd := md.FindFieldByNumber(1)
+ vfd := md.FindFieldByNumber(2)
+ mp := v.(map[interface{}]interface{})
+ keys := make([]interface{}, 0, len(mp))
+ for k := range mp {
+ keys = append(keys, k)
+ }
+ sort.Sort(sortable(keys))
+ for _, mk := range keys {
+ mv := mp[mk]
+ err := b.maybeNext(&first)
+ if err != nil {
+ return err
+ }
+ err = marshalKnownFieldMapEntryText(b, fd, kfd, mk, vfd, mv)
+ if err != nil {
+ return err
+ }
+ }
+ } else if fd.IsRepeated() {
+ sl := v.([]interface{})
+ for _, slv := range sl {
+ err := b.maybeNext(&first)
+ if err != nil {
+ return err
+ }
+ err = marshalKnownFieldText(b, fd, slv)
+ if err != nil {
+ return err
+ }
+ }
+ } else {
+ err := b.maybeNext(&first)
+ if err != nil {
+ return err
+ }
+ err = marshalKnownFieldText(b, fd, v)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ // then the unknown fields
+ for _, tag := range m.unknownFieldTags() {
+ itag := int32(tag)
+ ufs := m.unknownFields[itag]
+ for _, uf := range ufs {
+ err := b.maybeNext(&first)
+ if err != nil {
+ return err
+ }
+ _, err = fmt.Fprintf(b, "%d", tag)
+ if err != nil {
+ return err
+ }
+ if uf.Encoding == proto.WireStartGroup {
+ err = b.WriteByte('{')
+ if err != nil {
+ return err
+ }
+ err = b.start()
+ if err != nil {
+ return err
+ }
+ in := codec.NewBuffer(uf.Contents)
+ err = marshalUnknownGroupText(b, in, true)
+ if err != nil {
+ return err
+ }
+ err = b.end()
+ if err != nil {
+ return err
+ }
+ err = b.WriteByte('}')
+ if err != nil {
+ return err
+ }
+ } else {
+ err = b.sep()
+ if err != nil {
+ return err
+ }
+ if uf.Encoding == proto.WireBytes {
+ err = writeString(b, string(uf.Contents))
+ if err != nil {
+ return err
+ }
+ } else {
+ _, err = b.WriteString(strconv.FormatUint(uf.Value, 10))
+ if err != nil {
+ return err
+ }
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func marshalKnownFieldMapEntryText(b *indentBuffer, fd *desc.FieldDescriptor, kfd *desc.FieldDescriptor, mk interface{}, vfd *desc.FieldDescriptor, mv interface{}) error {
+ var name string
+ if fd.IsExtension() {
+ name = fmt.Sprintf("[%s]", fd.GetFullyQualifiedName())
+ } else {
+ name = fd.GetName()
+ }
+ _, err := b.WriteString(name)
+ if err != nil {
+ return err
+ }
+ err = b.sep()
+ if err != nil {
+ return err
+ }
+
+ err = b.WriteByte('<')
+ if err != nil {
+ return err
+ }
+ err = b.start()
+ if err != nil {
+ return err
+ }
+
+ err = marshalKnownFieldText(b, kfd, mk)
+ if err != nil {
+ return err
+ }
+ err = b.next()
+ if err != nil {
+ return err
+ }
+ if !isNil(mv) {
+ err = marshalKnownFieldText(b, vfd, mv)
+ if err != nil {
+ return err
+ }
+ }
+
+ err = b.end()
+ if err != nil {
+ return err
+ }
+ return b.WriteByte('>')
+}
+
+func marshalKnownFieldText(b *indentBuffer, fd *desc.FieldDescriptor, v interface{}) error {
+ group := fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP
+ if group {
+ var name string
+ if fd.IsExtension() {
+ name = fmt.Sprintf("[%s]", fd.GetMessageType().GetFullyQualifiedName())
+ } else {
+ name = fd.GetMessageType().GetName()
+ }
+ _, err := b.WriteString(name)
+ if err != nil {
+ return err
+ }
+ } else {
+ var name string
+ if fd.IsExtension() {
+ name = fmt.Sprintf("[%s]", fd.GetFullyQualifiedName())
+ } else {
+ name = fd.GetName()
+ }
+ _, err := b.WriteString(name)
+ if err != nil {
+ return err
+ }
+ err = b.sep()
+ if err != nil {
+ return err
+ }
+ }
+ rv := reflect.ValueOf(v)
+ switch rv.Kind() {
+ case reflect.Int32, reflect.Int64:
+ ed := fd.GetEnumType()
+ if ed != nil {
+ n := int32(rv.Int())
+ vd := ed.FindValueByNumber(n)
+ if vd == nil {
+ _, err := b.WriteString(strconv.FormatInt(rv.Int(), 10))
+ return err
+ } else {
+ _, err := b.WriteString(vd.GetName())
+ return err
+ }
+ } else {
+ _, err := b.WriteString(strconv.FormatInt(rv.Int(), 10))
+ return err
+ }
+ case reflect.Uint32, reflect.Uint64:
+ _, err := b.WriteString(strconv.FormatUint(rv.Uint(), 10))
+ return err
+ case reflect.Float32, reflect.Float64:
+ f := rv.Float()
+ var str string
+ if math.IsNaN(f) {
+ str = "nan"
+ } else if math.IsInf(f, 1) {
+ str = "inf"
+ } else if math.IsInf(f, -1) {
+ str = "-inf"
+ } else {
+ var bits int
+ if rv.Kind() == reflect.Float32 {
+ bits = 32
+ } else {
+ bits = 64
+ }
+ str = strconv.FormatFloat(rv.Float(), 'g', -1, bits)
+ }
+ _, err := b.WriteString(str)
+ return err
+ case reflect.Bool:
+ _, err := b.WriteString(strconv.FormatBool(rv.Bool()))
+ return err
+ case reflect.Slice:
+ return writeString(b, string(rv.Bytes()))
+ case reflect.String:
+ return writeString(b, rv.String())
+ default:
+ var err error
+ if group {
+ err = b.WriteByte('{')
+ } else {
+ err = b.WriteByte('<')
+ }
+ if err != nil {
+ return err
+ }
+ err = b.start()
+ if err != nil {
+ return err
+ }
+ // must be a message
+ if dm, ok := v.(*Message); ok {
+ err = dm.marshalText(b)
+ if err != nil {
+ return err
+ }
+ } else {
+ err = proto.CompactText(b, v.(proto.Message))
+ if err != nil {
+ return err
+ }
+ }
+ err = b.end()
+ if err != nil {
+ return err
+ }
+ if group {
+ return b.WriteByte('}')
+ } else {
+ return b.WriteByte('>')
+ }
+ }
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
+func writeString(b *indentBuffer, s string) error {
+ // use WriteByte here to get any needed indent
+ if err := b.WriteByte('"'); err != nil {
+ return err
+ }
+ // Loop over the bytes, not the runes.
+ for i := 0; i < len(s); i++ {
+ var err error
+ // Divergence from C++: we don't escape apostrophes.
+ // There's no need to escape them, and the C++ parser
+ // copes with a naked apostrophe.
+ switch c := s[i]; c {
+ case '\n':
+ _, err = b.WriteString("\\n")
+ case '\r':
+ _, err = b.WriteString("\\r")
+ case '\t':
+ _, err = b.WriteString("\\t")
+ case '"':
+ _, err = b.WriteString("\\\"")
+ case '\\':
+ _, err = b.WriteString("\\\\")
+ default:
+ if c >= 0x20 && c < 0x7f {
+ err = b.WriteByte(c)
+ } else {
+ _, err = fmt.Fprintf(b, "\\%03o", c)
+ }
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return b.WriteByte('"')
+}
+
+func marshalUnknownGroupText(b *indentBuffer, in *codec.Buffer, topLevel bool) error {
+ first := true
+ for {
+ if in.EOF() {
+ if topLevel {
+ return nil
+ }
+ // this is a nested message: we are expecting an end-group tag, not EOF!
+ return io.ErrUnexpectedEOF
+ }
+ tag, wireType, err := in.DecodeTagAndWireType()
+ if err != nil {
+ return err
+ }
+ if wireType == proto.WireEndGroup {
+ return nil
+ }
+ err = b.maybeNext(&first)
+ if err != nil {
+ return err
+ }
+ _, err = fmt.Fprintf(b, "%d", tag)
+ if err != nil {
+ return err
+ }
+ if wireType == proto.WireStartGroup {
+ err = b.WriteByte('{')
+ if err != nil {
+ return err
+ }
+ err = b.start()
+ if err != nil {
+ return err
+ }
+ err = marshalUnknownGroupText(b, in, false)
+ if err != nil {
+ return err
+ }
+ err = b.end()
+ if err != nil {
+ return err
+ }
+ err = b.WriteByte('}')
+ if err != nil {
+ return err
+ }
+ continue
+ } else {
+ err = b.sep()
+ if err != nil {
+ return err
+ }
+ if wireType == proto.WireBytes {
+ contents, err := in.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ err = writeString(b, string(contents))
+ if err != nil {
+ return err
+ }
+ } else {
+ var v uint64
+ switch wireType {
+ case proto.WireVarint:
+ v, err = in.DecodeVarint()
+ case proto.WireFixed32:
+ v, err = in.DecodeFixed32()
+ case proto.WireFixed64:
+ v, err = in.DecodeFixed64()
+ default:
+ return proto.ErrInternalBadWireType
+ }
+ if err != nil {
+ return err
+ }
+ _, err = b.WriteString(strconv.FormatUint(v, 10))
+ if err != nil {
+ return err
+ }
+ }
+ }
+ }
+}
+
+// UnmarshalText de-serializes the message that is present, in text format, in
+// the given bytes into this message. It first resets the current message. It
+// returns an error if the given bytes do not contain a valid encoding of this
+// message type in the standard text format
+func (m *Message) UnmarshalText(text []byte) error {
+ m.Reset()
+ if err := m.UnmarshalMergeText(text); err != nil {
+ return err
+ }
+ return m.Validate()
+}
+
+// UnmarshalMergeText de-serializes the message that is present, in text format,
+// in the given bytes into this message. Unlike UnmarshalText, it does not first
+// reset the message, instead merging the data in the given bytes into the
+// existing data in this message.
+func (m *Message) UnmarshalMergeText(text []byte) error {
+ return m.unmarshalText(newReader(text), tokenEOF)
+}
+
+func (m *Message) unmarshalText(tr *txtReader, end tokenType) error {
+ for {
+ tok := tr.next()
+ if tok.tokTyp == end {
+ return nil
+ }
+ if tok.tokTyp == tokenEOF {
+ return io.ErrUnexpectedEOF
+ }
+ var fd *desc.FieldDescriptor
+ var extendedAnyType *desc.MessageDescriptor
+ if tok.tokTyp == tokenInt {
+ // tag number (indicates unknown field)
+ tag, err := strconv.ParseInt(tok.val.(string), 10, 32)
+ if err != nil {
+ return err
+ }
+ itag := int32(tag)
+ fd = m.FindFieldDescriptor(itag)
+ if fd == nil {
+ // can't parse the value w/out field descriptor, so skip it
+ tok = tr.next()
+ if tok.tokTyp == tokenEOF {
+ return io.ErrUnexpectedEOF
+ } else if tok.tokTyp == tokenOpenBrace {
+ if err := skipMessageText(tr, true); err != nil {
+ return err
+ }
+ } else if tok.tokTyp == tokenColon {
+ if err := skipFieldValueText(tr); err != nil {
+ return err
+ }
+ } else {
+ return textError(tok, "Expecting a colon ':' or brace '{'; instead got %q", tok.txt)
+ }
+ tok = tr.peek()
+ if tok.tokTyp.IsSep() {
+ tr.next() // consume separator
+ }
+ continue
+ }
+ } else {
+ fieldName, err := unmarshalFieldNameText(tr, tok)
+ if err != nil {
+ return err
+ }
+ fd = m.FindFieldDescriptorByName(fieldName)
+ if fd == nil {
+ // See if it's a group name
+ for _, field := range m.md.GetFields() {
+ if field.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP && field.GetMessageType().GetName() == fieldName {
+ fd = field
+ break
+ }
+ }
+ if fd == nil {
+ // maybe this is an extended Any
+ if m.md.GetFullyQualifiedName() == "google.protobuf.Any" && fieldName[0] == '[' && strings.Contains(fieldName, "/") {
+ // strip surrounding "[" and "]" and extract type name from URL
+ typeUrl := fieldName[1 : len(fieldName)-1]
+ mname := typeUrl
+ if slash := strings.LastIndex(mname, "/"); slash >= 0 {
+ mname = mname[slash+1:]
+ }
+ // TODO: add a way to weave an AnyResolver to this point
+ extendedAnyType = findMessageDescriptor(mname, m.md.GetFile())
+ if extendedAnyType == nil {
+ return textError(tok, "could not parse Any with unknown type URL %q", fieldName)
+ }
+ // field 1 is "type_url"
+ typeUrlField := m.md.FindFieldByNumber(1)
+ if err := m.TrySetField(typeUrlField, typeUrl); err != nil {
+ return err
+ }
+ } else {
+ // TODO: add a flag to just ignore unrecognized field names
+ return textError(tok, "%q is not a recognized field name of %q", fieldName, m.md.GetFullyQualifiedName())
+ }
+ }
+ }
+ }
+ tok = tr.next()
+ if tok.tokTyp == tokenEOF {
+ return io.ErrUnexpectedEOF
+ }
+ if extendedAnyType != nil {
+ // consume optional colon; make sure this is a "start message" token
+ if tok.tokTyp == tokenColon {
+ tok = tr.next()
+ if tok.tokTyp == tokenEOF {
+ return io.ErrUnexpectedEOF
+ }
+ }
+ if tok.tokTyp.EndToken() == tokenError {
+ return textError(tok, "Expecting a '<' or '{'; instead got %q", tok.txt)
+ }
+
+ // TODO: use mf.NewMessage and, if not a dynamic message, use proto.UnmarshalText to unmarshal it
+ g := m.mf.NewDynamicMessage(extendedAnyType)
+ if err := g.unmarshalText(tr, tok.tokTyp.EndToken()); err != nil {
+ return err
+ }
+ // now we marshal the message to bytes and store in the Any
+ b, err := g.Marshal()
+ if err != nil {
+ return err
+ }
+ // field 2 is "value"
+ anyValueField := m.md.FindFieldByNumber(2)
+ if err := m.TrySetField(anyValueField, b); err != nil {
+ return err
+ }
+
+ } else if (fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP ||
+ fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_MESSAGE) &&
+ tok.tokTyp.EndToken() != tokenError {
+
+ // TODO: use mf.NewMessage and, if not a dynamic message, use proto.UnmarshalText to unmarshal it
+ g := m.mf.NewDynamicMessage(fd.GetMessageType())
+ if err := g.unmarshalText(tr, tok.tokTyp.EndToken()); err != nil {
+ return err
+ }
+ if fd.IsRepeated() {
+ if err := m.TryAddRepeatedField(fd, g); err != nil {
+ return err
+ }
+ } else {
+ if err := m.TrySetField(fd, g); err != nil {
+ return err
+ }
+ }
+ } else {
+ if tok.tokTyp != tokenColon {
+ return textError(tok, "Expecting a colon ':'; instead got %q", tok.txt)
+ }
+ if err := m.unmarshalFieldValueText(fd, tr); err != nil {
+ return err
+ }
+ }
+ tok = tr.peek()
+ if tok.tokTyp.IsSep() {
+ tr.next() // consume separator
+ }
+ }
+}
+func findMessageDescriptor(name string, fd *desc.FileDescriptor) *desc.MessageDescriptor {
+ md := findMessageInTransitiveDeps(name, fd, map[*desc.FileDescriptor]struct{}{})
+ if md == nil {
+ // couldn't find it; see if we have this message linked in
+ md, _ = desc.LoadMessageDescriptor(name)
+ }
+ return md
+}
+
+func findMessageInTransitiveDeps(name string, fd *desc.FileDescriptor, seen map[*desc.FileDescriptor]struct{}) *desc.MessageDescriptor {
+ if _, ok := seen[fd]; ok {
+ // already checked this file
+ return nil
+ }
+ seen[fd] = struct{}{}
+ md := fd.FindMessage(name)
+ if md != nil {
+ return md
+ }
+ // not in this file so recursively search its deps
+ for _, dep := range fd.GetDependencies() {
+ md = findMessageInTransitiveDeps(name, dep, seen)
+ if md != nil {
+ return md
+ }
+ }
+ // couldn't find it
+ return nil
+}
+
+func textError(tok *token, format string, args ...interface{}) error {
+ var msg string
+ if tok.tokTyp == tokenError {
+ msg = tok.val.(error).Error()
+ } else {
+ msg = fmt.Sprintf(format, args...)
+ }
+ return fmt.Errorf("line %d, col %d: %s", tok.pos.Line, tok.pos.Column, msg)
+}
+
+type setFunction func(*Message, *desc.FieldDescriptor, interface{}) error
+
// unmarshalFieldValueText parses the value portion of a field (after the
// optional colon). A bracketed list ("[v, v, ...]") is unmarshaled element by
// element; otherwise a single element is parsed. Repeated fields append each
// element; singular fields are stored via mergeField.
func (m *Message) unmarshalFieldValueText(fd *desc.FieldDescriptor, tr *txtReader) error {
	var set setFunction
	if fd.IsRepeated() {
		set = (*Message).addRepeatedField
	} else {
		set = mergeField
	}
	tok := tr.peek()
	if tok.tokTyp == tokenOpenBracket {
		tr.next() // consume tok
		for {
			if err := m.unmarshalFieldElementText(fd, tr, set); err != nil {
				return err
			}
			tok = tr.peek()
			if tok.tokTyp == tokenCloseBracket {
				tr.next() // consume tok
				return nil
			} else if tok.tokTyp.IsSep() {
				tr.next() // consume separator
			}
		}
	}
	return m.unmarshalFieldElementText(fd, tr, set)
}
+
// unmarshalFieldElementText parses a single value for the given field and
// stores it via set (append for repeated fields, merge/overwrite otherwise).
// The accepted token shape is dictated by the field's declared proto type; a
// mismatch falls through to the positioned error built at the bottom.
func (m *Message) unmarshalFieldElementText(fd *desc.FieldDescriptor, tr *txtReader, set setFunction) error {
	tok := tr.next()
	if tok.tokTyp == tokenEOF {
		return io.ErrUnexpectedEOF
	}

	// expected describes the token we wanted, used for the error message when
	// no case below returns.
	var expected string
	switch fd.GetType() {
	case descriptorpb.FieldDescriptorProto_TYPE_BOOL:
		if tok.tokTyp == tokenIdent {
			if tok.val.(string) == "true" {
				return set(m, fd, true)
			} else if tok.val.(string) == "false" {
				return set(m, fd, false)
			}
		}
		expected = "boolean value"
	case descriptorpb.FieldDescriptorProto_TYPE_BYTES:
		if tok.tokTyp == tokenString {
			return set(m, fd, []byte(tok.val.(string)))
		}
		expected = "bytes string value"
	case descriptorpb.FieldDescriptorProto_TYPE_STRING:
		if tok.tokTyp == tokenString {
			return set(m, fd, tok.val)
		}
		expected = "string value"
	case descriptorpb.FieldDescriptorProto_TYPE_FLOAT:
		switch tok.tokTyp {
		case tokenFloat:
			return set(m, fd, float32(tok.val.(float64)))
		case tokenInt:
			// int tokens are kept as raw text; re-parse as a 32-bit float
			if f, err := strconv.ParseFloat(tok.val.(string), 32); err != nil {
				return err
			} else {
				return set(m, fd, float32(f))
			}
		case tokenIdent:
			// special float literals: inf / nan (case-insensitive)
			ident := strings.ToLower(tok.val.(string))
			if ident == "inf" {
				return set(m, fd, float32(math.Inf(1)))
			} else if ident == "nan" {
				return set(m, fd, float32(math.NaN()))
			}
		case tokenMinus:
			// "-inf": a lone minus followed by the ident "inf"
			peeked := tr.peek()
			if peeked.tokTyp == tokenIdent {
				ident := strings.ToLower(peeked.val.(string))
				if ident == "inf" {
					tr.next() // consume peeked token
					return set(m, fd, float32(math.Inf(-1)))
				}
			}
		}
		expected = "float value"
	case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE:
		switch tok.tokTyp {
		case tokenFloat:
			return set(m, fd, tok.val)
		case tokenInt:
			if f, err := strconv.ParseFloat(tok.val.(string), 64); err != nil {
				return err
			} else {
				return set(m, fd, f)
			}
		case tokenIdent:
			ident := strings.ToLower(tok.val.(string))
			if ident == "inf" {
				return set(m, fd, math.Inf(1))
			} else if ident == "nan" {
				return set(m, fd, math.NaN())
			}
		case tokenMinus:
			peeked := tr.peek()
			if peeked.tokTyp == tokenIdent {
				ident := strings.ToLower(peeked.val.(string))
				if ident == "inf" {
					tr.next() // consume peeked token
					return set(m, fd, math.Inf(-1))
				}
			}
		}
		expected = "float value"
	case descriptorpb.FieldDescriptorProto_TYPE_INT32,
		descriptorpb.FieldDescriptorProto_TYPE_SINT32,
		descriptorpb.FieldDescriptorProto_TYPE_SFIXED32:
		if tok.tokTyp == tokenInt {
			if i, err := strconv.ParseInt(tok.val.(string), 10, 32); err != nil {
				return err
			} else {
				return set(m, fd, int32(i))
			}
		}
		expected = "int value"
	case descriptorpb.FieldDescriptorProto_TYPE_INT64,
		descriptorpb.FieldDescriptorProto_TYPE_SINT64,
		descriptorpb.FieldDescriptorProto_TYPE_SFIXED64:
		if tok.tokTyp == tokenInt {
			if i, err := strconv.ParseInt(tok.val.(string), 10, 64); err != nil {
				return err
			} else {
				return set(m, fd, i)
			}
		}
		expected = "int value"
	case descriptorpb.FieldDescriptorProto_TYPE_UINT32,
		descriptorpb.FieldDescriptorProto_TYPE_FIXED32:
		if tok.tokTyp == tokenInt {
			if i, err := strconv.ParseUint(tok.val.(string), 10, 32); err != nil {
				return err
			} else {
				return set(m, fd, uint32(i))
			}
		}
		expected = "unsigned int value"
	case descriptorpb.FieldDescriptorProto_TYPE_UINT64,
		descriptorpb.FieldDescriptorProto_TYPE_FIXED64:
		if tok.tokTyp == tokenInt {
			if i, err := strconv.ParseUint(tok.val.(string), 10, 64); err != nil {
				return err
			} else {
				return set(m, fd, i)
			}
		}
		expected = "unsigned int value"
	case descriptorpb.FieldDescriptorProto_TYPE_ENUM:
		if tok.tokTyp == tokenIdent {
			// TODO: add a flag to just ignore unrecognized enum value names?
			vd := fd.GetEnumType().FindValueByName(tok.val.(string))
			if vd != nil {
				return set(m, fd, vd.GetNumber())
			}
		} else if tok.tokTyp == tokenInt {
			// numeric enum values are accepted even if unnamed
			if i, err := strconv.ParseInt(tok.val.(string), 10, 32); err != nil {
				return err
			} else {
				return set(m, fd, int32(i))
			}
		}
		expected = fmt.Sprintf("enum %s value", fd.GetEnumType().GetFullyQualifiedName())
	case descriptorpb.FieldDescriptorProto_TYPE_MESSAGE,
		descriptorpb.FieldDescriptorProto_TYPE_GROUP:

		endTok := tok.tokTyp.EndToken()
		if endTok != tokenError {
			// recursively unmarshal the nested message body up to endTok
			dm := m.mf.NewDynamicMessage(fd.GetMessageType())
			if err := dm.unmarshalText(tr, endTok); err != nil {
				return err
			}
			// TODO: ideally we would use mf.NewMessage and, if not a dynamic message, use
			// proto package to unmarshal it. But the text parser isn't particularly amenable
			// to that, so we instead convert a dynamic message to a generated one if the
			// known-type registry knows about the generated type...
			var ktr *KnownTypeRegistry
			if m.mf != nil {
				ktr = m.mf.ktr
			}
			pm := ktr.CreateIfKnown(fd.GetMessageType().GetFullyQualifiedName())
			if pm != nil {
				// NOTE(review): this stores the generated message pm only when
				// ConvertTo *fails* (leaving pm possibly partially populated),
				// while success falls through to store the dynamic message.
				// The condition looks inverted (err == nil seems intended) —
				// confirm against upstream before changing vendored code.
				if err := dm.ConvertTo(pm); err != nil {
					return set(m, fd, pm)
				}
			}
			return set(m, fd, dm)
		}
		expected = fmt.Sprintf("message %s value", fd.GetMessageType().GetFullyQualifiedName())
	default:
		return fmt.Errorf("field %q of message %q has unrecognized type: %v", fd.GetFullyQualifiedName(), m.md.GetFullyQualifiedName(), fd.GetType())
	}

	// if we get here, token was wrong type; create error message
	var article string
	// pick "an" when the expected description starts with a vowel
	if strings.Contains("aieou", expected[0:1]) {
		article = "an"
	} else {
		article = "a"
	}
	return textError(tok, "Expecting %s %s; got %q", article, expected, tok.txt)
}
+
// unmarshalFieldNameText reads a field name starting at tok. Plain identifiers
// are returned as-is. Extension and Any names — "[pkg.ext]" or "(pkg.ext)",
// possibly with slash-separated URL components like "[type.googleapis.com/pkg.Msg]" —
// are normalized to the bracketed form "[a/b/...]".
func unmarshalFieldNameText(tr *txtReader, tok *token) (string, error) {
	if tok.tokTyp == tokenOpenBracket || tok.tokTyp == tokenOpenParen {
		// extension name
		var closeType tokenType
		var closeChar string
		if tok.tokTyp == tokenOpenBracket {
			closeType = tokenCloseBracket
			closeChar = "close bracket ']'"
		} else {
			closeType = tokenCloseParen
			closeChar = "close paren ')'"
		}
		// must be followed by an identifier
		idents := make([]string, 0, 1)
		for {
			tok = tr.next()
			if tok.tokTyp == tokenEOF {
				return "", io.ErrUnexpectedEOF
			} else if tok.tokTyp != tokenIdent {
				return "", textError(tok, "Expecting an identifier; instead got %q", tok.txt)
			}
			idents = append(idents, tok.val.(string))
			// and then close bracket/paren, or "/" to keep adding URL elements to name
			tok = tr.next()
			if tok.tokTyp == tokenEOF {
				return "", io.ErrUnexpectedEOF
			} else if tok.tokTyp == closeType {
				break
			} else if tok.tokTyp != tokenSlash {
				return "", textError(tok, "Expecting a %s; instead got %q", closeChar, tok.txt)
			}
		}
		return "[" + strings.Join(idents, "/") + "]", nil
	} else if tok.tokTyp == tokenIdent {
		// normal field name
		return tok.val.(string), nil
	} else {
		return "", textError(tok, "Expecting an identifier or tag number; instead got %q", tok.txt)
	}
}
+
+func skipFieldNameText(tr *txtReader) error {
+ tok := tr.next()
+ if tok.tokTyp == tokenEOF {
+ return io.ErrUnexpectedEOF
+ } else if tok.tokTyp == tokenInt || tok.tokTyp == tokenIdent {
+ return nil
+ } else {
+ _, err := unmarshalFieldNameText(tr, tok)
+ return err
+ }
+}
+
+func skipFieldValueText(tr *txtReader) error {
+ tok := tr.peek()
+ if tok.tokTyp == tokenOpenBracket {
+ tr.next() // consume tok
+ for {
+ if err := skipFieldElementText(tr); err != nil {
+ return err
+ }
+ tok = tr.peek()
+ if tok.tokTyp == tokenCloseBracket {
+ tr.next() // consume tok
+ return nil
+ } else if tok.tokTyp.IsSep() {
+ tr.next() // consume separator
+ }
+
+ }
+ }
+ return skipFieldElementText(tr)
+}
+
+func skipFieldElementText(tr *txtReader) error {
+ tok := tr.next()
+ switch tok.tokTyp {
+ case tokenEOF:
+ return io.ErrUnexpectedEOF
+ case tokenInt, tokenFloat, tokenString, tokenIdent:
+ return nil
+ case tokenOpenAngle:
+ return skipMessageText(tr, false)
+ default:
+ return textError(tok, "Expecting an angle bracket '<' or a value; instead got %q", tok.txt)
+ }
+}
+
// skipMessageText consumes an entire message body without recording it,
// stopping (but not consuming) at the matching close token: '}' when isGroup,
// '>' otherwise. Each field is either "name { ... }" (group syntax) or
// "name: value".
func skipMessageText(tr *txtReader, isGroup bool) error {
	for {
		tok := tr.peek()
		if tok.tokTyp == tokenEOF {
			return io.ErrUnexpectedEOF
		} else if isGroup && tok.tokTyp == tokenCloseBrace {
			return nil
		} else if !isGroup && tok.tokTyp == tokenCloseAngle {
			return nil
		}

		// field name or tag
		if err := skipFieldNameText(tr); err != nil {
			return err
		}

		// field value
		tok = tr.next()
		if tok.tokTyp == tokenEOF {
			return io.ErrUnexpectedEOF
		} else if tok.tokTyp == tokenOpenBrace {
			// group syntax: no colon, braces delimit the nested message
			if err := skipMessageText(tr, true); err != nil {
				return err
			}
		} else if tok.tokTyp == tokenColon {
			if err := skipFieldValueText(tr); err != nil {
				return err
			}
		} else {
			return textError(tok, "Expecting a colon ':' or brace '{'; instead got %q", tok.txt)
		}

		// optional ',' or ';' between fields
		tok = tr.peek()
		if tok.tokTyp.IsSep() {
			tr.next() // consume separator
		}
	}
}
+
// tokenType identifies the kind of lexical element produced by the text
// tokenizer (txtReader).
type tokenType int

const (
	tokenError tokenType = iota // malformed input; the token's val holds the error
	tokenEOF
	tokenIdent
	tokenString
	tokenInt // val kept as raw text; signedness depends on the target field
	tokenFloat
	tokenColon
	tokenComma
	tokenSemiColon
	tokenOpenBrace
	tokenCloseBrace
	tokenOpenBracket
	tokenCloseBracket
	tokenOpenAngle
	tokenCloseAngle
	tokenOpenParen
	tokenCloseParen
	tokenSlash // only valid between components of an Any type URL
	tokenMinus
)
+
+func (t tokenType) IsSep() bool {
+ return t == tokenComma || t == tokenSemiColon
+}
+
+func (t tokenType) EndToken() tokenType {
+ switch t {
+ case tokenOpenAngle:
+ return tokenCloseAngle
+ case tokenOpenBrace:
+ return tokenCloseBrace
+ default:
+ return tokenError
+ }
+}
+
// token is a single lexical element produced by txtReader.
type token struct {
	tokTyp tokenType
	val    interface{} // parsed value: string, float64, rune, or error for tokenError
	txt    string      // raw scanned text, used in error messages
	pos    scanner.Position
}
+
// txtReader is a one-token-lookahead lexer over text-format input.
type txtReader struct {
	scanner    scanner.Scanner
	peeked     token // most recently scanned token; valid only while havePeeked
	havePeeked bool
}
+
// newReader constructs a tokenizer over the given text-format input. It
// configures text/scanner for Go-like tokens, widening the identifier rule so
// fully-qualified (dotted) names scan as a single identifier.
func newReader(text []byte) *txtReader {
	sc := scanner.Scanner{}
	sc.Init(bytes.NewReader(text))
	sc.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanFloats | scanner.ScanChars |
		scanner.ScanStrings | scanner.ScanComments | scanner.SkipComments
	// identifiers are same restrictions as Go identifiers, except we also allow dots since
	// we accept fully-qualified names
	sc.IsIdentRune = func(ch rune, i int) bool {
		return ch == '_' || unicode.IsLetter(ch) ||
			(i > 0 && unicode.IsDigit(ch)) ||
			(i > 0 && ch == '.')
	}
	// ignore errors; we handle them if/when we see malformed tokens
	sc.Error = func(s *scanner.Scanner, msg string) {}
	return &txtReader{scanner: sc}
}
+
// peek returns the next token without consuming it; the scanned token is
// cached until next() consumes it. Malformed input yields a tokenError whose
// val carries the underlying error.
func (p *txtReader) peek() *token {
	if p.havePeeked {
		return &p.peeked
	}
	t := p.scanner.Scan()
	if t == scanner.EOF {
		p.peeked.tokTyp = tokenEOF
		p.peeked.val = nil
		p.peeked.txt = ""
		p.peeked.pos = p.scanner.Position
	} else if err := p.processToken(t, p.scanner.TokenText(), p.scanner.Position); err != nil {
		p.peeked.tokTyp = tokenError
		p.peeked.val = err
	}
	p.havePeeked = true
	return &p.peeked
}
+
// processToken translates one text/scanner token into the tokenizer's own
// token representation, stored in p.peeked. It returns an error for malformed
// numbers/strings or characters that are not part of the text format.
func (p *txtReader) processToken(t rune, text string, pos scanner.Position) error {
	p.peeked.pos = pos
	p.peeked.txt = text
	switch t {
	case scanner.Ident:
		p.peeked.tokTyp = tokenIdent
		p.peeked.val = text
	case scanner.Int:
		p.peeked.tokTyp = tokenInt
		p.peeked.val = text // can't parse the number because we don't know if it's signed or unsigned
	case scanner.Float:
		p.peeked.tokTyp = tokenFloat
		var err error
		if p.peeked.val, err = strconv.ParseFloat(text, 64); err != nil {
			return err
		}
	case scanner.Char, scanner.String:
		p.peeked.tokTyp = tokenString
		var err error
		if p.peeked.val, err = strconv.Unquote(text); err != nil {
			return err
		}
	case '-': // unary minus, for negative ints and floats
		ch := p.scanner.Peek()
		if ch < '0' || ch > '9' {
			// not followed by a digit: emit a bare minus (e.g. for "-inf")
			p.peeked.tokTyp = tokenMinus
			p.peeked.val = '-'
		} else {
			// scan the following number and fold the sign into its text
			t := p.scanner.Scan()
			if t == scanner.EOF {
				return io.ErrUnexpectedEOF
			} else if t == scanner.Float {
				p.peeked.tokTyp = tokenFloat
				text += p.scanner.TokenText()
				p.peeked.txt = text
				var err error
				if p.peeked.val, err = strconv.ParseFloat(text, 64); err != nil {
					p.peeked.pos = p.scanner.Position
					return err
				}
			} else if t == scanner.Int {
				p.peeked.tokTyp = tokenInt
				text += p.scanner.TokenText()
				p.peeked.txt = text
				p.peeked.val = text // can't parse the number because we don't know if it's signed or unsigned
			} else {
				p.peeked.pos = p.scanner.Position
				return fmt.Errorf("expecting an int or float but got %q", p.scanner.TokenText())
			}
		}
	case ':':
		p.peeked.tokTyp = tokenColon
		p.peeked.val = ':'
	case ',':
		p.peeked.tokTyp = tokenComma
		p.peeked.val = ','
	case ';':
		p.peeked.tokTyp = tokenSemiColon
		p.peeked.val = ';'
	case '{':
		p.peeked.tokTyp = tokenOpenBrace
		p.peeked.val = '{'
	case '}':
		p.peeked.tokTyp = tokenCloseBrace
		p.peeked.val = '}'
	case '<':
		p.peeked.tokTyp = tokenOpenAngle
		p.peeked.val = '<'
	case '>':
		p.peeked.tokTyp = tokenCloseAngle
		p.peeked.val = '>'
	case '[':
		p.peeked.tokTyp = tokenOpenBracket
		p.peeked.val = '['
	case ']':
		p.peeked.tokTyp = tokenCloseBracket
		p.peeked.val = ']'
	case '(':
		p.peeked.tokTyp = tokenOpenParen
		p.peeked.val = '('
	case ')':
		p.peeked.tokTyp = tokenCloseParen
		p.peeked.val = ')'
	case '/':
		// only allowed to separate URL components in expanded Any format
		p.peeked.tokTyp = tokenSlash
		p.peeked.val = '/'
	default:
		return fmt.Errorf("invalid character: %c", t)
	}
	return nil
}
+
+func (p *txtReader) next() *token {
+ t := p.peek()
+ if t.tokTyp != tokenEOF && t.tokTyp != tokenError {
+ p.havePeeked = false
+ }
+ return t
+}
diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/adapt.go b/vendor/github.com/jhump/protoreflect/grpcreflect/adapt.go
new file mode 100644
index 0000000..661b925
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/grpcreflect/adapt.go
@@ -0,0 +1,137 @@
+package grpcreflect
+
+import (
+ refv1 "google.golang.org/grpc/reflection/grpc_reflection_v1"
+ refv1alpha "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
+)
+
+func toV1Request(v1alpha *refv1alpha.ServerReflectionRequest) *refv1.ServerReflectionRequest {
+ var v1 refv1.ServerReflectionRequest
+ v1.Host = v1alpha.Host
+ switch mr := v1alpha.MessageRequest.(type) {
+ case *refv1alpha.ServerReflectionRequest_FileByFilename:
+ v1.MessageRequest = &refv1.ServerReflectionRequest_FileByFilename{
+ FileByFilename: mr.FileByFilename,
+ }
+ case *refv1alpha.ServerReflectionRequest_FileContainingSymbol:
+ v1.MessageRequest = &refv1.ServerReflectionRequest_FileContainingSymbol{
+ FileContainingSymbol: mr.FileContainingSymbol,
+ }
+ case *refv1alpha.ServerReflectionRequest_FileContainingExtension:
+ if mr.FileContainingExtension != nil {
+ v1.MessageRequest = &refv1.ServerReflectionRequest_FileContainingExtension{
+ FileContainingExtension: &refv1.ExtensionRequest{
+ ContainingType: mr.FileContainingExtension.GetContainingType(),
+ ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(),
+ },
+ }
+ }
+ case *refv1alpha.ServerReflectionRequest_AllExtensionNumbersOfType:
+ v1.MessageRequest = &refv1.ServerReflectionRequest_AllExtensionNumbersOfType{
+ AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType,
+ }
+ case *refv1alpha.ServerReflectionRequest_ListServices:
+ v1.MessageRequest = &refv1.ServerReflectionRequest_ListServices{
+ ListServices: mr.ListServices,
+ }
+ default:
+ // no value set
+ }
+ return &v1
+}
+
// toV1AlphaRequest converts a v1 reflection request into the equivalent
// v1alpha request. Nil oneof wrapper values are skipped, leaving
// MessageRequest unset.
func toV1AlphaRequest(v1 *refv1.ServerReflectionRequest) *refv1alpha.ServerReflectionRequest {
	var v1alpha refv1alpha.ServerReflectionRequest
	v1alpha.Host = v1.Host
	switch mr := v1.MessageRequest.(type) {
	case *refv1.ServerReflectionRequest_FileByFilename:
		if mr != nil {
			v1alpha.MessageRequest = &refv1alpha.ServerReflectionRequest_FileByFilename{
				FileByFilename: mr.FileByFilename,
			}
		}
	case *refv1.ServerReflectionRequest_FileContainingSymbol:
		if mr != nil {
			v1alpha.MessageRequest = &refv1alpha.ServerReflectionRequest_FileContainingSymbol{
				FileContainingSymbol: mr.FileContainingSymbol,
			}
		}
	case *refv1.ServerReflectionRequest_FileContainingExtension:
		if mr != nil {
			v1alpha.MessageRequest = &refv1alpha.ServerReflectionRequest_FileContainingExtension{
				FileContainingExtension: &refv1alpha.ExtensionRequest{
					ContainingType:  mr.FileContainingExtension.GetContainingType(),
					ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(),
				},
			}
		}
	case *refv1.ServerReflectionRequest_AllExtensionNumbersOfType:
		if mr != nil {
			v1alpha.MessageRequest = &refv1alpha.ServerReflectionRequest_AllExtensionNumbersOfType{
				AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType,
			}
		}
	case *refv1.ServerReflectionRequest_ListServices:
		if mr != nil {
			v1alpha.MessageRequest = &refv1alpha.ServerReflectionRequest_ListServices{
				ListServices: mr.ListServices,
			}
		}
	default:
		// no value set
	}
	return &v1alpha
}
+
// toV1Response converts a v1alpha reflection response into the equivalent v1
// response, deep-copying each possible oneof payload. Nil oneof wrapper
// values are skipped, leaving MessageResponse unset.
func toV1Response(v1alpha *refv1alpha.ServerReflectionResponse) *refv1.ServerReflectionResponse {
	var v1 refv1.ServerReflectionResponse
	v1.ValidHost = v1alpha.ValidHost
	if v1alpha.OriginalRequest != nil {
		v1.OriginalRequest = toV1Request(v1alpha.OriginalRequest)
	}
	switch mr := v1alpha.MessageResponse.(type) {
	case *refv1alpha.ServerReflectionResponse_FileDescriptorResponse:
		if mr != nil {
			v1.MessageResponse = &refv1.ServerReflectionResponse_FileDescriptorResponse{
				FileDescriptorResponse: &refv1.FileDescriptorResponse{
					FileDescriptorProto: mr.FileDescriptorResponse.GetFileDescriptorProto(),
				},
			}
		}
	case *refv1alpha.ServerReflectionResponse_AllExtensionNumbersResponse:
		if mr != nil {
			v1.MessageResponse = &refv1.ServerReflectionResponse_AllExtensionNumbersResponse{
				AllExtensionNumbersResponse: &refv1.ExtensionNumberResponse{
					BaseTypeName:    mr.AllExtensionNumbersResponse.GetBaseTypeName(),
					ExtensionNumber: mr.AllExtensionNumbersResponse.GetExtensionNumber(),
				},
			}
		}
	case *refv1alpha.ServerReflectionResponse_ListServicesResponse:
		if mr != nil {
			// rebuild the service list element by element in the v1 types
			svcs := make([]*refv1.ServiceResponse, len(mr.ListServicesResponse.GetService()))
			for i, svc := range mr.ListServicesResponse.GetService() {
				svcs[i] = &refv1.ServiceResponse{
					Name: svc.GetName(),
				}
			}
			v1.MessageResponse = &refv1.ServerReflectionResponse_ListServicesResponse{
				ListServicesResponse: &refv1.ListServiceResponse{
					Service: svcs,
				},
			}
		}
	case *refv1alpha.ServerReflectionResponse_ErrorResponse:
		if mr != nil {
			v1.MessageResponse = &refv1.ServerReflectionResponse_ErrorResponse{
				ErrorResponse: &refv1.ErrorResponse{
					ErrorCode:    mr.ErrorResponse.GetErrorCode(),
					ErrorMessage: mr.ErrorResponse.GetErrorMessage(),
				},
			}
		}
	default:
		// no value set
	}
	return &v1
}
diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/client.go b/vendor/github.com/jhump/protoreflect/grpcreflect/client.go
new file mode 100644
index 0000000..1a35540
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/grpcreflect/client.go
@@ -0,0 +1,1018 @@
+package grpcreflect
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "reflect"
+ "runtime"
+ "sort"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ refv1 "google.golang.org/grpc/reflection/grpc_reflection_v1"
+ refv1alpha "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
+ "google.golang.org/grpc/status"
+ "google.golang.org/protobuf/reflect/protodesc"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/types/descriptorpb"
+
+ "github.com/jhump/protoreflect/desc"
+ "github.com/jhump/protoreflect/internal"
+)
+
// If we try the v1 reflection API and get back "not implemented", we'll wait
// this long before trying v1 again. This allows a long-lived client to
// dynamically switch from v1alpha to v1 if the underlying server is updated
// to support it. But it also prevents every stream request from always trying
// v1 first: if we try it and see it fail, we shouldn't continually retry it
// if we expect it will fail again. (See NewClientAuto for the user-facing
// description of this retry behavior.)
const durationBetweenV1Attempts = time.Hour
+
// elementNotFoundError is the error returned by reflective operations where the
// server does not recognize a given file name, symbol name, or extension.
type elementNotFoundError struct {
	name    string
	kind    elementKind
	symType symbolType // only used when kind == elementKindSymbol
	tag     int32      // only used when kind == elementKindExtension

	// only errors with a kind of elementKindFile will have a cause, which means
	// the named file could not be resolved because of a dependency that could
	// not be found where cause describes the missing dependency
	cause *elementNotFoundError
}
+
// elementKind distinguishes which category of element was not found.
type elementKind int

const (
	elementKindSymbol elementKind = iota
	elementKindFile
	elementKindExtension
)

// symbolType names the kind of symbol, for use in error messages.
type symbolType string

// Note: these are untyped string constants, assignable to symbolType or string.
const (
	symbolTypeService = "Service"
	symbolTypeMessage = "Message"
	symbolTypeEnum    = "Enum"
	symbolTypeUnknown = "Symbol"
)
+
+func symbolNotFound(symbol string, symType symbolType, cause *elementNotFoundError) error {
+ if cause != nil && cause.kind == elementKindSymbol && cause.name == symbol {
+ // no need to wrap
+ if symType != symbolTypeUnknown && cause.symType == symbolTypeUnknown {
+ // We previously didn't know symbol type but now do?
+ // Create a new error that has the right symbol type.
+ return &elementNotFoundError{name: symbol, symType: symType, kind: elementKindSymbol}
+ }
+ return cause
+ }
+ return &elementNotFoundError{name: symbol, symType: symType, kind: elementKindSymbol, cause: cause}
+}
+
+func extensionNotFound(extendee string, tag int32, cause *elementNotFoundError) error {
+ if cause != nil && cause.kind == elementKindExtension && cause.name == extendee && cause.tag == tag {
+ // no need to wrap
+ return cause
+ }
+ return &elementNotFoundError{name: extendee, tag: tag, kind: elementKindExtension, cause: cause}
+}
+
+func fileNotFound(file string, cause *elementNotFoundError) error {
+ if cause != nil && cause.kind == elementKindFile && cause.name == file {
+ // no need to wrap
+ return cause
+ }
+ return &elementNotFoundError{name: file, kind: elementKindFile, cause: cause}
+}
+
+func (e *elementNotFoundError) Error() string {
+ first := true
+ var b bytes.Buffer
+ for ; e != nil; e = e.cause {
+ if first {
+ first = false
+ } else {
+ _, _ = fmt.Fprint(&b, "\ncaused by: ")
+ }
+ switch e.kind {
+ case elementKindSymbol:
+ _, _ = fmt.Fprintf(&b, "%s not found: %s", e.symType, e.name)
+ case elementKindExtension:
+ _, _ = fmt.Fprintf(&b, "Extension not found: tag %d for %s", e.tag, e.name)
+ default:
+ _, _ = fmt.Fprintf(&b, "File not found: %s", e.name)
+ }
+ }
+ return b.String()
+}
+
// IsElementNotFoundError determines if the given error indicates that a file
// name, symbol name, or extension field could not be found by the server.
// Note: this uses a direct type assertion, so it does not unwrap errors
// wrapped with fmt.Errorf("%w", ...).
func IsElementNotFoundError(err error) bool {
	_, ok := err.(*elementNotFoundError)
	return ok
}
+
// ProtocolError is an error returned when the server sends a response of the
// wrong type.
type ProtocolError struct {
	// missingType is the response payload type that was expected but absent.
	missingType reflect.Type
}

// Error implements the error interface, naming the missing response type.
func (p ProtocolError) Error() string {
	return fmt.Sprintf("Protocol error: response was missing %v", p.missingType)
}
+
// extDesc is a cache key identifying an extension by the extended message's
// fully-qualified name and the extension's tag number.
type extDesc struct {
	extendedMessageName string
	extensionNumber     int32
}

// resolvers bundles the optional fallback resolvers installed via
// AllowFallbackResolver.
type resolvers struct {
	descriptorResolver protodesc.Resolver
	extensionResolver  protoregistry.ExtensionTypeResolver
}

// fileEntry is a cached file descriptor; fallback records whether it came
// from the fallback resolver rather than from the server.
type fileEntry struct {
	fd       *desc.FileDescriptor
	fallback bool
}
+
// Client is a client connection to a server for performing reflection calls
// and resolving remote symbols.
type Client struct {
	ctx              context.Context
	now              func() time.Time // injectable clock (set to time.Now by newClient)
	stubV1           refv1.ServerReflectionClient
	stubV1Alpha      refv1alpha.ServerReflectionClient
	allowMissing     atomic.Bool               // see AllowMissingFileDescriptors
	fallbackResolver atomic.Pointer[resolvers] // see AllowFallbackResolver

	// connMu guards the stream state below.
	connMu      sync.Mutex
	cancel      context.CancelFunc
	stream      refv1.ServerReflection_ServerReflectionInfoClient
	useV1Alpha  bool
	lastTriedV1 time.Time

	// cacheMu guards the descriptor caches below.
	cacheMu          sync.RWMutex
	protosByName     map[string]*descriptorpb.FileDescriptorProto
	filesByName      map[string]fileEntry
	filesBySymbol    map[string]fileEntry
	filesByExtension map[extDesc]fileEntry
}
+
// NewClient creates a new Client with the given root context and using the
// given RPC stub for talking to the server.
//
// Deprecated: Use NewClientV1Alpha if you are intentionally pinning the
// v1alpha version of the reflection service. Otherwise, use NewClientAuto
// instead.
func NewClient(ctx context.Context, stub refv1alpha.ServerReflectionClient) *Client {
	// delegate to the v1alpha constructor; kept only for compatibility
	return NewClientV1Alpha(ctx, stub)
}
+
// NewClientV1Alpha creates a new Client using the v1alpha version of reflection
// with the given root context and using the given RPC stub for talking to the
// server. The resulting client never attempts the v1 service.
func NewClientV1Alpha(ctx context.Context, stub refv1alpha.ServerReflectionClient) *Client {
	return newClient(ctx, nil, stub)
}
+
// NewClientV1 creates a new Client using the v1 version of reflection with the
// given root context and using the given RPC stub for talking to the server.
// The resulting client never falls back to the v1alpha service.
func NewClientV1(ctx context.Context, stub refv1.ServerReflectionClient) *Client {
	return newClient(ctx, stub, nil)
}
+
// newClient builds a Client around the given stubs (either may be nil when the
// caller pins a single reflection version) with empty descriptor caches.
func newClient(ctx context.Context, stubv1 refv1.ServerReflectionClient, stubv1alpha refv1alpha.ServerReflectionClient) *Client {
	cr := &Client{
		ctx:              ctx,
		now:              time.Now,
		stubV1:           stubv1,
		stubV1Alpha:      stubv1alpha,
		protosByName:     map[string]*descriptorpb.FileDescriptorProto{},
		filesByName:      map[string]fileEntry{},
		filesBySymbol:    map[string]fileEntry{},
		filesByExtension: map[extDesc]fileEntry{},
	}
	// don't leak a grpc stream: Reset is invoked when the client is
	// garbage-collected without an explicit Reset call
	runtime.SetFinalizer(cr, (*Client).Reset)
	return cr
}
+
// NewClientAuto creates a new Client that will use either v1 or v1alpha version
// of reflection (based on what the server supports) with the given root context
// and using the given client connection.
//
// It will first try the v1 version of the reflection service. If it gets back
// an "Unimplemented" error, it will fall back to using the v1alpha version. It
// will remember which version the server supports for any subsequent operations
// that need to re-invoke the streaming RPC. But, if it's a very long-lived
// client, it will periodically retry the v1 version (in case the server is
// updated to support it also). The period for these retries is every hour.
func NewClientAuto(ctx context.Context, cc grpc.ClientConnInterface) *Client {
	stubv1 := refv1.NewServerReflectionClient(cc)
	stubv1alpha := refv1alpha.NewServerReflectionClient(cc)
	return newClient(ctx, stubv1, stubv1alpha)
}
+
// AllowMissingFileDescriptors configures the client to allow missing files
// when building descriptors when possible. Missing files are often fatal
// errors, but with this option they can sometimes be worked around. Building
// a schema can only succeed with some files missing if the files in question
// only provide custom options and/or other unused types.
//
// The setting is stored atomically and may be toggled concurrently with
// other client operations.
func (cr *Client) AllowMissingFileDescriptors() {
	cr.allowMissing.Store(true)
}
+
+// AllowFallbackResolver configures the client to allow falling back to the
+// given resolvers if the server is unable to supply descriptors for a particular
+// query. This allows working around issues where servers' reflection service
+// provides an incomplete set of descriptors, but the client has knowledge of
+// the missing descriptors from another source. It is usually most appropriate
+// to pass [protoregistry.GlobalFiles] and [protoregistry.GlobalTypes] as the
+// resolver values.
+//
+// The first value is used as a fallback for FileByFilename and FileContainingSymbol
+// queries. The second value is used as a fallback for FileContainingExtension. It
+// can also be used as a fallback for AllExtensionNumbersForType if it provides
+// a method with the following signature (which *[protoregistry.Types] provides):
+//
+// RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool)
+func (cr *Client) AllowFallbackResolver(descriptors protodesc.Resolver, exts protoregistry.ExtensionTypeResolver) {
+ if descriptors == nil && exts == nil {
+ cr.fallbackResolver.Store(nil)
+ } else {
+ cr.fallbackResolver.Store(&resolvers{
+ descriptorResolver: descriptors,
+ extensionResolver: exts,
+ })
+ }
+}
+
// FileByFilename asks the server for a file descriptor for the proto file with
// the given name. Results are served from the local cache when possible; on a
// server miss it retries under any well-known alternate name and finally the
// installed fallback resolver, before reporting a file-not-found error.
func (cr *Client) FileByFilename(filename string) (*desc.FileDescriptor, error) {
	// hit the cache first
	cr.cacheMu.RLock()
	if entry, ok := cr.filesByName[filename]; ok {
		cr.cacheMu.RUnlock()
		return entry.fd, nil
	}
	// not there? see if we've downloaded the proto
	fdp, ok := cr.protosByName[filename]
	cr.cacheMu.RUnlock()
	if ok {
		return cr.descriptorFromProto(fdp)
	}

	req := &refv1.ServerReflectionRequest{
		MessageRequest: &refv1.ServerReflectionRequest_FileByFilename{
			FileByFilename: filename,
		},
	}
	// accept guards against servers that answer with an unrelated file
	accept := func(fd *desc.FileDescriptor) bool {
		return fd.GetName() == filename
	}

	fd, err := cr.getAndCacheFileDescriptors(req, filename, "", accept)
	if isNotFound(err) {
		// File not found? see if we can look up via alternate name
		if alternate, ok := internal.StdFileAliases[filename]; ok {
			req := &refv1.ServerReflectionRequest{
				MessageRequest: &refv1.ServerReflectionRequest_FileByFilename{
					FileByFilename: alternate,
				},
			}
			fd, err = cr.getAndCacheFileDescriptors(req, alternate, filename, accept)
		}
	}
	if isNotFound(err) {
		// Still no? See if we can use a fallback resolver
		resolver := cr.fallbackResolver.Load()
		if resolver != nil && resolver.descriptorResolver != nil {
			fileDesc, fallbackErr := resolver.descriptorResolver.FindFileByPath(filename)
			if fallbackErr == nil {
				var wrapErr error
				fd, wrapErr = desc.WrapFile(fileDesc)
				if wrapErr == nil {
					fd = cr.cacheFile(fd, true)
					err = nil // clear error since we've succeeded via the fallback
				}
			}
		}
	}
	// normalize remaining errors into a (possibly chained) file-not-found error
	if isNotFound(err) {
		err = fileNotFound(filename, nil)
	} else if e, ok := err.(*elementNotFoundError); ok {
		err = fileNotFound(filename, e)
	}

	return fd, err
}
+
// FileContainingSymbol asks the server for a file descriptor for the proto file
// that declares the given fully-qualified symbol. Results are served from the
// local cache when possible; on a server miss it consults the installed
// fallback resolver before reporting a symbol-not-found error.
func (cr *Client) FileContainingSymbol(symbol string) (*desc.FileDescriptor, error) {
	// hit the cache first
	cr.cacheMu.RLock()
	entry, ok := cr.filesBySymbol[symbol]
	cr.cacheMu.RUnlock()
	if ok {
		return entry.fd, nil
	}

	req := &refv1.ServerReflectionRequest{
		MessageRequest: &refv1.ServerReflectionRequest_FileContainingSymbol{
			FileContainingSymbol: symbol,
		},
	}
	// accept guards against servers that answer with a file lacking the symbol
	accept := func(fd *desc.FileDescriptor) bool {
		return fd.FindSymbol(symbol) != nil
	}
	fd, err := cr.getAndCacheFileDescriptors(req, "", "", accept)
	if isNotFound(err) {
		// Symbol not found? See if we can use a fallback resolver
		resolver := cr.fallbackResolver.Load()
		if resolver != nil && resolver.descriptorResolver != nil {
			d, fallbackErr := resolver.descriptorResolver.FindDescriptorByName(protoreflect.FullName(symbol))
			if fallbackErr == nil {
				var wrapErr error
				fd, wrapErr = desc.WrapFile(d.ParentFile())
				if wrapErr == nil {
					fd = cr.cacheFile(fd, true)
					err = nil // clear error since we've succeeded via the fallback
				}
			}
		}
	}
	// normalize remaining errors into a (possibly chained) symbol-not-found error
	if isNotFound(err) {
		err = symbolNotFound(symbol, symbolTypeUnknown, nil)
	} else if e, ok := err.(*elementNotFoundError); ok {
		err = symbolNotFound(symbol, symbolTypeUnknown, e)
	}
	return fd, err
}
+
// FileContainingExtension asks the server for a file descriptor for the proto
// file that declares an extension with the given number for the given
// fully-qualified message name.
//
// Results are served from the extension cache when possible. On a NotFound
// from the server, the configured fallback extension resolver (if any) is
// consulted before the error is normalized to an *elementNotFoundError.
func (cr *Client) FileContainingExtension(extendedMessageName string, extensionNumber int32) (*desc.FileDescriptor, error) {
	// hit the cache first
	cr.cacheMu.RLock()
	entry, ok := cr.filesByExtension[extDesc{extendedMessageName, extensionNumber}]
	cr.cacheMu.RUnlock()
	if ok {
		return entry.fd, nil
	}

	req := &refv1.ServerReflectionRequest{
		MessageRequest: &refv1.ServerReflectionRequest_FileContainingExtension{
			FileContainingExtension: &refv1.ExtensionRequest{
				ContainingType:  extendedMessageName,
				ExtensionNumber: extensionNumber,
			},
		},
	}
	// only accept a file that actually defines the requested extension
	accept := func(fd *desc.FileDescriptor) bool {
		return fd.FindExtension(extendedMessageName, extensionNumber) != nil
	}
	fd, err := cr.getAndCacheFileDescriptors(req, "", "", accept)
	if isNotFound(err) {
		// Extension not found? See if we can use a fallback resolver
		resolver := cr.fallbackResolver.Load()
		if resolver != nil && resolver.extensionResolver != nil {
			extType, fallbackErr := resolver.extensionResolver.FindExtensionByNumber(protoreflect.FullName(extendedMessageName), protoreflect.FieldNumber(extensionNumber))
			if fallbackErr == nil {
				var wrapErr error
				fd, wrapErr = desc.WrapFile(extType.TypeDescriptor().ParentFile())
				if wrapErr == nil {
					// fallback entries are cached with lower priority (see canOverwrite)
					fd = cr.cacheFile(fd, true)
					err = nil // clear error since we've succeeded via the fallback
				}
			}
		}
	}
	if isNotFound(err) {
		err = extensionNotFound(extendedMessageName, extensionNumber, nil)
	} else if e, ok := err.(*elementNotFoundError); ok {
		err = extensionNotFound(extendedMessageName, extensionNumber, e)
	}
	return fd, err
}
+
// getAndCacheFileDescriptors sends req on the reflection stream, unmarshals
// and caches every file descriptor proto in the response, and returns the
// first resulting descriptor for which accept returns true.
//
// expectedName/alias support looking a file up under an alternate name: when
// the server returns a file named expectedName, its proto is renamed to alias
// before caching so it matches the name the caller originally requested.
// Both may be empty when no such renaming is needed.
func (cr *Client) getAndCacheFileDescriptors(req *refv1.ServerReflectionRequest, expectedName, alias string, accept func(*desc.FileDescriptor) bool) (*desc.FileDescriptor, error) {
	resp, err := cr.send(req)
	if err != nil {
		return nil, err
	}

	fdResp := resp.GetFileDescriptorResponse()
	if fdResp == nil {
		// server sent the wrong kind of response message
		return nil, &ProtocolError{reflect.TypeOf(fdResp).Elem()}
	}

	// Response can contain the result file descriptor, but also its transitive
	// deps. Furthermore, protocol states that subsequent requests do not need
	// to send transitive deps that have been sent in prior responses. So we
	// need to cache all file descriptors and then return the first one (which
	// should be the answer). If we're looking for a file by name, we can be
	// smarter and make sure to grab one by name instead of just grabbing the
	// first one.
	var fds []*descriptorpb.FileDescriptorProto
	for _, fdBytes := range fdResp.FileDescriptorProto {
		fd := &descriptorpb.FileDescriptorProto{}
		if err = proto.Unmarshal(fdBytes, fd); err != nil {
			return nil, err
		}

		if expectedName != "" && alias != "" && expectedName != alias && fd.GetName() == expectedName {
			// we found a file was aliased, so we need to update the proto to reflect that
			fd.Name = proto.String(alias)
		}

		cr.cacheMu.Lock()
		// store in cache of raw descriptor protos, but don't overwrite existing protos
		if existingFd, ok := cr.protosByName[fd.GetName()]; ok {
			fd = existingFd
		} else {
			cr.protosByName[fd.GetName()] = fd
		}
		cr.cacheMu.Unlock()

		fds = append(fds, fd)
	}

	// find the right result from the files returned
	for _, fd := range fds {
		result, err := cr.descriptorFromProto(fd)
		if err != nil {
			return nil, err
		}
		if accept(result) {
			return result, nil
		}
	}

	return nil, status.Errorf(codes.NotFound, "response does not include expected file")
}
+
// descriptorFromProto resolves fd's dependencies (recursively, via
// FileByFilename), links everything into a rich *desc.FileDescriptor, and
// caches the result.
//
// When the client is configured to allow missing dependencies, unresolvable
// imports are stripped from the proto before linking; the first such
// resolution error is remembered and reported only if linking then fails
// (on the assumption the missing dep was the cause).
func (cr *Client) descriptorFromProto(fd *descriptorpb.FileDescriptorProto) (*desc.FileDescriptor, error) {
	allowMissing := cr.allowMissing.Load()
	deps := make([]*desc.FileDescriptor, 0, len(fd.GetDependency()))
	var deferredErr error
	var missingDeps []int
	for i, depName := range fd.GetDependency() {
		if dep, err := cr.FileByFilename(depName); err != nil {
			if _, ok := err.(*elementNotFoundError); !ok || !allowMissing {
				return nil, err
			}
			// We'll ignore for now to see if the file is really necessary.
			// (If it only supplies custom options, we can get by without it.)
			if deferredErr == nil {
				deferredErr = err
			}
			missingDeps = append(missingDeps, i)
		} else {
			deps = append(deps, dep)
		}
	}
	if len(missingDeps) > 0 {
		fd = fileWithoutDeps(fd, missingDeps)
	}
	d, err := desc.CreateFileDescriptor(fd, deps...)
	if err != nil {
		if deferredErr != nil {
			// assume the issue is the missing dep
			return nil, deferredErr
		}
		return nil, err
	}
	// cache as a server-sourced (non-fallback) entry
	d = cr.cacheFile(d, false)
	return d, nil
}
+
// cacheFile indexes fd by file name and by every symbol and extension it
// declares. fallback indicates the descriptor came from the fallback resolver
// rather than the server; fallback entries never displace existing entries
// and may themselves be displaced by server-sourced ones (see canOverwrite).
//
// Returns the descriptor that ended up in the cache, which may be a
// previously cached one (e.g. written by a concurrent caller).
func (cr *Client) cacheFile(fd *desc.FileDescriptor, fallback bool) *desc.FileDescriptor {
	cr.cacheMu.Lock()
	defer cr.cacheMu.Unlock()

	// Cache file descriptor by name. If we can't overwrite an existing
	// entry, return it. (Existing entry could come from concurrent caller.)
	if existing, ok := cr.filesByName[fd.GetName()]; ok && !canOverwrite(existing, fallback) {
		return existing.fd
	}
	entry := fileEntry{fd: fd, fallback: fallback}
	cr.filesByName[fd.GetName()] = entry

	// also cache by symbols and extensions
	for _, m := range fd.GetMessageTypes() {
		cr.cacheMessageLocked(m, entry)
	}
	for _, e := range fd.GetEnumTypes() {
		// if the enum itself can't be cached, skip its values too
		if !cr.maybeCacheFileBySymbol(e.GetFullyQualifiedName(), entry) {
			continue
		}
		for _, v := range e.GetValues() {
			cr.maybeCacheFileBySymbol(v.GetFullyQualifiedName(), entry)
		}
	}
	for _, e := range fd.GetExtensions() {
		if !cr.maybeCacheFileBySymbol(e.GetFullyQualifiedName(), entry) {
			continue
		}
		cr.maybeCacheFileByExtension(extDesc{e.GetOwner().GetFullyQualifiedName(), e.GetNumber()}, entry)
	}
	for _, s := range fd.GetServices() {
		if !cr.maybeCacheFileBySymbol(s.GetFullyQualifiedName(), entry) {
			continue
		}
		for _, m := range s.GetMethods() {
			cr.maybeCacheFileBySymbol(m.GetFullyQualifiedName(), entry)
		}
	}

	return fd
}
+
// cacheMessageLocked indexes md and everything nested inside it (fields,
// oneofs, nested enums and their values, nested extensions, and nested
// messages, recursively) under entry.
//
// Must be called with cr.cacheMu held for writing (cacheFile holds it).
func (cr *Client) cacheMessageLocked(md *desc.MessageDescriptor, entry fileEntry) {
	// if the message itself can't be cached, don't cache its contents either
	if !cr.maybeCacheFileBySymbol(md.GetFullyQualifiedName(), entry) {
		return
	}
	for _, f := range md.GetFields() {
		cr.maybeCacheFileBySymbol(f.GetFullyQualifiedName(), entry)
	}
	for _, o := range md.GetOneOfs() {
		cr.maybeCacheFileBySymbol(o.GetFullyQualifiedName(), entry)
	}
	for _, e := range md.GetNestedEnumTypes() {
		if !cr.maybeCacheFileBySymbol(e.GetFullyQualifiedName(), entry) {
			continue
		}
		for _, v := range e.GetValues() {
			cr.maybeCacheFileBySymbol(v.GetFullyQualifiedName(), entry)
		}
	}
	for _, e := range md.GetNestedExtensions() {
		if !cr.maybeCacheFileBySymbol(e.GetFullyQualifiedName(), entry) {
			continue
		}
		cr.maybeCacheFileByExtension(extDesc{e.GetOwner().GetFullyQualifiedName(), e.GetNumber()}, entry)
	}
	for _, m := range md.GetNestedMessageTypes() {
		cr.cacheMessageLocked(m, entry) // recurse
	}
}
+
+func canOverwrite(existing fileEntry, fallback bool) bool {
+ return !fallback && existing.fallback
+}
+
+func (cr *Client) maybeCacheFileBySymbol(symbol string, entry fileEntry) bool {
+ existing, ok := cr.filesBySymbol[symbol]
+ if ok && !canOverwrite(existing, entry.fallback) {
+ return false
+ }
+ cr.filesBySymbol[symbol] = entry
+ return true
+}
+
+func (cr *Client) maybeCacheFileByExtension(ext extDesc, entry fileEntry) {
+ existing, ok := cr.filesByExtension[ext]
+ if ok && !canOverwrite(existing, entry.fallback) {
+ return
+ }
+ cr.filesByExtension[ext] = entry
+}
+
// AllExtensionNumbersForType asks the server for all known extension numbers
// for the given fully-qualified message name.
//
// If a fallback resolver is configured and supports enumerating extensions,
// its results are merged (de-duplicated and sorted) with the server's. A
// NotFound from the server is treated as "no known extensions" rather than
// an error.
func (cr *Client) AllExtensionNumbersForType(extendedMessageName string) ([]int32, error) {
	req := &refv1.ServerReflectionRequest{
		MessageRequest: &refv1.ServerReflectionRequest_AllExtensionNumbersOfType{
			AllExtensionNumbersOfType: extendedMessageName,
		},
	}
	resp, err := cr.send(req)
	var exts []int32
	// If the server doesn't know about the message type and returns "not found",
	// we'll treat that as "no known extensions" instead of returning an error.
	if err != nil && !isNotFound(err) {
		return nil, err
	}
	if err == nil {
		extResp := resp.GetAllExtensionNumbersResponse()
		if extResp == nil {
			// server sent the wrong kind of response message
			return nil, &ProtocolError{reflect.TypeOf(extResp).Elem()}
		}
		exts = extResp.ExtensionNumber
	}

	resolver := cr.fallbackResolver.Load()
	if resolver != nil && resolver.extensionResolver != nil {
		// protoregistry.Types implements this, but the interface the resolver
		// was declared with may not include it, hence the local assertion
		type extRanger interface {
			RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool)
		}
		if ranger, ok := resolver.extensionResolver.(extRanger); ok {
			// Merge results with fallback resolver
			extSet := map[int32]struct{}{}
			ranger.RangeExtensionsByMessage(protoreflect.FullName(extendedMessageName), func(extType protoreflect.ExtensionType) bool {
				extSet[int32(extType.TypeDescriptor().Number())] = struct{}{}
				return true
			})
			if len(extSet) > 0 {
				// De-dupe with the set of extension numbers we got
				// from the server and merge the results back into exts.
				for _, ext := range exts {
					extSet[ext] = struct{}{}
				}
				exts = make([]int32, 0, len(extSet))
				for ext := range extSet {
					exts = append(exts, ext)
				}
				// map iteration order is random, so sort for determinism
				sort.Slice(exts, func(i, j int) bool { return exts[i] < exts[j] })
			}
		}
	}
	return exts, nil
}
+
+// ListServices asks the server for the fully-qualified names of all exposed
+// services.
+func (cr *Client) ListServices() ([]string, error) {
+ req := &refv1.ServerReflectionRequest{
+ MessageRequest: &refv1.ServerReflectionRequest_ListServices{
+ // proto doesn't indicate any purpose for this value and server impl
+ // doesn't actually use it...
+ ListServices: "*",
+ },
+ }
+ resp, err := cr.send(req)
+ if err != nil {
+ return nil, err
+ }
+
+ listResp := resp.GetListServicesResponse()
+ if listResp == nil {
+ return nil, &ProtocolError{reflect.TypeOf(listResp).Elem()}
+ }
+ serviceNames := make([]string, len(listResp.Service))
+ for i, s := range listResp.Service {
+ serviceNames[i] = s.Name
+ }
+ return serviceNames, nil
+}
+
// send issues req on the reflection stream and converts an error response
// message from the server into a Go error. Stale-stream retries are handled
// below in doSend/doSendLocked (up to three attempts), not here.
func (cr *Client) send(req *refv1.ServerReflectionRequest) (*refv1.ServerReflectionResponse, error) {
	resp, err := cr.doSend(req)
	if err != nil {
		return nil, err
	}

	// convert error response messages into errors
	errResp := resp.GetErrorResponse()
	if errResp != nil {
		return nil, status.Errorf(codes.Code(errResp.ErrorCode), "%s", errResp.ErrorMessage)
	}

	return resp, nil
}
+
+func isNotFound(err error) bool {
+ if err == nil {
+ return false
+ }
+ s, ok := status.FromError(err)
+ return ok && s.Code() == codes.NotFound
+}
+
// doSend serializes access to the underlying reflection stream and delegates
// to doSendLocked, which handles retries on stale streams.
func (cr *Client) doSend(req *refv1.ServerReflectionRequest) (*refv1.ServerReflectionResponse, error) {
	// TODO: Streams are thread-safe, so we shouldn't need to lock. But without locking, we'll need more machinery
	// (goroutines and channels) to ensure that responses are correctly correlated with their requests and thus
	// delivered in correct order.
	cr.connMu.Lock()
	defer cr.connMu.Unlock()
	return cr.doSendLocked(0, nil, req)
}
+
// doSendLocked performs one send/recv round-trip on the stream, recursing to
// retry (resetting the stream each time) on failure, up to three attempts.
// It also downgrades from the v1 reflection service to v1alpha when the
// previous attempt suggests v1 is not supported.
//
// Must be called with cr.connMu held (doSend holds it).
func (cr *Client) doSendLocked(attemptCount int, prevErr error, req *refv1.ServerReflectionRequest) (*refv1.ServerReflectionResponse, error) {
	// give up after three failed attempts and surface the last error
	if attemptCount >= 3 && prevErr != nil {
		return nil, prevErr
	}
	if (status.Code(prevErr) == codes.Unimplemented ||
		status.Code(prevErr) == codes.Unavailable) &&
		cr.useV1() {
		// If v1 is unimplemented, fallback to v1alpha.
		// We also fallback on unavailable because some servers have been
		// observed to close the connection/cancel the stream, w/out sending
		// back status or headers, when the service name is not known. When
		// this happens, the RPC status code is unavailable.
		// See https://github.com/fullstorydev/grpcurl/issues/434
		cr.useV1Alpha = true
		cr.lastTriedV1 = cr.now()
	}
	attemptCount++

	if err := cr.initStreamLocked(); err != nil {
		return nil, err
	}

	if err := cr.stream.Send(req); err != nil {
		if err == io.EOF {
			// if send returns EOF, must call Recv to get real underlying error
			_, err = cr.stream.Recv()
		}
		cr.resetLocked()
		return cr.doSendLocked(attemptCount, err, req)
	}

	resp, err := cr.stream.Recv()
	if err != nil {
		cr.resetLocked()
		return cr.doSendLocked(attemptCount, err, req)
	}
	return resp, nil
}
+
+func (cr *Client) initStreamLocked() error {
+ if cr.stream != nil {
+ return nil
+ }
+ var newCtx context.Context
+ newCtx, cr.cancel = context.WithCancel(cr.ctx)
+ if cr.useV1Alpha == true && cr.now().Sub(cr.lastTriedV1) > durationBetweenV1Attempts {
+ // we're due for periodic retry of v1
+ cr.useV1Alpha = false
+ }
+ if cr.useV1() {
+ // try the v1 API
+ streamv1, err := cr.stubV1.ServerReflectionInfo(newCtx)
+ if err == nil {
+ cr.stream = streamv1
+ return nil
+ }
+ if status.Code(err) != codes.Unimplemented {
+ return err
+ }
+ // oh well, fall through below to try v1alpha and update state
+ // so we skip straight to v1alpha next time
+ cr.useV1Alpha = true
+ cr.lastTriedV1 = cr.now()
+ }
+ var err error
+ streamv1alpha, err := cr.stubV1Alpha.ServerReflectionInfo(newCtx)
+ if err == nil {
+ cr.stream = adaptStreamFromV1Alpha{streamv1alpha}
+ return nil
+ }
+ return err
+}
+
+func (cr *Client) useV1() bool {
+ return !cr.useV1Alpha && cr.stubV1 != nil
+}
+
// Reset ensures that any active stream with the server is closed, releasing any
// resources. It is safe to call concurrently with in-flight requests; the
// connection mutex serializes access to the stream.
func (cr *Client) Reset() {
	cr.connMu.Lock()
	defer cr.connMu.Unlock()
	cr.resetLocked()
}
+
// resetLocked closes and discards the current stream (if any), draining any
// buffered responses, and cancels the stream's context.
//
// Must be called with cr.connMu held.
func (cr *Client) resetLocked() {
	if cr.stream != nil {
		_ = cr.stream.CloseSend() // best effort; error intentionally ignored
		for {
			// drain the stream, this covers io.EOF too
			if _, err := cr.stream.Recv(); err != nil {
				break
			}
		}
		cr.stream = nil
	}
	if cr.cancel != nil {
		cr.cancel()
		cr.cancel = nil
	}
}
+
+// ResolveService asks the server to resolve the given fully-qualified service
+// name into a service descriptor.
+func (cr *Client) ResolveService(serviceName string) (*desc.ServiceDescriptor, error) {
+ file, err := cr.FileContainingSymbol(serviceName)
+ if err != nil {
+ return nil, setSymbolType(err, serviceName, symbolTypeService)
+ }
+ d := file.FindSymbol(serviceName)
+ if d == nil {
+ return nil, symbolNotFound(serviceName, symbolTypeService, nil)
+ }
+ if s, ok := d.(*desc.ServiceDescriptor); ok {
+ return s, nil
+ } else {
+ return nil, symbolNotFound(serviceName, symbolTypeService, nil)
+ }
+}
+
+// ResolveMessage asks the server to resolve the given fully-qualified message
+// name into a message descriptor.
+func (cr *Client) ResolveMessage(messageName string) (*desc.MessageDescriptor, error) {
+ file, err := cr.FileContainingSymbol(messageName)
+ if err != nil {
+ return nil, setSymbolType(err, messageName, symbolTypeMessage)
+ }
+ d := file.FindSymbol(messageName)
+ if d == nil {
+ return nil, symbolNotFound(messageName, symbolTypeMessage, nil)
+ }
+ if s, ok := d.(*desc.MessageDescriptor); ok {
+ return s, nil
+ } else {
+ return nil, symbolNotFound(messageName, symbolTypeMessage, nil)
+ }
+}
+
+// ResolveEnum asks the server to resolve the given fully-qualified enum name
+// into an enum descriptor.
+func (cr *Client) ResolveEnum(enumName string) (*desc.EnumDescriptor, error) {
+ file, err := cr.FileContainingSymbol(enumName)
+ if err != nil {
+ return nil, setSymbolType(err, enumName, symbolTypeEnum)
+ }
+ d := file.FindSymbol(enumName)
+ if d == nil {
+ return nil, symbolNotFound(enumName, symbolTypeEnum, nil)
+ }
+ if s, ok := d.(*desc.EnumDescriptor); ok {
+ return s, nil
+ } else {
+ return nil, symbolNotFound(enumName, symbolTypeEnum, nil)
+ }
+}
+
+func setSymbolType(err error, name string, symType symbolType) error {
+ if e, ok := err.(*elementNotFoundError); ok {
+ if e.kind == elementKindSymbol && e.name == name && e.symType == symbolTypeUnknown {
+ e.symType = symType
+ }
+ }
+ return err
+}
+
+// ResolveEnumValues asks the server to resolve the given fully-qualified enum
+// name into a map of names to numbers that represents the enum's values.
+func (cr *Client) ResolveEnumValues(enumName string) (map[string]int32, error) {
+ enumDesc, err := cr.ResolveEnum(enumName)
+ if err != nil {
+ return nil, err
+ }
+ vals := map[string]int32{}
+ for _, valDesc := range enumDesc.GetValues() {
+ vals[valDesc.GetName()] = valDesc.GetNumber()
+ }
+ return vals, nil
+}
+
+// ResolveExtension asks the server to resolve the given extension number and
+// fully-qualified message name into a field descriptor.
+func (cr *Client) ResolveExtension(extendedType string, extensionNumber int32) (*desc.FieldDescriptor, error) {
+ file, err := cr.FileContainingExtension(extendedType, extensionNumber)
+ if err != nil {
+ return nil, err
+ }
+ d := findExtension(extendedType, extensionNumber, fileDescriptorExtensions{file})
+ if d == nil {
+ return nil, extensionNotFound(extendedType, extensionNumber, nil)
+ } else {
+ return d, nil
+ }
+}
+
+func fileWithoutDeps(fd *descriptorpb.FileDescriptorProto, missingDeps []int) *descriptorpb.FileDescriptorProto {
+ // We need to rebuild the file without the missing deps.
+ fd = proto.Clone(fd).(*descriptorpb.FileDescriptorProto)
+ newNumDeps := len(fd.GetDependency()) - len(missingDeps)
+ newDeps := make([]string, 0, newNumDeps)
+ remapped := make(map[int]int, newNumDeps)
+ missingIdx := 0
+ for i, dep := range fd.GetDependency() {
+ if missingIdx < len(missingDeps) {
+ if i == missingDeps[missingIdx] {
+ // This dep was missing. Skip it.
+ missingIdx++
+ continue
+ }
+ }
+ remapped[i] = len(newDeps)
+ newDeps = append(newDeps, dep)
+ }
+ // Also rebuild public and weak import slices.
+ newPublic := make([]int32, 0, len(fd.GetPublicDependency()))
+ for _, idx := range fd.GetPublicDependency() {
+ newIdx, ok := remapped[int(idx)]
+ if ok {
+ newPublic = append(newPublic, int32(newIdx))
+ }
+ }
+ newWeak := make([]int32, 0, len(fd.GetWeakDependency()))
+ for _, idx := range fd.GetWeakDependency() {
+ newIdx, ok := remapped[int(idx)]
+ if ok {
+ newWeak = append(newWeak, int32(newIdx))
+ }
+ }
+
+ fd.Dependency = newDeps
+ fd.PublicDependency = newPublic
+ fd.WeakDependency = newWeak
+ return fd
+}
+
+func findExtension(extendedType string, extensionNumber int32, scope extensionScope) *desc.FieldDescriptor {
+ // search extensions in this scope
+ for _, ext := range scope.extensions() {
+ if ext.GetNumber() == extensionNumber && ext.GetOwner().GetFullyQualifiedName() == extendedType {
+ return ext
+ }
+ }
+
+ // if not found, search nested scopes
+ for _, nested := range scope.nestedScopes() {
+ ext := findExtension(extendedType, extensionNumber, nested)
+ if ext != nil {
+ return ext
+ }
+ }
+
+ return nil
+}
+
// extensionScope is a node in the hierarchy searched by findExtension: a
// container (file or message) that declares extensions directly and may
// contain nested containers.
type extensionScope interface {
	// extensions returns the extensions declared directly in this scope.
	extensions() []*desc.FieldDescriptor
	// nestedScopes returns the scopes nested immediately inside this one.
	nestedScopes() []extensionScope
}
+
+// fileDescriptorExtensions implements extensionHolder interface on top of
+// FileDescriptorProto
+type fileDescriptorExtensions struct {
+ proto *desc.FileDescriptor
+}
+
+func (fde fileDescriptorExtensions) extensions() []*desc.FieldDescriptor {
+ return fde.proto.GetExtensions()
+}
+
+func (fde fileDescriptorExtensions) nestedScopes() []extensionScope {
+ scopes := make([]extensionScope, len(fde.proto.GetMessageTypes()))
+ for i, m := range fde.proto.GetMessageTypes() {
+ scopes[i] = msgDescriptorExtensions{m}
+ }
+ return scopes
+}
+
+// msgDescriptorExtensions implements extensionHolder interface on top of
+// DescriptorProto
+type msgDescriptorExtensions struct {
+ proto *desc.MessageDescriptor
+}
+
+func (mde msgDescriptorExtensions) extensions() []*desc.FieldDescriptor {
+ return mde.proto.GetNestedExtensions()
+}
+
+func (mde msgDescriptorExtensions) nestedScopes() []extensionScope {
+ scopes := make([]extensionScope, len(mde.proto.GetNestedMessageTypes()))
+ for i, m := range mde.proto.GetNestedMessageTypes() {
+ scopes[i] = msgDescriptorExtensions{m}
+ }
+ return scopes
+}
+
+type adaptStreamFromV1Alpha struct {
+ refv1alpha.ServerReflection_ServerReflectionInfoClient
+}
+
+func (a adaptStreamFromV1Alpha) Send(request *refv1.ServerReflectionRequest) error {
+ v1req := toV1AlphaRequest(request)
+ return a.ServerReflection_ServerReflectionInfoClient.Send(v1req)
+}
+
+func (a adaptStreamFromV1Alpha) Recv() (*refv1.ServerReflectionResponse, error) {
+ v1resp, err := a.ServerReflection_ServerReflectionInfoClient.Recv()
+ if err != nil {
+ return nil, err
+ }
+ return toV1Response(v1resp), nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/doc.go b/vendor/github.com/jhump/protoreflect/grpcreflect/doc.go
new file mode 100644
index 0000000..ec7bd02
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/grpcreflect/doc.go
@@ -0,0 +1,10 @@
// Package grpcreflect provides gRPC-specific extensions to protobuf reflection.
// This includes a way to access rich service descriptors for all services that
// a gRPC server exports.
//
// Also included is an easy-to-use client for the gRPC reflection service
// (https://goo.gl/2ILAHf). This client makes it easy to ask a server (that
// supports the reflection service) for metadata on its exported services, which
// could be used to construct a dynamic client. (See the grpcdynamic package in
// this same repo for more on that.)
+package grpcreflect
diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/server.go b/vendor/github.com/jhump/protoreflect/grpcreflect/server.go
new file mode 100644
index 0000000..7ff1912
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/grpcreflect/server.go
@@ -0,0 +1,67 @@
+package grpcreflect
+
+import (
+ "fmt"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/reflection"
+
+ "github.com/jhump/protoreflect/desc"
+)
+
// GRPCServer is the interface provided by a gRPC server. In addition to being a
// service registrar (for registering services and handlers), it also has an
// accessor for retrieving metadata about all registered services.
//
// This is an alias of reflection.GRPCServer, kept for backward compatibility.
type GRPCServer = reflection.GRPCServer
+
+// LoadServiceDescriptors loads the service descriptors for all services exposed by the
+// given GRPC server.
+func LoadServiceDescriptors(s GRPCServer) (map[string]*desc.ServiceDescriptor, error) {
+ descs := map[string]*desc.ServiceDescriptor{}
+ for name, info := range s.GetServiceInfo() {
+ file, ok := info.Metadata.(string)
+ if !ok {
+ return nil, fmt.Errorf("service %q has unexpected metadata: expecting a string; got %v", name, info.Metadata)
+ }
+ fd, err := desc.LoadFileDescriptor(file)
+ if err != nil {
+ return nil, err
+ }
+ d := fd.FindSymbol(name)
+ if d == nil {
+ return nil, fmt.Errorf("file descriptor for %q has no element named %q", file, name)
+ }
+ sd, ok := d.(*desc.ServiceDescriptor)
+ if !ok {
+ return nil, fmt.Errorf("file descriptor for %q has incorrect element named %q: expecting a service descriptor; got %v", file, name, d)
+ }
+ descs[name] = sd
+ }
+ return descs, nil
+}
+
+// LoadServiceDescriptor loads a rich descriptor for a given service description
+// generated by protoc-gen-go. Generated code contains an unexported symbol with
+// a name like "_<Service>_serviceDesc" which is the service's description. It
+// is used internally to register a service implementation with a GRPC server.
+// But it can also be used by this package to retrieve the rich descriptor for
+// the service.
+func LoadServiceDescriptor(svc *grpc.ServiceDesc) (*desc.ServiceDescriptor, error) {
+ file, ok := svc.Metadata.(string)
+ if !ok {
+ return nil, fmt.Errorf("service %q has unexpected metadata: expecting a string; got %v", svc.ServiceName, svc.Metadata)
+ }
+ fd, err := desc.LoadFileDescriptor(file)
+ if err != nil {
+ return nil, err
+ }
+ d := fd.FindSymbol(svc.ServiceName)
+ if d == nil {
+ return nil, fmt.Errorf("file descriptor for %q has no element named %q", file, svc.ServiceName)
+ }
+ sd, ok := d.(*desc.ServiceDescriptor)
+ if !ok {
+ return nil, fmt.Errorf("file descriptor for %q has incorrect element named %q: expecting a service descriptor; got %v", file, svc.ServiceName, d)
+ }
+ return sd, nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/internal/codec/buffer.go b/vendor/github.com/jhump/protoreflect/internal/codec/buffer.go
new file mode 100644
index 0000000..09f8849
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/internal/codec/buffer.go
@@ -0,0 +1,118 @@
+package codec
+
+import (
+ "fmt"
+ "io"
+)
+
// Buffer is a reader and a writer that wraps a slice of bytes and also
// provides API for decoding and encoding the protobuf binary format.
//
// It operates like a bytes.Buffer: writes append data at the tail while
// reads consume data from the head, so a single Buffer can serve both roles.
type Buffer struct {
	buf   []byte
	index int

	// tmp is scratch space used when a second byte slice is needed, such as
	// when serializing sub-messages whose length must be known before the
	// length prefix can be written; retaining it (including growth from
	// prior use) reduces the number of allocations needed
	tmp []byte

	deterministic bool
}

// NewBuffer creates a new buffer with the given slice of bytes as the
// buffer's initial contents.
func NewBuffer(buf []byte) *Buffer {
	return &Buffer{buf: buf}
}

// SetDeterministic sets this buffer to encode messages deterministically. This
// is useful for tests. But the overhead is non-zero, so it should not likely be
// used outside of tests. When true, map fields in a message must have their
// keys sorted before serialization to ensure deterministic output. Otherwise,
// values in a map field will be serialized in map iteration order.
func (cb *Buffer) SetDeterministic(deterministic bool) {
	cb.deterministic = deterministic
}

// IsDeterministic returns whether or not this buffer is configured to encode
// messages deterministically.
func (cb *Buffer) IsDeterministic() bool {
	return cb.deterministic
}

// Reset resets this buffer back to empty. Any subsequent writes/encodes
// to the buffer will allocate a new backing slice of bytes.
func (cb *Buffer) Reset() {
	cb.buf = nil
	cb.index = 0
}

// Bytes returns the slice of bytes remaining in the buffer. Note that
// this does not perform a copy: if the contents of the returned slice
// are modified, the modifications will be visible to subsequent reads
// via the buffer.
func (cb *Buffer) Bytes() []byte {
	return cb.buf[cb.index:]
}

// String returns the remaining bytes in the buffer as a string.
func (cb *Buffer) String() string {
	return string(cb.buf[cb.index:])
}

// EOF returns true if there are no more bytes remaining to read.
func (cb *Buffer) EOF() bool {
	return len(cb.buf) <= cb.index
}

// Skip attempts to skip the given number of bytes in the input. If
// the input has fewer bytes than the given count, io.ErrUnexpectedEOF
// is returned and the buffer is unchanged. Otherwise, the given number
// of bytes are skipped and nil is returned.
func (cb *Buffer) Skip(count int) error {
	if count < 0 {
		return fmt.Errorf("proto: bad byte length %d", count)
	}
	next := cb.index + count
	// the first comparison also guards against integer overflow
	if next < cb.index || next > len(cb.buf) {
		return io.ErrUnexpectedEOF
	}
	cb.index = next
	return nil
}

// Len returns the remaining number of bytes in the buffer.
func (cb *Buffer) Len() int {
	return len(cb.buf) - cb.index
}

// Read implements the io.Reader interface. If there are no bytes
// remaining in the buffer, it returns 0, io.EOF. Otherwise, it copies
// up to min(len(dest), cb.Len()) bytes into dest and returns the number
// of bytes copied with a nil error.
func (cb *Buffer) Read(dest []byte) (int, error) {
	if len(cb.buf) == cb.index {
		return 0, io.EOF
	}
	n := copy(dest, cb.buf[cb.index:])
	cb.index += n
	return n, nil
}

var _ io.Reader = (*Buffer)(nil)

// Write implements the io.Writer interface. It always returns
// len(data), nil.
func (cb *Buffer) Write(data []byte) (int, error) {
	cb.buf = append(cb.buf, data...)
	return len(data), nil
}

var _ io.Writer = (*Buffer)(nil)
diff --git a/vendor/github.com/jhump/protoreflect/internal/codec/decode.go b/vendor/github.com/jhump/protoreflect/internal/codec/decode.go
new file mode 100644
index 0000000..a25f680
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/internal/codec/decode.go
@@ -0,0 +1,346 @@
+package codec
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "math"
+
+ "github.com/golang/protobuf/proto"
+)
+
// ErrOverflow is returned when a varint-encoded integer is too large
// to be represented in a 64-bit value.
var ErrOverflow = errors.New("proto: integer overflow")

// ErrBadWireType is returned when decoding a wire-type from a buffer that
// is not valid (i.e. not one of the wire types defined by protobuf).
var ErrBadWireType = errors.New("proto: bad wiretype")
+
+func (cb *Buffer) decodeVarintSlow() (x uint64, err error) {
+ i := cb.index
+ l := len(cb.buf)
+
+ for shift := uint(0); shift < 64; shift += 7 {
+ if i >= l {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ b := cb.buf[i]
+ i++
+ x |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ cb.index = i
+ return
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ err = ErrOverflow
+ return
+}
+
// DecodeVarint reads a varint-encoded integer from the Buffer.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
//
// The single-byte case is handled inline; when at least 10 bytes remain
// (the maximum width of a 64-bit varint) a fully unrolled decoder runs
// without per-byte bounds checks; otherwise decodeVarintSlow is used.
func (cb *Buffer) DecodeVarint() (uint64, error) {
	i := cb.index
	buf := cb.buf

	if i >= len(buf) {
		return 0, io.ErrUnexpectedEOF
	} else if buf[i] < 0x80 {
		// Fast path: single-byte varint.
		cb.index++
		return uint64(buf[i]), nil
	} else if len(buf)-i < 10 {
		// Not enough room left for a maximum-length varint; fall back
		// to the decoder that bounds-checks every byte.
		return cb.decodeVarintSlow()
	}

	var b uint64
	// we already checked the first byte
	x := uint64(buf[i]) - 0x80
	i++

	// Each unrolled step adds the next byte's value shifted into place,
	// then (if the continuation bit was set) subtracts that bit's
	// contribution (0x80 << shift) before processing the next byte.
	b = uint64(buf[i])
	i++
	x += b << 7
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 7

	b = uint64(buf[i])
	i++
	x += b << 14
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 14

	b = uint64(buf[i])
	i++
	x += b << 21
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 21

	b = uint64(buf[i])
	i++
	x += b << 28
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 28

	b = uint64(buf[i])
	i++
	x += b << 35
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 35

	b = uint64(buf[i])
	i++
	x += b << 42
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 42

	b = uint64(buf[i])
	i++
	x += b << 49
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 49

	b = uint64(buf[i])
	i++
	x += b << 56
	if b&0x80 == 0 {
		goto done
	}
	x -= 0x80 << 56

	// Tenth byte: only its low bit can contribute (bit 63).
	b = uint64(buf[i])
	i++
	x += b << 63
	if b&0x80 == 0 {
		goto done
	}
	// x -= 0x80 << 63 // Always zero.

	// Continuation bit still set after 10 bytes: malformed/oversized varint.
	return 0, ErrOverflow

done:
	cb.index = i
	return x, nil
}
+
+// DecodeTagAndWireType decodes a field tag and wire type from input.
+// This reads a varint and then extracts the two fields from the varint
+// value read.
+func (cb *Buffer) DecodeTagAndWireType() (tag int32, wireType int8, err error) {
+ var v uint64
+ v, err = cb.DecodeVarint()
+ if err != nil {
+ return
+ }
+ // low 7 bits is wire type
+ wireType = int8(v & 7)
+ // rest is int32 tag number
+ v = v >> 3
+ if v > math.MaxInt32 {
+ err = fmt.Errorf("tag number out of range: %d", v)
+ return
+ }
+ tag = int32(v)
+ return
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (cb *Buffer) DecodeFixed64() (x uint64, err error) {
+ // x, err already 0
+ i := cb.index + 8
+ if i < 0 || i > len(cb.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ cb.index = i
+
+ x = uint64(cb.buf[i-8])
+ x |= uint64(cb.buf[i-7]) << 8
+ x |= uint64(cb.buf[i-6]) << 16
+ x |= uint64(cb.buf[i-5]) << 24
+ x |= uint64(cb.buf[i-4]) << 32
+ x |= uint64(cb.buf[i-3]) << 40
+ x |= uint64(cb.buf[i-2]) << 48
+ x |= uint64(cb.buf[i-1]) << 56
+ return
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (cb *Buffer) DecodeFixed32() (x uint64, err error) {
+ // x, err already 0
+ i := cb.index + 4
+ if i < 0 || i > len(cb.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ cb.index = i
+
+ x = uint64(cb.buf[i-4])
+ x |= uint64(cb.buf[i-3]) << 8
+ x |= uint64(cb.buf[i-2]) << 16
+ x |= uint64(cb.buf[i-1]) << 24
+ return
+}
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (cb *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+ n, err := cb.DecodeVarint()
+ if err != nil {
+ return nil, err
+ }
+
+ nb := int(n)
+ if nb < 0 {
+ return nil, fmt.Errorf("proto: bad byte length %d", nb)
+ }
+ end := cb.index + nb
+ if end < cb.index || end > len(cb.buf) {
+ return nil, io.ErrUnexpectedEOF
+ }
+
+ if !alloc {
+ buf = cb.buf[cb.index:end]
+ cb.index = end
+ return
+ }
+
+ buf = make([]byte, nb)
+ copy(buf, cb.buf[cb.index:])
+ cb.index = end
+ return
+}
+
+// ReadGroup reads the input until a "group end" tag is found
+// and returns the data up to that point. Subsequent reads from
+// the buffer will read data after the group end tag. If alloc
+// is true, the data is copied to a new slice before being returned.
+// Otherwise, the returned slice is a view into the buffer's
+// underlying byte slice.
+//
+// This function correctly handles nested groups: if a "group start"
+// tag is found, then that group's end tag will be included in the
+// returned data.
+func (cb *Buffer) ReadGroup(alloc bool) ([]byte, error) {
+ var groupEnd, dataEnd int
+ groupEnd, dataEnd, err := cb.findGroupEnd()
+ if err != nil {
+ return nil, err
+ }
+ var results []byte
+ if !alloc {
+ results = cb.buf[cb.index:dataEnd]
+ } else {
+ results = make([]byte, dataEnd-cb.index)
+ copy(results, cb.buf[cb.index:])
+ }
+ cb.index = groupEnd
+ return results, nil
+}
+
+// SkipGroup is like ReadGroup, except that it discards the
+// data and just advances the buffer to point to the input
+// right *after* the "group end" tag.
+func (cb *Buffer) SkipGroup() error {
+ groupEnd, _, err := cb.findGroupEnd()
+ if err != nil {
+ return err
+ }
+ cb.index = groupEnd
+ return nil
+}
+
+// SkipField attempts to skip the value of a field with the given wire
+// type. When consuming a protobuf-encoded stream, it can be called immediately
+// after DecodeTagAndWireType to discard the subsequent data for the field.
+func (cb *Buffer) SkipField(wireType int8) error {
+ switch wireType {
+ case proto.WireFixed32:
+ if err := cb.Skip(4); err != nil {
+ return err
+ }
+ case proto.WireFixed64:
+ if err := cb.Skip(8); err != nil {
+ return err
+ }
+ case proto.WireVarint:
+ // skip varint by finding last byte (has high bit unset)
+ i := cb.index
+ limit := i + 10 // varint cannot be >10 bytes
+ for {
+ if i >= limit {
+ return ErrOverflow
+ }
+ if i >= len(cb.buf) {
+ return io.ErrUnexpectedEOF
+ }
+ if cb.buf[i]&0x80 == 0 {
+ break
+ }
+ i++
+ }
+ // TODO: This would only overflow if buffer length was MaxInt and we
+ // read the last byte. This is not a real/feasible concern on 64-bit
+ // systems. Something to worry about for 32-bit systems? Do we care?
+ cb.index = i + 1
+ case proto.WireBytes:
+ l, err := cb.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ if err := cb.Skip(int(l)); err != nil {
+ return err
+ }
+ case proto.WireStartGroup:
+ if err := cb.SkipGroup(); err != nil {
+ return err
+ }
+ default:
+ return ErrBadWireType
+ }
+ return nil
+}
+
// findGroupEnd scans forward from the current read position for the
// matching "group end" tag, correctly passing over nested fields and
// groups. It returns groupEnd, the index just past the end tag (where
// reading should resume after the group), and dataEnd, the index at
// which the end tag begins (so the group payload is
// cb.buf[cb.index:dataEnd]). The buffer's read position is always
// restored before returning; callers decide how far to consume.
func (cb *Buffer) findGroupEnd() (groupEnd int, dataEnd int, err error) {
	start := cb.index
	// The scan below advances cb.index; put it back no matter how we exit.
	defer func() {
		cb.index = start
	}()
	for {
		fieldStart := cb.index
		// read a field tag
		_, wireType, err := cb.DecodeTagAndWireType()
		if err != nil {
			return 0, 0, err
		}
		if wireType == proto.WireEndGroup {
			return cb.index, fieldStart, nil
		}
		// skip past the field's data
		if err := cb.SkipField(wireType); err != nil {
			return 0, 0, err
		}
	}
}
diff --git a/vendor/github.com/jhump/protoreflect/internal/codec/encode.go b/vendor/github.com/jhump/protoreflect/internal/codec/encode.go
new file mode 100644
index 0000000..524f1bc
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/internal/codec/encode.go
@@ -0,0 +1,147 @@
+package codec
+
+import (
+ "github.com/golang/protobuf/proto"
+)
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (cb *Buffer) EncodeVarint(x uint64) error {
+ for x >= 1<<7 {
+ cb.buf = append(cb.buf, uint8(x&0x7f|0x80))
+ x >>= 7
+ }
+ cb.buf = append(cb.buf, uint8(x))
+ return nil
+}
+
+// EncodeTagAndWireType encodes the given field tag and wire type to the
+// buffer. This combines the two values and then writes them as a varint.
+func (cb *Buffer) EncodeTagAndWireType(tag int32, wireType int8) error {
+ v := uint64((int64(tag) << 3) | int64(wireType))
+ return cb.EncodeVarint(v)
+}
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (cb *Buffer) EncodeFixed64(x uint64) error {
+ cb.buf = append(cb.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24),
+ uint8(x>>32),
+ uint8(x>>40),
+ uint8(x>>48),
+ uint8(x>>56))
+ return nil
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (cb *Buffer) EncodeFixed32(x uint64) error {
+ cb.buf = append(cb.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24))
+ return nil
+}
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (cb *Buffer) EncodeRawBytes(b []byte) error {
+ if err := cb.EncodeVarint(uint64(len(b))); err != nil {
+ return err
+ }
+ cb.buf = append(cb.buf, b...)
+ return nil
+}
+
+// EncodeMessage writes the given message to the buffer.
+func (cb *Buffer) EncodeMessage(pm proto.Message) error {
+ bytes, err := marshalMessage(cb.buf, pm, cb.deterministic)
+ if err != nil {
+ return err
+ }
+ cb.buf = bytes
+ return nil
+}
+
+// EncodeDelimitedMessage writes the given message to the buffer with a
+// varint-encoded length prefix (the delimiter).
+func (cb *Buffer) EncodeDelimitedMessage(pm proto.Message) error {
+ bytes, err := marshalMessage(cb.tmp, pm, cb.deterministic)
+ if err != nil {
+ return err
+ }
+ // save truncated buffer if it was grown (so we can re-use it and
+ // curtail future allocations)
+ if cap(bytes) > cap(cb.tmp) {
+ cb.tmp = bytes[:0]
+ }
+ return cb.EncodeRawBytes(bytes)
+}
+
// marshalMessage appends the serialized form of pm to b, honoring the
// deterministic flag. It probes pm for the most efficient marshaling
// method available — preferring "append"-style methods that can reuse
// b's capacity over ones that always allocate a fresh slice — and
// falls back to plain proto.Marshal as the lowest common denominator.
func marshalMessage(b []byte, pm proto.Message, deterministic bool) ([]byte, error) {
	// We try to use the most efficient way to marshal to existing slice.

	if deterministic {
		// see if the message has custom deterministic methods, preferring an
		// "append" method over one that must always re-allocate
		madm, ok := pm.(interface {
			MarshalAppendDeterministic(b []byte) ([]byte, error)
		})
		if ok {
			return madm.MarshalAppendDeterministic(b)
		}

		mdm, ok := pm.(interface {
			MarshalDeterministic() ([]byte, error)
		})
		if ok {
			bytes, err := mdm.MarshalDeterministic()
			if err != nil {
				return nil, err
			}
			// Nothing to prepend: hand back the fresh slice as-is.
			if len(b) == 0 {
				return bytes, nil
			}
			return append(b, bytes...), nil
		}

		// No custom methods: marshal through a proto.Buffer configured
		// for deterministic output.
		var buf proto.Buffer
		buf.SetDeterministic(true)
		if err := buf.Marshal(pm); err != nil {
			return nil, err
		}
		bytes := buf.Bytes()
		if len(b) == 0 {
			return bytes, nil
		}
		return append(b, bytes...), nil
	}

	mam, ok := pm.(interface {
		// see if we can append the message, vs. having to re-allocate
		MarshalAppend(b []byte) ([]byte, error)
	})
	if ok {
		return mam.MarshalAppend(b)
	}

	// lowest common denominator
	bytes, err := proto.Marshal(pm)
	if err != nil {
		return nil, err
	}
	if len(b) == 0 {
		return bytes, nil
	}
	return append(b, bytes...), nil
}
diff --git a/vendor/github.com/jhump/protoreflect/internal/standard_files.go b/vendor/github.com/jhump/protoreflect/internal/standard_files.go
new file mode 100644
index 0000000..777c3a4
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/internal/standard_files.go
@@ -0,0 +1,127 @@
+// Package internal contains some code that should not be exported but needs to
+// be shared across more than one of the protoreflect sub-packages.
+package internal
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io/ioutil"
+
+ "github.com/golang/protobuf/proto"
+ "google.golang.org/protobuf/types/descriptorpb"
+)
+
+// TODO: replace this alias configuration with desc.RegisterImportPath?
+
+// StdFileAliases are the standard protos included with protoc, but older versions of
+// their respective packages registered them using incorrect paths.
+var StdFileAliases = map[string]string{
+ // Files for the github.com/golang/protobuf/ptypes package at one point were
+ // registered using the path where the proto files are mirrored in GOPATH,
+ // inside the golang/protobuf repo.
+ // (Fixed as of https://github.com/golang/protobuf/pull/412)
+ "google/protobuf/any.proto": "github.com/golang/protobuf/ptypes/any/any.proto",
+ "google/protobuf/duration.proto": "github.com/golang/protobuf/ptypes/duration/duration.proto",
+ "google/protobuf/empty.proto": "github.com/golang/protobuf/ptypes/empty/empty.proto",
+ "google/protobuf/struct.proto": "github.com/golang/protobuf/ptypes/struct/struct.proto",
+ "google/protobuf/timestamp.proto": "github.com/golang/protobuf/ptypes/timestamp/timestamp.proto",
+ "google/protobuf/wrappers.proto": "github.com/golang/protobuf/ptypes/wrappers/wrappers.proto",
+ // Files for the google.golang.org/genproto/protobuf package at one point
+ // were registered with an anomalous "src/" prefix.
+ // (Fixed as of https://github.com/google/go-genproto/pull/31)
+ "google/protobuf/api.proto": "src/google/protobuf/api.proto",
+ "google/protobuf/field_mask.proto": "src/google/protobuf/field_mask.proto",
+ "google/protobuf/source_context.proto": "src/google/protobuf/source_context.proto",
+ "google/protobuf/type.proto": "src/google/protobuf/type.proto",
+
+ // Other standard files (descriptor.proto and compiler/plugin.proto) are
+ // registered correctly, so we don't need rules for them here.
+}
+
+func init() {
+ // We provide aliasing in both directions, to support files with the
+ // proper import path linked against older versions of the generated
+ // files AND files that used the aliased import path but linked against
+ // newer versions of the generated files (which register with the
+ // correct path).
+
+ // Get all files defined above
+ keys := make([]string, 0, len(StdFileAliases))
+ for k := range StdFileAliases {
+ keys = append(keys, k)
+ }
+ // And add inverse mappings
+ for _, k := range keys {
+ alias := StdFileAliases[k]
+ StdFileAliases[alias] = k
+ }
+}
+
// ErrNoSuchFile is the error returned when a descriptor cannot be
// located; its string value is the name of the missing file.
type ErrNoSuchFile string

// Error implements the error interface.
func (e ErrNoSuchFile) Error() string {
	name := string(e)
	return fmt.Sprintf("no such file: %q", name)
}
+
+// LoadFileDescriptor loads a registered descriptor and decodes it. If the given
+// name cannot be loaded but is a known standard name, an alias will be tried,
+// so the standard files can be loaded even if linked against older "known bad"
+// versions of packages.
+func LoadFileDescriptor(file string) (*descriptorpb.FileDescriptorProto, error) {
+ fdb := proto.FileDescriptor(file)
+ aliased := false
+ if fdb == nil {
+ var ok bool
+ alias, ok := StdFileAliases[file]
+ if ok {
+ aliased = true
+ if fdb = proto.FileDescriptor(alias); fdb == nil {
+ return nil, ErrNoSuchFile(file)
+ }
+ } else {
+ return nil, ErrNoSuchFile(file)
+ }
+ }
+
+ fd, err := DecodeFileDescriptor(file, fdb)
+ if err != nil {
+ return nil, err
+ }
+
+ if aliased {
+ // the file descriptor will have the alias used to load it, but
+ // we need it to have the specified name in order to link it
+ fd.Name = proto.String(file)
+ }
+
+ return fd, nil
+}
+
+// DecodeFileDescriptor decodes the bytes of a registered file descriptor.
+// Registered file descriptors are first "proto encoded" (e.g. binary format
+// for the descriptor protos) and then gzipped. So this function gunzips and
+// then unmarshals into a descriptor proto.
+func DecodeFileDescriptor(element string, fdb []byte) (*descriptorpb.FileDescriptorProto, error) {
+ raw, err := decompress(fdb)
+ if err != nil {
+ return nil, fmt.Errorf("failed to decompress %q descriptor: %v", element, err)
+ }
+ fd := descriptorpb.FileDescriptorProto{}
+ if err := proto.Unmarshal(raw, &fd); err != nil {
+ return nil, fmt.Errorf("bad descriptor for %q: %v", element, err)
+ }
+ return &fd, nil
+}
+
+func decompress(b []byte) ([]byte, error) {
+ r, err := gzip.NewReader(bytes.NewReader(b))
+ if err != nil {
+ return nil, fmt.Errorf("bad gzipped descriptor: %v", err)
+ }
+ out, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, fmt.Errorf("bad gzipped descriptor: %v", err)
+ }
+ return out, nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/internal/unrecognized.go b/vendor/github.com/jhump/protoreflect/internal/unrecognized.go
new file mode 100644
index 0000000..25376c7
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/internal/unrecognized.go
@@ -0,0 +1,20 @@
+package internal
+
+import (
+ "github.com/golang/protobuf/proto"
+)
+
+// GetUnrecognized fetches the bytes of unrecognized fields for the given message.
+func GetUnrecognized(msg proto.Message) []byte {
+ return proto.MessageReflect(msg).GetUnknown()
+}
+
+// SetUnrecognized adds the given bytes to the unrecognized fields for the given message.
+func SetUnrecognized(msg proto.Message, data []byte) {
+ refl := proto.MessageReflect(msg)
+ existing := refl.GetUnknown()
+ if len(existing) > 0 {
+ data = append(existing, data...)
+ }
+ refl.SetUnknown(data)
+}