summaryrefslogtreecommitdiff
path: root/vendor/github.com/fullstorydev
diff options
context:
space:
mode:
Diffstat (limited to 'vendor/github.com/fullstorydev')
-rw-r--r--vendor/github.com/fullstorydev/grpcurl/.gitignore4
-rw-r--r--vendor/github.com/fullstorydev/grpcurl/.goreleaser.yml63
-rw-r--r--vendor/github.com/fullstorydev/grpcurl/Dockerfile36
-rw-r--r--vendor/github.com/fullstorydev/grpcurl/LICENSE21
-rw-r--r--vendor/github.com/fullstorydev/grpcurl/Makefile102
-rw-r--r--vendor/github.com/fullstorydev/grpcurl/README.md256
-rw-r--r--vendor/github.com/fullstorydev/grpcurl/cmd/grpcurl/grpcurl.go983
-rw-r--r--vendor/github.com/fullstorydev/grpcurl/cmd/grpcurl/unix.go15
-rw-r--r--vendor/github.com/fullstorydev/grpcurl/desc_source.go369
-rw-r--r--vendor/github.com/fullstorydev/grpcurl/download_protoc.sh35
-rw-r--r--vendor/github.com/fullstorydev/grpcurl/format.go554
-rw-r--r--vendor/github.com/fullstorydev/grpcurl/grpcurl.go694
-rw-r--r--vendor/github.com/fullstorydev/grpcurl/invoke.go409
-rw-r--r--vendor/github.com/fullstorydev/grpcurl/mk-test-files.sh57
14 files changed, 3598 insertions, 0 deletions
diff --git a/vendor/github.com/fullstorydev/grpcurl/.gitignore b/vendor/github.com/fullstorydev/grpcurl/.gitignore
new file mode 100644
index 0000000..3a2f7da
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/.gitignore
@@ -0,0 +1,4 @@
+dist/
+.idea/
+VERSION
+.tmp/
diff --git a/vendor/github.com/fullstorydev/grpcurl/.goreleaser.yml b/vendor/github.com/fullstorydev/grpcurl/.goreleaser.yml
new file mode 100644
index 0000000..294616a
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/.goreleaser.yml
@@ -0,0 +1,63 @@
+builds:
+ - binary: grpcurl
+ main: ./cmd/grpcurl
+ goos:
+ - linux
+ - darwin
+ - windows
+ goarch:
+ - amd64
+ - 386
+ - arm
+ - arm64
+ - s390x
+ - ppc64le
+ goarm:
+ - 5
+ - 6
+ - 7
+ ignore:
+ - goos: darwin
+ goarch: 386
+ - goos: windows
+ goarch: arm64
+ - goos: darwin
+ goarch: arm
+ - goos: windows
+ goarch: arm
+ - goos: darwin
+ goarch: s390x
+ - goos: windows
+ goarch: s390x
+ - goos: darwin
+ goarch: ppc64le
+ - goos: windows
+ goarch: ppc64le
+ ldflags:
+ - -s -w -X main.version=v{{.Version}}
+
+archives:
+ - format: tar.gz
+ name_template: >-
+ {{ .Binary }}_{{ .Version }}_
+ {{- if eq .Os "darwin" }}osx{{ else }}{{ .Os }}{{ end }}_
+ {{- if eq .Arch "amd64" }}x86_64
+ {{- else if eq .Arch "386" }}x86_32
+ {{- else }}{{ .Arch }}{{ end }}
+ {{- with .Arm }}v{{ . }}{{ end }}{{ with .Mips }}_{{ . }}{{ end }}{{ if not (eq .Amd64 "v1") }}{{ .Amd64 }}{{ end }}
+ format_overrides:
+ - goos: windows
+ format: zip
+ files:
+ - LICENSE
+
+nfpms:
+ - vendor: Fullstory
+ homepage: https://github.com/fullstorydev/grpcurl/
+ maintainer: Engineering at Fullstory <fixme@fixme>
+ description: 'Like cURL, but for gRPC: Command-line tool for interacting with gRPC servers'
+ license: MIT
+ id: nfpms
+ formats:
+ - deb
+ - rpm
diff --git a/vendor/github.com/fullstorydev/grpcurl/Dockerfile b/vendor/github.com/fullstorydev/grpcurl/Dockerfile
new file mode 100644
index 0000000..1d6ac4c
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/Dockerfile
@@ -0,0 +1,36 @@
+FROM golang:1.23-alpine as builder
+MAINTAINER Fullstory Engineering
+
+# create non-privileged group and user
+RUN addgroup -S grpcurl && adduser -S grpcurl -G grpcurl
+
+WORKDIR /tmp/fullstorydev/grpcurl
+# copy just the files/sources we need to build grpcurl
+COPY VERSION *.go go.* /tmp/fullstorydev/grpcurl/
+COPY cmd /tmp/fullstorydev/grpcurl/cmd
+# and build a completely static binary (so we can use
+# scratch as basis for the final image)
+ENV CGO_ENABLED=0
+ENV GO111MODULE=on
+RUN go build -o /grpcurl \
+ -ldflags "-w -extldflags \"-static\" -X \"main.version=$(cat VERSION)\"" \
+ ./cmd/grpcurl
+
+FROM alpine:3 as alpine
+WORKDIR /
+COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
+COPY --from=builder /etc/passwd /etc/passwd
+COPY --from=builder /grpcurl /bin/grpcurl
+USER grpcurl
+
+ENTRYPOINT ["/bin/grpcurl"]
+
+# New FROM so we have a nice'n'tiny image
+FROM scratch
+WORKDIR /
+COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
+COPY --from=builder /etc/passwd /etc/passwd
+COPY --from=builder /grpcurl /bin/grpcurl
+USER grpcurl
+
+ENTRYPOINT ["/bin/grpcurl"]
diff --git a/vendor/github.com/fullstorydev/grpcurl/LICENSE b/vendor/github.com/fullstorydev/grpcurl/LICENSE
new file mode 100644
index 0000000..63cc7ac
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2017 Fullstory, Inc
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/fullstorydev/grpcurl/Makefile b/vendor/github.com/fullstorydev/grpcurl/Makefile
new file mode 100644
index 0000000..0c92794
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/Makefile
@@ -0,0 +1,102 @@
+dev_build_version=$(shell git describe --tags --always --dirty)
+
+export PATH := $(shell pwd)/.tmp/protoc/bin:$(PATH)
+export PROTOC_VERSION := 22.0
+# Disable CGO for improved compatibility across distros
+export CGO_ENABLED=0
+
+# TODO: run golint and errcheck, but only to catch *new* violations and
+# decide whether to change code or not (e.g. we need to be able to whitelist
+# violations already in the code). They can be useful to catch errors, but
+# they are just too noisy to be a requirement for a CI -- we don't even *want*
+# to fix some of the things they consider to be violations.
+.PHONY: ci
+ci: deps checkgofmt checkgenerate vet staticcheck ineffassign predeclared test
+
+.PHONY: deps
+deps:
+ go get -d -v -t ./...
+ go mod tidy
+
+.PHONY: updatedeps
+updatedeps:
+ go get -d -v -t -u -f ./...
+ go mod tidy
+
+.PHONY: install
+install:
+ go install -ldflags '-X "main.version=dev build $(dev_build_version)"' ./...
+
+.PHONY: release
+release:
+ @go install github.com/goreleaser/goreleaser@v1.21.0
+ goreleaser release --clean
+
+.PHONY: docker
+docker:
+ @echo $(dev_build_version) > VERSION
+ docker build -t fullstorydev/grpcurl:$(dev_build_version) .
+ @rm VERSION
+
+.PHONY: generate
+generate: .tmp/protoc/bin/protoc
+ @go install google.golang.org/protobuf/cmd/protoc-gen-go@a709e31e5d12
+ @go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.1.0
+ @go install github.com/jhump/protoreflect/desc/sourceinfo/cmd/protoc-gen-gosrcinfo@v1.14.1
+ go generate ./...
+ go mod tidy
+
+.PHONY: checkgenerate
+checkgenerate: generate
+ git status --porcelain
+ @if [ -n "$$(git status --porcelain)" ]; then \
+ git diff; \
+ exit 1; \
+ fi
+
+.PHONY: checkgofmt
+checkgofmt:
+ gofmt -s -l .
+ @if [ -n "$$(gofmt -s -l .)" ]; then \
+ exit 1; \
+ fi
+
+.PHONY: vet
+vet:
+ go vet ./...
+
+.PHONY: staticcheck
+staticcheck:
+ @go install honnef.co/go/tools/cmd/staticcheck@v0.5.1
+ staticcheck -checks "inherit,-SA1019" ./...
+
+.PHONY: ineffassign
+ineffassign:
+ @go install github.com/gordonklaus/ineffassign@7953dde2c7bf
+ ineffassign .
+
+.PHONY: predeclared
+predeclared:
+ @go install github.com/nishanths/predeclared@245576f9a85c
+ predeclared ./...
+
+# Intentionally omitted from CI, but target here for ad-hoc reports.
+.PHONY: golint
+golint:
+ @go install golang.org/x/lint/golint@v0.0.0-20210508222113-6edffad5e616
+ golint -min_confidence 0.9 -set_exit_status ./...
+
+# Intentionally omitted from CI, but target here for ad-hoc reports.
+.PHONY: errcheck
+errcheck:
+ @go install github.com/kisielk/errcheck@v1.2.0
+ errcheck ./...
+
+.PHONY: test
+test:
+ # The race detector requires CGO: https://github.com/golang/go/issues/6508
+ CGO_ENABLED=1 go test -race ./...
+
+.tmp/protoc/bin/protoc: ./Makefile ./download_protoc.sh
+ ./download_protoc.sh
+
diff --git a/vendor/github.com/fullstorydev/grpcurl/README.md b/vendor/github.com/fullstorydev/grpcurl/README.md
new file mode 100644
index 0000000..7c73b50
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/README.md
@@ -0,0 +1,256 @@
+# gRPCurl
+[![Build Status](https://circleci.com/gh/fullstorydev/grpcurl/tree/master.svg?style=svg)](https://circleci.com/gh/fullstorydev/grpcurl/tree/master)
+[![Go Report Card](https://goreportcard.com/badge/github.com/fullstorydev/grpcurl)](https://goreportcard.com/report/github.com/fullstorydev/grpcurl)
+
+`grpcurl` is a command-line tool that lets you interact with gRPC servers. It's
+basically `curl` for gRPC servers.
+
+The main purpose for this tool is to invoke RPC methods on a gRPC server from the
+command-line. gRPC servers use a binary encoding on the wire
+([protocol buffers](https://developers.google.com/protocol-buffers/), or "protobufs"
+for short). So they are basically impossible to interact with using regular `curl`
+(and older versions of `curl` that do not support HTTP/2 are of course non-starters).
+This program accepts messages using JSON encoding, which is much more friendly for both
+humans and scripts.
+
+With this tool you can also browse the schema for gRPC services, either by querying
+a server that supports [server reflection](https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1/reflection.proto),
+by reading proto source files, or by loading in compiled "protoset" files (files that contain
+encoded file [descriptor protos](https://github.com/google/protobuf/blob/master/src/google/protobuf/descriptor.proto)).
+In fact, the way the tool transforms JSON request data into a binary encoded protobuf
+is using that very same schema. So, if the server you interact with does not support
+reflection, you will either need the proto source files that define the service or need
+protoset files that `grpcurl` can use.
+
+This repo also provides a library package, `github.com/fullstorydev/grpcurl`, that has
+functions for simplifying the construction of other command-line tools that dynamically
+invoke gRPC endpoints. This code is a great example of how to use the various packages of
+the [protoreflect](https://godoc.org/github.com/jhump/protoreflect) library, and shows
+off what they can do.
+
+See also the [`grpcurl` talk at GopherCon 2018](https://www.youtube.com/watch?v=dDr-8kbMnaw).
+
+## Features
+`grpcurl` supports all kinds of RPC methods, including streaming methods. You can even
+operate bi-directional streaming methods interactively by running `grpcurl` from an
+interactive terminal and using stdin as the request body!
+
+`grpcurl` supports both secure/TLS servers _and_ plain-text servers (i.e. no TLS) and has
+numerous options for TLS configuration. It also supports mutual TLS, where the client is
+required to present a client certificate.
+
+As mentioned above, `grpcurl` works seamlessly if the server supports the reflection
+service. If not, you can supply the `.proto` source files or you can supply protoset
+files (containing compiled descriptors, produced by `protoc`) to `grpcurl`.
+
+## Installation
+
+### Binaries
+
+Download the binary from the [releases](https://github.com/fullstorydev/grpcurl/releases) page.
+
+### Homebrew (macOS)
+
+On macOS, `grpcurl` is available via Homebrew:
+```shell
+brew install grpcurl
+```
+
+### Docker
+
+For platforms that support Docker, you can download an image that lets you run `grpcurl`:
+```shell
+# Download image
+docker pull fullstorydev/grpcurl:latest
+# Run the tool
+docker run fullstorydev/grpcurl api.grpc.me:443 list
+```
+Note that there are some pitfalls when using docker:
+- If you need to interact with a server listening on the host's loopback network, you must specify the host as `host.docker.internal` instead of `localhost` (for Mac or Windows) _OR_ have the container use the host network with `--network="host"` (Linux only).
+- If you need to provide proto source files or descriptor sets, you must mount the folder containing the files as a volume (`-v $(pwd):/protos`) and adjust the import paths to container paths accordingly.
+- If you want to provide the request message via stdin, using the `-d @` option, you need to use the `-i` flag on the docker command.
+
+### Other Packages
+
+There are numerous other ways to install `grpcurl`, thanks to support from third parties that
+have created recipes/packages for it. These include other ways to install `grpcurl` on a variety
+of environments, including Windows and myriad Linux distributions.
+
+You can see more details and the full list of other packages for `grpcurl` at _repology.org_:
+https://repology.org/project/grpcurl/information
+
+### From Source
+If you already have the [Go SDK](https://golang.org/doc/install) installed, you can use the `go`
+tool to install `grpcurl`:
+```shell
+go install github.com/fullstorydev/grpcurl/cmd/grpcurl@latest
+```
+
+This installs the command into the `bin` sub-folder of wherever your `$GOPATH`
+environment variable points. (If you have no `GOPATH` environment variable set,
+the default install location is `$HOME/go/bin`). If this directory is already in
+your `$PATH`, then you should be good to go.
+
+If you have already pulled down this repo to a location that is not in your
+`$GOPATH` and want to build from the sources, you can `cd` into the repo and then
+run `make install`.
+
+If you encounter compile errors and are using a version of the Go SDK older than 1.13,
+you could have out-dated versions of `grpcurl`'s dependencies. You can update the
+dependencies by running `make updatedeps`. Or, if you are using Go 1.11 or 1.12, you
+can add `GO111MODULE=on` as a prefix to the commands above, which will also build using
+the right versions of dependencies (vs. whatever you may already have in your `GOPATH`).
+
+## Usage
+The usage doc for the tool explains the numerous options:
+```shell
+grpcurl -help
+```
+
+In the sections below, you will find numerous examples demonstrating how to use
+`grpcurl`.
+
+### Invoking RPCs
+Invoking an RPC on a trusted server (e.g. TLS without self-signed key or custom CA)
+that requires no client certs and supports server reflection is the simplest thing to
+do with `grpcurl`. This minimal invocation sends an empty request body:
+```shell
+grpcurl grpc.server.com:443 my.custom.server.Service/Method
+
+# no TLS
+grpcurl -plaintext grpc.server.com:80 my.custom.server.Service/Method
+```
+
+To send a non-empty request, use the `-d` argument. Note that all arguments must come
+*before* the server address and method name:
+```shell
+grpcurl -d '{"id": 1234, "tags": ["foo","bar"]}' \
+ grpc.server.com:443 my.custom.server.Service/Method
+```
+
+As can be seen in the example, the supplied body must be in JSON format. The body will
+be parsed and then transmitted to the server in the protobuf binary format.
+
+If you want to include `grpcurl` in a command pipeline, such as when using `jq` to
+create a request body, you can use `-d @`, which tells `grpcurl` to read the actual
+request body from stdin:
+```shell
+grpcurl -d @ grpc.server.com:443 my.custom.server.Service/Method <<EOM
+{
+ "id": 1234,
+ "tags": [
+    "foo",
+ "bar"
+ ]
+}
+EOM
+```
+### Adding Headers/Metadata to Request
+Adding of headers / metadata to an RPC request is possible via the `-H name:value` command line option. Multiple headers can be added in a similar fashion.
+Example :
+```shell
+grpcurl -H header1:value1 -H header2:value2 -d '{"id": 1234, "tags": ["foo","bar"]}' grpc.server.com:443 my.custom.server.Service/Method
+```
+For more usage guide, check out the help docs via `grpcurl -help`
+
+### Listing Services
+To list all services exposed by a server, use the "list" verb. When using `.proto` source
+or protoset files instead of server reflection, this lists all services defined in the
+source or protoset files.
+```shell
+# Server supports reflection
+grpcurl localhost:8787 list
+
+# Using compiled protoset files
+grpcurl -protoset my-protos.bin list
+
+# Using proto sources
+grpcurl -import-path ../protos -proto my-stuff.proto list
+
+# Export proto files (use -proto-out-dir to specify the output directory)
+grpcurl -plaintext -proto-out-dir "out_protos" "localhost:8787" describe my.custom.server.Service
+
+# Export protoset file (use -protoset-out to specify the output file)
+grpcurl -plaintext -protoset-out "out.protoset" "localhost:8787" describe my.custom.server.Service
+
+```
+
+The "list" verb also lets you see all methods in a particular service:
+```shell
+grpcurl localhost:8787 list my.custom.server.Service
+```
+
+### Describing Elements
+The "describe" verb will print the type of any symbol that the server knows about
+or that is found in a given protoset file. It also prints a description of that
+symbol, in the form of snippets of proto source. It won't necessarily be the
+original source that defined the element, but it will be equivalent.
+
+```shell
+# Server supports reflection
+grpcurl localhost:8787 describe my.custom.server.Service.MethodOne
+
+# Using compiled protoset files
+grpcurl -protoset my-protos.bin describe my.custom.server.Service.MethodOne
+
+# Using proto sources
+grpcurl -import-path ../protos -proto my-stuff.proto describe my.custom.server.Service.MethodOne
+```
+
+## Descriptor Sources
+The `grpcurl` tool can operate on a variety of sources for descriptors. The descriptors
+are required, in order for `grpcurl` to understand the RPC schema, translate inputs
+into the protobuf binary format as well as translate responses from the binary format
+into text. The sections below document the supported sources and what command-line flags
+are needed to use them.
+
+### Server Reflection
+
+Without any additional command-line flags, `grpcurl` will try to use [server reflection](https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1/reflection.proto).
+
+Examples for how to set up server reflection can be found [here](https://github.com/grpc/grpc/blob/master/doc/server-reflection.md#known-implementations).
+
+When using reflection, the server address (host:port or path to Unix socket) is required
+even for "list" and "describe" operations, so that `grpcurl` can connect to the server
+and ask it for its descriptors.
+
+### Proto Source Files
+To use `grpcurl` on servers that do not support reflection, you can use `.proto` source
+files.
+
+In addition to using `-proto` flags to point `grpcurl` at the relevant proto source file(s),
+you may also need to supply `-import-path` flags to tell `grpcurl` the folders from which
+dependencies can be imported.
+
+Just like when compiling with `protoc`, you do *not* need to provide an import path for the
+location of the standard protos included with `protoc` (which contain various "well-known
+types" with a package definition of `google.protobuf`). These files are "known" by `grpcurl`
+as a snapshot of their descriptors is built into the `grpcurl` binary.
+
+When using proto sources, you can omit the server address (host:port or path to Unix socket)
+when using the "list" and "describe" operations since they only need to consult the proto
+source files.
+
+### Protoset Files
+You can also use compiled protoset files with `grpcurl`. If you are scripting `grpcurl` and
+need to re-use the same proto sources for many invocations, you will see better performance
+by using protoset files (since it skips the parsing and compilation steps with each
+invocation).
+
+Protoset files contain binary encoded `google.protobuf.FileDescriptorSet` protos. To create
+a protoset file, invoke `protoc` with the `*.proto` files that define the service:
+```shell
+protoc --proto_path=. \
+ --descriptor_set_out=myservice.protoset \
+ --include_imports \
+ my/custom/server/service.proto
+```
+
+The `--descriptor_set_out` argument is what tells `protoc` to produce a protoset,
+and the `--include_imports` argument is necessary for the protoset to contain
+everything that `grpcurl` needs to process and understand the schema.
+
+When using protosets, you can omit the server address (host:port or path to Unix socket)
+when using the "list" and "describe" operations since they only need to consult the
+protoset files.
+
diff --git a/vendor/github.com/fullstorydev/grpcurl/cmd/grpcurl/grpcurl.go b/vendor/github.com/fullstorydev/grpcurl/cmd/grpcurl/grpcurl.go
new file mode 100644
index 0000000..b794288
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/cmd/grpcurl/grpcurl.go
@@ -0,0 +1,983 @@
+// Command grpcurl makes gRPC requests (a la cURL, but HTTP/2). It can use a supplied descriptor
+// file, protobuf sources, or service reflection to translate JSON or text request data into the
+// appropriate protobuf messages and vice versa for presenting the response contents.
+package main
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "io"
+ "math"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/jhump/protoreflect/desc" //lint:ignore SA1019 required to use APIs in other grpcurl package
+ "github.com/jhump/protoreflect/grpcreflect"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/credentials/alts"
+ "google.golang.org/grpc/keepalive"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+ "google.golang.org/protobuf/types/descriptorpb"
+
+ // Register gzip compressor so compressed responses will work
+ _ "google.golang.org/grpc/encoding/gzip"
+ // Register xds so xds and xds-experimental resolver schemes work
+ _ "google.golang.org/grpc/xds"
+
+ "github.com/fullstorydev/grpcurl"
+)
+
+// To avoid confusion between program error codes and the gRPC response
+// status codes 'Cancelled' and 'Unknown', 1 and 2 respectively,
+// the response status codes emitted use an offset of 64
+const statusCodeOffset = 64
+
+const noVersion = "dev build <no version set>"
+
+var version = noVersion
+
+var (
+ exit = os.Exit
+
+ isUnixSocket func() bool // nil when run on non-unix platform
+
+ flags = flag.NewFlagSet(os.Args[0], flag.ExitOnError)
+
+ help = flags.Bool("help", false, prettify(`
+ Print usage instructions and exit.`))
+ printVersion = flags.Bool("version", false, prettify(`
+ Print version.`))
+
+ plaintext = flags.Bool("plaintext", false, prettify(`
+ Use plain-text HTTP/2 when connecting to server (no TLS).`))
+ insecure = flags.Bool("insecure", false, prettify(`
+ Skip server certificate and domain verification. (NOT SECURE!) Not
+ valid with -plaintext option.`))
+
+ // TLS Options
+ cacert = flags.String("cacert", "", prettify(`
+ File containing trusted root certificates for verifying the server.
+ Ignored if -insecure is specified.`))
+ cert = flags.String("cert", "", prettify(`
+ File containing client certificate (public key), to present to the
+ server. Not valid with -plaintext option. Must also provide -key option.`))
+ key = flags.String("key", "", prettify(`
+ File containing client private key, to present to the server. Not valid
+ with -plaintext option. Must also provide -cert option.`))
+
+ // ALTS Options
+ usealts = flags.Bool("alts", false, prettify(`
+ Use Application Layer Transport Security (ALTS) when connecting to server.`))
+ altsHandshakerServiceAddress = flags.String("alts-handshaker-service", "", prettify(`If set, this server will be used to do the ATLS handshaking.`))
+ altsTargetServiceAccounts multiString
+
+ protoset multiString
+ protoFiles multiString
+ importPaths multiString
+ addlHeaders multiString
+ rpcHeaders multiString
+ reflHeaders multiString
+ expandHeaders = flags.Bool("expand-headers", false, prettify(`
+ If set, headers may use '${NAME}' syntax to reference environment
+ variables. These will be expanded to the actual environment variable
+ value before sending to the server. For example, if there is an
+ environment variable defined like FOO=bar, then a header of
+ 'key: ${FOO}' would expand to 'key: bar'. This applies to -H,
+ -rpc-header, and -reflect-header options. No other expansion/escaping is
+ performed. This can be used to supply credentials/secrets without having
+ to put them in command-line arguments.`))
+ authority = flags.String("authority", "", prettify(`
+ The authoritative name of the remote server. This value is passed as the
+ value of the ":authority" pseudo-header in the HTTP/2 protocol. When TLS
+ is used, this will also be used as the server name when verifying the
+ server's certificate. It defaults to the address that is provided in the
+ positional arguments, or 'localhost' in the case of a unix domain
+ socket.`))
+ userAgent = flags.String("user-agent", "", prettify(`
+ If set, the specified value will be added to the User-Agent header set
+ by the grpc-go library.
+ `))
+ data = flags.String("d", "", prettify(`
+ Data for request contents. If the value is '@' then the request contents
+ are read from stdin. For calls that accept a stream of requests, the
+ contents should include all such request messages concatenated together
+ (possibly delimited; see -format).`))
+ format = flags.String("format", "json", prettify(`
+ The format of request data. The allowed values are 'json' or 'text'. For
+ 'json', the input data must be in JSON format. Multiple request values
+ may be concatenated (messages with a JSON representation other than
+ object must be separated by whitespace, such as a newline). For 'text',
+ the input data must be in the protobuf text format, in which case
+ multiple request values must be separated by the "record separator"
+ ASCII character: 0x1E. The stream should not end in a record separator.
+ If it does, it will be interpreted as a final, blank message after the
+ separator.`))
+ allowUnknownFields = flags.Bool("allow-unknown-fields", false, prettify(`
+ When true, the request contents, if 'json' format is used, allows
+ unknown fields to be present. They will be ignored when parsing
+ the request.`))
+ connectTimeout = flags.Float64("connect-timeout", 0, prettify(`
+ The maximum time, in seconds, to wait for connection to be established.
+ Defaults to 10 seconds.`))
+ formatError = flags.Bool("format-error", false, prettify(`
+ When a non-zero status is returned, format the response using the
+ value set by the -format flag .`))
+ keepaliveTime = flags.Float64("keepalive-time", 0, prettify(`
+ If present, the maximum idle time in seconds, after which a keepalive
+ probe is sent. If the connection remains idle and no keepalive response
+ is received for this same period then the connection is closed and the
+ operation fails.`))
+ maxTime = flags.Float64("max-time", 0, prettify(`
+ The maximum total time the operation can take, in seconds. This sets a
+ timeout on the gRPC context, allowing both client and server to give up
+ after the deadline has past. This is useful for preventing batch jobs
+ that use grpcurl from hanging due to slow or bad network links or due
+ to incorrect stream method usage.`))
+ maxMsgSz = flags.Int("max-msg-sz", 0, prettify(`
+ The maximum encoded size of a response message, in bytes, that grpcurl
+ will accept. If not specified, defaults to 4,194,304 (4 megabytes).`))
+ emitDefaults = flags.Bool("emit-defaults", false, prettify(`
+ Emit default values for JSON-encoded responses.`))
+ protosetOut = flags.String("protoset-out", "", prettify(`
+ The name of a file to be written that will contain a FileDescriptorSet
+ proto. With the list and describe verbs, the listed or described
+ elements and their transitive dependencies will be written to the named
+ file if this option is given. When invoking an RPC and this option is
+ given, the method being invoked and its transitive dependencies will be
+ included in the output file.`))
+ protoOut = flags.String("proto-out-dir", "", prettify(`
+ The name of a directory where the generated .proto files will be written.
+ With the list and describe verbs, the listed or described elements and
+ their transitive dependencies will be written as .proto files in the
+ specified directory if this option is given. When invoking an RPC and
+ this option is given, the method being invoked and its transitive
+ dependencies will be included in the generated .proto files in the
+ output directory.`))
+ msgTemplate = flags.Bool("msg-template", false, prettify(`
+ When describing messages, show a template of input data.`))
+ verbose = flags.Bool("v", false, prettify(`
+ Enable verbose output.`))
+ veryVerbose = flags.Bool("vv", false, prettify(`
+ Enable very verbose output (includes timing data).`))
+ serverName = flags.String("servername", "", prettify(`
+ Override server name when validating TLS certificate. This flag is
+ ignored if -plaintext or -insecure is used.
+ NOTE: Prefer -authority. This flag may be removed in the future. It is
+ an error to use both -authority and -servername (though this will be
+ permitted if they are both set to the same value, to increase backwards
+ compatibility with earlier releases that allowed both to be set).`))
+ reflection = optionalBoolFlag{val: true}
+)
+
+func init() {
+ flags.Var(&addlHeaders, "H", prettify(`
+ Additional headers in 'name: value' format. May specify more than one
+ via multiple flags. These headers will also be included in reflection
+ requests to a server.`))
+ flags.Var(&rpcHeaders, "rpc-header", prettify(`
+ Additional RPC headers in 'name: value' format. May specify more than
+ one via multiple flags. These headers will *only* be used when invoking
+ the requested RPC method. They are excluded from reflection requests.`))
+ flags.Var(&reflHeaders, "reflect-header", prettify(`
+ Additional reflection headers in 'name: value' format. May specify more
+ than one via multiple flags. These headers will *only* be used during
+ reflection requests and will be excluded when invoking the requested RPC
+ method.`))
+ flags.Var(&protoset, "protoset", prettify(`
+ The name of a file containing an encoded FileDescriptorSet. This file's
+ contents will be used to determine the RPC schema instead of querying
+ for it from the remote server via the gRPC reflection API. When set: the
+ 'list' action lists the services found in the given descriptors (vs.
+ those exposed by the remote server), and the 'describe' action describes
+ symbols found in the given descriptors. May specify more than one via
+ multiple -protoset flags. It is an error to use both -protoset and
+ -proto flags.`))
+ flags.Var(&protoFiles, "proto", prettify(`
+ The name of a proto source file. Source files given will be used to
+ determine the RPC schema instead of querying for it from the remote
+ server via the gRPC reflection API. When set: the 'list' action lists
+ the services found in the given files and their imports (vs. those
+ exposed by the remote server), and the 'describe' action describes
+ symbols found in the given files. May specify more than one via multiple
+ -proto flags. Imports will be resolved using the given -import-path
+ flags. Multiple proto files can be specified by specifying multiple
+ -proto flags. It is an error to use both -protoset and -proto flags.`))
+ flags.Var(&importPaths, "import-path", prettify(`
+ The path to a directory from which proto sources can be imported, for
+ use with -proto flags. Multiple import paths can be configured by
+ specifying multiple -import-path flags. Paths will be searched in the
+ order given. If no import paths are given, all files (including all
+ imports) must be provided as -proto flags, and grpcurl will attempt to
+ resolve all import statements from the set of file names given.`))
+ flags.Var(&reflection, "use-reflection", prettify(`
+ When true, server reflection will be used to determine the RPC schema.
+ Defaults to true unless a -proto or -protoset option is provided. If
+ -use-reflection is used in combination with a -proto or -protoset flag,
+ the provided descriptor sources will be used in addition to server
+ reflection to resolve messages and extensions.`))
+ flags.Var(&altsTargetServiceAccounts, "alts-target-service-account", prettify(`
+ The full email address of the service account that the server is
+ expected to be using when ALTS is used. You can specify this option
+ multiple times to indicate multiple allowed service accounts. If the
+ server authenticates with a service account that is not one of the
+ expected accounts, the RPC will not be issued. If no such arguments are
+ provided, no check will be performed, and the RPC will be issued
+ regardless of the server's service account.`))
+}
+
// multiString is a flag.Value that accumulates every occurrence of a
// repeatable string flag (e.g. -H given multiple times).
type multiString []string

// String renders the collected values as a comma-separated list
// (used by the flag package for help/diagnostic output).
func (s *multiString) String() string {
	return strings.Join(*s, ",")
}

// Set appends one more occurrence of the flag. It never fails.
func (s *multiString) Set(v string) error {
	*s = append(*s, v)
	return nil
}
+
+// Uses a file source as a fallback for resolving symbols and extensions, but
+// only uses the reflection source for listing services
+type compositeSource struct {
+ reflection grpcurl.DescriptorSource
+ file grpcurl.DescriptorSource
+}
+
+func (cs compositeSource) ListServices() ([]string, error) {
+ return cs.reflection.ListServices()
+}
+
+func (cs compositeSource) FindSymbol(fullyQualifiedName string) (desc.Descriptor, error) {
+ d, err := cs.reflection.FindSymbol(fullyQualifiedName)
+ if err == nil {
+ return d, nil
+ }
+ return cs.file.FindSymbol(fullyQualifiedName)
+}
+
+func (cs compositeSource) AllExtensionsForType(typeName string) ([]*desc.FieldDescriptor, error) {
+ exts, err := cs.reflection.AllExtensionsForType(typeName)
+ if err != nil {
+ // On error fall back to file source
+ return cs.file.AllExtensionsForType(typeName)
+ }
+ // Track the tag numbers from the reflection source
+ tags := make(map[int32]bool)
+ for _, ext := range exts {
+ tags[ext.GetNumber()] = true
+ }
+ fileExts, err := cs.file.AllExtensionsForType(typeName)
+ if err != nil {
+ return exts, nil
+ }
+ for _, ext := range fileExts {
+ // Prioritize extensions found via reflection
+ if !tags[ext.GetNumber()] {
+ exts = append(exts, ext)
+ }
+ }
+ return exts, nil
+}
+
+type timingData struct {
+ Title string
+ Start time.Time
+ Value time.Duration
+ Parent *timingData
+ Sub []*timingData
+}
+
+func (d *timingData) Child(title string) *timingData {
+ if d == nil {
+ return nil
+ }
+ child := &timingData{Title: title, Start: time.Now()}
+ d.Sub = append(d.Sub, child)
+ return child
+}
+
+func (d *timingData) Done() {
+ if d == nil {
+ return
+ }
+ if d.Value == 0 {
+ d.Value = time.Since(d.Start)
+ }
+}
+
// main drives the grpcurl CLI: parse and validate flags, work out which
// verb was requested (list, describe, or invoke), build a descriptor
// source (server reflection and/or local proto descriptors), then perform
// the requested action. Exits via fail()/exit() on any error.
func main() {
	flags.Usage = usage
	flags.Parse(os.Args[1:])
	if *help {
		usage()
		os.Exit(0)
	}
	if *printVersion {
		fmt.Fprintf(os.Stderr, "%s %s\n", filepath.Base(os.Args[0]), version)
		os.Exit(0)
	}

	// default behavior is to use tls
	usetls := !*plaintext && !*usealts

	// Do extra validation on arguments and figure out what user asked us to do.
	if *connectTimeout < 0 {
		fail(nil, "The -connect-timeout argument must not be negative.")
	}
	if *keepaliveTime < 0 {
		fail(nil, "The -keepalive-time argument must not be negative.")
	}
	if *maxTime < 0 {
		fail(nil, "The -max-time argument must not be negative.")
	}
	if *maxMsgSz < 0 {
		fail(nil, "The -max-msg-sz argument must not be negative.")
	}
	if *plaintext && *usealts {
		fail(nil, "The -plaintext and -alts arguments are mutually exclusive.")
	}
	// The TLS-only flags are rejected when TLS is disabled.
	if *insecure && !usetls {
		fail(nil, "The -insecure argument can only be used with TLS.")
	}
	if *cert != "" && !usetls {
		fail(nil, "The -cert argument can only be used with TLS.")
	}
	if *key != "" && !usetls {
		fail(nil, "The -key argument can only be used with TLS.")
	}
	if (*key == "") != (*cert == "") {
		fail(nil, "The -cert and -key arguments must be used together and both be present.")
	}
	if *altsHandshakerServiceAddress != "" && !*usealts {
		fail(nil, "The -alts-handshaker-service argument must be used with the -alts argument.")
	}
	if len(altsTargetServiceAccounts) > 0 && !*usealts {
		fail(nil, "The -alts-target-service-account argument must be used with the -alts argument.")
	}
	if *format != "json" && *format != "text" {
		fail(nil, "The -format option must be 'json' or 'text'.")
	}
	if *emitDefaults && *format != "json" {
		warn("The -emit-defaults is only used when using json format.")
	}

	args := flags.Args()

	if len(args) == 0 {
		fail(nil, "Too few arguments.")
	}
	// The target address is optional for 'list'/'describe' (it may come
	// from protoset/proto files instead), so only consume the first arg
	// as an address when it is not a verb.
	var target string
	if args[0] != "list" && args[0] != "describe" {
		target = args[0]
		args = args[1:]
	}

	if len(args) == 0 {
		fail(nil, "Too few arguments.")
	}
	// Determine the verb: explicit 'list'/'describe', otherwise invoke an RPC.
	var list, describe, invoke bool
	if args[0] == "list" {
		list = true
		args = args[1:]
	} else if args[0] == "describe" {
		describe = true
		args = args[1:]
	} else {
		invoke = true
	}

	verbosityLevel := 0
	if *verbose {
		verbosityLevel = 1
	}

	// -vv additionally collects and dumps a tree of timing data on exit.
	var rootTiming *timingData
	if *veryVerbose {
		verbosityLevel = 2

		rootTiming = &timingData{Title: "Timing Data", Start: time.Now()}
		defer func() {
			rootTiming.Done()
			dumpTiming(rootTiming, 0)
		}()
	}

	// The remaining positional argument is the symbol: required for
	// invoke (the method name), optional for list/describe.
	var symbol string
	if invoke {
		if len(args) == 0 {
			fail(nil, "Too few arguments.")
		}
		symbol = args[0]
		args = args[1:]
	} else {
		if *data != "" {
			warn("The -d argument is not used with 'list' or 'describe' verb.")
		}
		if len(rpcHeaders) > 0 {
			warn("The -rpc-header argument is not used with 'list' or 'describe' verb.")
		}
		if len(args) > 0 {
			symbol = args[0]
			args = args[1:]
		}
	}

	if len(args) > 0 {
		fail(nil, "Too many arguments.")
	}
	// Cross-flag sanity checks now that verb/target/symbol are known.
	if invoke && target == "" {
		fail(nil, "No host:port specified.")
	}
	if len(protoset) == 0 && len(protoFiles) == 0 && target == "" {
		fail(nil, "No host:port specified, no protoset specified, and no proto sources specified.")
	}
	if len(protoset) > 0 && len(reflHeaders) > 0 {
		warn("The -reflect-header argument is not used when -protoset files are used.")
	}
	if len(protoset) > 0 && len(protoFiles) > 0 {
		fail(nil, "Use either -protoset files or -proto files, but not both.")
	}
	if len(importPaths) > 0 && len(protoFiles) == 0 {
		warn("The -import-path argument is not used unless -proto files are used.")
	}
	if !reflection.val && len(protoset) == 0 && len(protoFiles) == 0 {
		fail(nil, "No protoset files or proto files specified and -use-reflection set to false.")
	}

	// Protoset or protofiles provided and -use-reflection unset
	if !reflection.set && (len(protoset) > 0 || len(protoFiles) > 0) {
		reflection.val = false
	}

	// Root context; -max-time bounds the entire run (dial + RPC).
	ctx := context.Background()
	if *maxTime > 0 {
		timeout := floatSecondsToDuration(*maxTime)
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, timeout)
		defer cancel()
	}

	// dial establishes the client connection on demand, applying the
	// transport-security flags (plaintext / ALTS / TLS) and dial options.
	dial := func() *grpc.ClientConn {
		dialTiming := rootTiming.Child("Dial")
		defer dialTiming.Done()
		dialTime := 10 * time.Second // default dial timeout when -connect-timeout unset
		if *connectTimeout > 0 {
			dialTime = floatSecondsToDuration(*connectTimeout)
		}
		ctx, cancel := context.WithTimeout(ctx, dialTime)
		defer cancel()
		var opts []grpc.DialOption
		if *keepaliveTime > 0 {
			timeout := floatSecondsToDuration(*keepaliveTime)
			opts = append(opts, grpc.WithKeepaliveParams(keepalive.ClientParameters{
				Time:    timeout,
				Timeout: timeout,
			}))
		}
		if *maxMsgSz > 0 {
			opts = append(opts, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(*maxMsgSz)))
		}
		// isUnixSocket is a platform hook (see unix.go); when the -unix
		// flag is set, dial a Unix domain socket instead of TCP.
		network := "tcp"
		if isUnixSocket != nil && isUnixSocket() {
			network = "unix"
			if *authority == "" {
				*authority = "localhost"
			}
		}
		var creds credentials.TransportCredentials
		if *plaintext {
			if *authority != "" {
				opts = append(opts, grpc.WithAuthority(*authority))
			}
		} else if *usealts {
			clientOptions := alts.DefaultClientOptions()
			if len(altsTargetServiceAccounts) > 0 {
				clientOptions.TargetServiceAccounts = altsTargetServiceAccounts
			}
			if *altsHandshakerServiceAddress != "" {
				clientOptions.HandshakerServiceAddress = *altsHandshakerServiceAddress
			}
			creds = alts.NewClientCreds(clientOptions)
		} else if usetls {
			tlsTiming := dialTiming.Child("TLS Setup")
			defer tlsTiming.Done()

			tlsConf, err := grpcurl.ClientTLSConfig(*insecure, *cacert, *cert, *key)
			if err != nil {
				fail(err, "Failed to create TLS config")
			}

			// Honor SSLKEYLOGFILE so the TLS session keys can be captured
			// for debugging (e.g. Wireshark decryption).
			sslKeylogFile := os.Getenv("SSLKEYLOGFILE")
			if sslKeylogFile != "" {
				w, err := os.OpenFile(sslKeylogFile, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0600)
				if err != nil {
					fail(err, "Could not open SSLKEYLOGFILE %s", sslKeylogFile)
				}
				tlsConf.KeyLogWriter = w
			}

			creds = credentials.NewTLS(tlsConf)

			// can use either -servername or -authority; but not both
			if *serverName != "" && *authority != "" {
				if *serverName == *authority {
					warn("Both -servername and -authority are present; prefer only -authority.")
				} else {
					fail(nil, "Cannot specify different values for -servername and -authority.")
				}
			}
			overrideName := *serverName
			if overrideName == "" {
				overrideName = *authority
			}

			if overrideName != "" {
				opts = append(opts, grpc.WithAuthority(overrideName))
			}
			tlsTiming.Done()
		} else {
			panic("Should have defaulted to use TLS.")
		}

		// Build the user-agent string, prepending any -user-agent value.
		grpcurlUA := "grpcurl/" + version
		if version == noVersion {
			grpcurlUA = "grpcurl/dev-build (no version set)"
		}
		if *userAgent != "" {
			grpcurlUA = *userAgent + " " + grpcurlUA
		}
		opts = append(opts, grpc.WithUserAgent(grpcurlUA))

		blockingDialTiming := dialTiming.Child("BlockingDial")
		defer blockingDialTiming.Done()
		cc, err := grpcurl.BlockingDial(ctx, network, target, creds, opts...)
		if err != nil {
			fail(err, "Failed to dial target host %q", target)
		}
		return cc
	}
	// printFormattedStatus renders an RPC status with the active formatter
	// (used by -format-error).
	printFormattedStatus := func(w io.Writer, stat *status.Status, formatter grpcurl.Formatter) {
		formattedStatus, err := formatter(stat.Proto())
		if err != nil {
			fmt.Fprintf(w, "ERROR: %v", err.Error())
		}
		fmt.Fprint(w, formattedStatus)
	}

	// -expand-headers: substitute ${ENV_VAR} references in header values.
	if *expandHeaders {
		var err error
		addlHeaders, err = grpcurl.ExpandHeaders(addlHeaders)
		if err != nil {
			fail(err, "Failed to expand additional headers")
		}
		rpcHeaders, err = grpcurl.ExpandHeaders(rpcHeaders)
		if err != nil {
			fail(err, "Failed to expand rpc headers")
		}
		reflHeaders, err = grpcurl.ExpandHeaders(reflHeaders)
		if err != nil {
			fail(err, "Failed to expand reflection headers")
		}
	}

	// Assemble the descriptor source: proto descriptors from files and/or
	// the server's reflection service (combined via compositeSource).
	var cc *grpc.ClientConn
	var descSource grpcurl.DescriptorSource
	var refClient *grpcreflect.Client
	var fileSource grpcurl.DescriptorSource
	if len(protoset) > 0 {
		var err error
		fileSource, err = grpcurl.DescriptorSourceFromProtoSets(protoset...)
		if err != nil {
			fail(err, "Failed to process proto descriptor sets.")
		}
	} else if len(protoFiles) > 0 {
		var err error
		fileSource, err = grpcurl.DescriptorSourceFromProtoFiles(importPaths, protoFiles...)
		if err != nil {
			fail(err, "Failed to process proto source files.")
		}
	}
	if reflection.val {
		md := grpcurl.MetadataFromHeaders(append(addlHeaders, reflHeaders...))
		refCtx := metadata.NewOutgoingContext(ctx, md)
		cc = dial()
		refClient = grpcreflect.NewClientAuto(refCtx, cc)
		refClient.AllowMissingFileDescriptors()
		reflSource := grpcurl.DescriptorSourceFromServer(ctx, refClient)
		if fileSource != nil {
			descSource = compositeSource{reflSource, fileSource}
		} else {
			descSource = reflSource
		}
	} else {
		descSource = fileSource
	}

	// arrange for the RPCs to be cleanly shutdown
	reset := func() {
		if refClient != nil {
			refClient.Reset()
			refClient = nil
		}
		if cc != nil {
			cc.Close()
			cc = nil
		}
	}
	defer reset()
	exit = func(code int) {
		// since defers aren't run by os.Exit...
		reset()
		os.Exit(code)
	}

	if list {
		// 'list' with no symbol lists services; with a service name it
		// lists that service's methods.
		if symbol == "" {
			svcs, err := grpcurl.ListServices(descSource)
			if err != nil {
				fail(err, "Failed to list services")
			}
			if len(svcs) == 0 {
				fmt.Println("(No services)")
			} else {
				for _, svc := range svcs {
					fmt.Printf("%s\n", svc)
				}
			}
			if err := writeProtoset(descSource, svcs...); err != nil {
				fail(err, "Failed to write protoset to %s", *protosetOut)
			}
			if err := writeProtos(descSource, svcs...); err != nil {
				fail(err, "Failed to write protos to %s", *protoOut)
			}
		} else {
			methods, err := grpcurl.ListMethods(descSource, symbol)
			if err != nil {
				fail(err, "Failed to list methods for service %q", symbol)
			}
			if len(methods) == 0 {
				fmt.Println("(No methods)") // probably unlikely
			} else {
				for _, m := range methods {
					fmt.Printf("%s\n", m)
				}
			}
			if err := writeProtoset(descSource, symbol); err != nil {
				fail(err, "Failed to write protoset to %s", *protosetOut)
			}
			if err := writeProtos(descSource, symbol); err != nil {
				fail(err, "Failed to write protos to %s", *protoOut)
			}
		}

	} else if describe {
		// 'describe' shows descriptors for the given symbol, or for all
		// exposed services when no symbol was given.
		var symbols []string
		if symbol != "" {
			symbols = []string{symbol}
		} else {
			// if no symbol given, describe all exposed services
			svcs, err := descSource.ListServices()
			if err != nil {
				fail(err, "Failed to list services")
			}
			if len(svcs) == 0 {
				fmt.Println("Server returned an empty list of exposed services")
			}
			symbols = svcs
		}
		for _, s := range symbols {
			// Tolerate a leading dot on fully-qualified names.
			if s[0] == '.' {
				s = s[1:]
			}

			dsc, err := descSource.FindSymbol(s)
			if err != nil {
				fail(err, "Failed to resolve symbol %q", s)
			}

			fqn := dsc.GetFullyQualifiedName()
			// Produce a human-readable phrase for the kind of descriptor
			// found; map-entry and group messages are re-pointed at the
			// field that uses them, which reads better.
			var elementType string
			switch d := dsc.(type) {
			case *desc.MessageDescriptor:
				elementType = "a message"
				parent, ok := d.GetParent().(*desc.MessageDescriptor)
				if ok {
					if d.IsMapEntry() {
						for _, f := range parent.GetFields() {
							if f.IsMap() && f.GetMessageType() == d {
								// found it: describe the map field instead
								elementType = "the entry type for a map field"
								dsc = f
								break
							}
						}
					} else {
						// see if it's a group
						for _, f := range parent.GetFields() {
							if f.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP && f.GetMessageType() == d {
								// found it: describe the map field instead
								elementType = "the type of a group field"
								dsc = f
								break
							}
						}
					}
				}
			case *desc.FieldDescriptor:
				elementType = "a field"
				if d.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP {
					elementType = "a group field"
				} else if d.IsExtension() {
					elementType = "an extension"
				}
			case *desc.OneOfDescriptor:
				elementType = "a one-of"
			case *desc.EnumDescriptor:
				elementType = "an enum"
			case *desc.EnumValueDescriptor:
				elementType = "an enum value"
			case *desc.ServiceDescriptor:
				elementType = "a service"
			case *desc.MethodDescriptor:
				elementType = "a method"
			default:
				err = fmt.Errorf("descriptor has unrecognized type %T", dsc)
				fail(err, "Failed to describe symbol %q", s)
			}

			txt, err := grpcurl.GetDescriptorText(dsc, descSource)
			if err != nil {
				fail(err, "Failed to describe symbol %q", s)
			}
			fmt.Printf("%s is %s:\n", fqn, elementType)
			fmt.Println(txt)

			if dsc, ok := dsc.(*desc.MessageDescriptor); ok && *msgTemplate {
				// for messages, also show a template in JSON, to make it easier to
				// create a request to invoke an RPC
				tmpl := grpcurl.MakeTemplate(dsc)
				options := grpcurl.FormatOptions{EmitJSONDefaultFields: true}
				_, formatter, err := grpcurl.RequestParserAndFormatter(grpcurl.Format(*format), descSource, nil, options)
				if err != nil {
					fail(err, "Failed to construct formatter for %q", *format)
				}
				str, err := formatter(tmpl)
				if err != nil {
					fail(err, "Failed to print template for message %s", s)
				}
				fmt.Println("\nMessage template:")
				fmt.Println(str)
			}
		}
		if err := writeProtoset(descSource, symbols...); err != nil {
			fail(err, "Failed to write protoset to %s", *protosetOut)
		}
		if err := writeProtos(descSource, symbol); err != nil {
			fail(err, "Failed to write protos to %s", *protoOut)
		}

	} else {
		// Invoke an RPC
		if cc == nil {
			cc = dial()
		}
		// Request body comes from stdin when -d is "@", else from -d itself.
		var in io.Reader
		if *data == "@" {
			in = os.Stdin
		} else {
			in = strings.NewReader(*data)
		}

		// if not verbose output, then also include record delimiters
		// between each message, so output could potentially be piped
		// to another grpcurl process
		includeSeparators := verbosityLevel == 0
		options := grpcurl.FormatOptions{
			EmitJSONDefaultFields: *emitDefaults,
			IncludeTextSeparator:  includeSeparators,
			AllowUnknownFields:    *allowUnknownFields,
		}
		rf, formatter, err := grpcurl.RequestParserAndFormatter(grpcurl.Format(*format), descSource, in, options)
		if err != nil {
			fail(err, "Failed to construct request parser and formatter for %q", *format)
		}
		h := &grpcurl.DefaultEventHandler{
			Out:            os.Stdout,
			Formatter:      formatter,
			VerbosityLevel: verbosityLevel,
		}

		invokeTiming := rootTiming.Child("InvokeRPC")
		err = grpcurl.InvokeRPC(ctx, descSource, cc, symbol, append(addlHeaders, rpcHeaders...), h, rf.Next)
		invokeTiming.Done()
		if err != nil {
			// With -format-error, a gRPC status error is rendered via the
			// formatter below instead of aborting here.
			if errStatus, ok := status.FromError(err); ok && *formatError {
				h.Status = errStatus
			} else {
				fail(err, "Error invoking method %q", symbol)
			}
		}
		reqSuffix := ""
		respSuffix := ""
		reqCount := rf.NumRequests()
		if reqCount != 1 {
			reqSuffix = "s"
		}
		if h.NumResponses != 1 {
			respSuffix = "s"
		}
		if verbosityLevel > 0 {
			fmt.Printf("Sent %d request%s and received %d response%s\n", reqCount, reqSuffix, h.NumResponses, respSuffix)
		}
		if h.Status.Code() != codes.OK {
			if *formatError {
				printFormattedStatus(os.Stderr, h.Status, formatter)
			} else {
				grpcurl.PrintStatus(os.Stderr, h.Status, formatter)
			}
			// Exit code encodes the gRPC status code (offset to avoid
			// clashing with codes 1/2 used for other failures).
			exit(statusCodeOffset + int(h.Status.Code()))
		}
	}
}
+
+func dumpTiming(td *timingData, lvl int) {
+ ind := ""
+ for x := 0; x < lvl; x++ {
+ ind += " "
+ }
+ fmt.Printf("%s%s: %s\n", ind, td.Title, td.Value)
+ for _, sd := range td.Sub {
+ dumpTiming(sd, lvl+1)
+ }
+}
+
// usage prints the command's overall help text to stderr (invoked for
// -help and on CLI usage errors) followed by documentation for every flag.
func usage() {
	fmt.Fprintf(os.Stderr, `Usage:
	%s [flags] [address] [list|describe] [symbol]

The 'address' is only optional when used with 'list' or 'describe' and a
protoset or proto flag is provided.

If 'list' is indicated, the symbol (if present) should be a fully-qualified
service name. If present, all methods of that service are listed. If not
present, all exposed services are listed, or all services defined in protosets.

If 'describe' is indicated, the descriptor for the given symbol is shown. The
symbol should be a fully-qualified service, enum, or message name. If no symbol
is given then the descriptors for all exposed or known services are shown.

If neither verb is present, the symbol must be a fully-qualified method name in
'service/method' or 'service.method' format. In this case, the request body will
be used to invoke the named method. If no body is given but one is required
(i.e. the method is unary or server-streaming), an empty instance of the
method's request type will be sent.

The address will typically be in the form "host:port" where host can be an IP
address or a hostname and port is a numeric port or service name. If an IPv6
address is given, it must be surrounded by brackets, like "[2001:db8::1]". For
Unix variants, if a -unix=true flag is present, then the address must be the
path to the domain socket.

Available flags:
`, os.Args[0])
	flags.PrintDefaults()
}
+
// prettify normalizes a multi-line doc string for flag help: leading and
// trailing whitespace is stripped from every line and blank lines are
// dropped, so help text can be written as indented raw literals in source.
func prettify(docString string) string {
	var kept []string
	for _, line := range strings.Split(docString, "\n") {
		if trimmed := strings.TrimSpace(line); trimmed != "" {
			kept = append(kept, trimmed)
		}
	}
	return strings.Join(kept, "\n")
}
+
// warn prints a non-fatal diagnostic to stderr, prefixed with "Warning:".
// The msg argument is a printf-style format string for args.
func warn(msg string, args ...interface{}) {
	format := "Warning: " + msg + "\n"
	fmt.Fprintf(os.Stderr, format, args...)
}
+
+func fail(err error, msg string, args ...interface{}) {
+ if err != nil {
+ msg += ": %v"
+ args = append(args, err)
+ }
+ fmt.Fprintf(os.Stderr, msg, args...)
+ fmt.Fprintln(os.Stderr)
+ if err != nil {
+ exit(1)
+ } else {
+ // nil error means it was CLI usage issue
+ fmt.Fprintf(os.Stderr, "Try '%s -help' for more details.\n", os.Args[0])
+ exit(2)
+ }
+}
+
+func writeProtoset(descSource grpcurl.DescriptorSource, symbols ...string) error {
+ if *protosetOut == "" {
+ return nil
+ }
+ f, err := os.Create(*protosetOut)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ return grpcurl.WriteProtoset(f, descSource, symbols...)
+}
+
+func writeProtos(descSource grpcurl.DescriptorSource, symbols ...string) error {
+ if *protoOut == "" {
+ return nil
+ }
+ return grpcurl.WriteProtoFiles(*protoOut, descSource, symbols...)
+}
+
// optionalBoolFlag is a tri-state boolean flag value: it distinguishes
// "never given on the command line" (set == false) from an explicit
// true or false, so defaults can depend on other flags.
type optionalBoolFlag struct {
	set, val bool
}

// String renders the flag's state for help and diagnostic output.
func (f *optionalBoolFlag) String() string {
	if f.set {
		return strconv.FormatBool(f.val)
	}
	return "unset"
}

// Set parses s as a boolean and records that the flag was given explicitly.
func (f *optionalBoolFlag) Set(s string) error {
	v, err := strconv.ParseBool(s)
	if err != nil {
		return err
	}
	f.set, f.val = true, v
	return nil
}

// IsBoolFlag marks this as a boolean flag so the flag package accepts the
// bare form (e.g. -use-reflection) without an explicit "=value".
func (f *optionalBoolFlag) IsBoolFlag() bool {
	return true
}
+
+func floatSecondsToDuration(seconds float64) time.Duration {
+ durationFloat := seconds * float64(time.Second)
+ if durationFloat > math.MaxInt64 {
+ // Avoid overflow
+ return math.MaxInt64
+ }
+ return time.Duration(durationFloat)
+}
diff --git a/vendor/github.com/fullstorydev/grpcurl/cmd/grpcurl/unix.go b/vendor/github.com/fullstorydev/grpcurl/cmd/grpcurl/unix.go
new file mode 100644
index 0000000..cae4bed
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/cmd/grpcurl/unix.go
@@ -0,0 +1,15 @@
+//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || windows
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows
+
+package main
+
var (
	// -unix flag: registered only on the platforms named in this file's
	// build constraints.
	unix = flags.Bool("unix", false, prettify(`
		Indicates that the server address is the path to a Unix domain socket.`))
)

// init wires the platform hook consumed by main's dial logic: on these
// platforms, the -unix flag decides whether to dial a Unix domain socket
// instead of TCP.
func init() {
	isUnixSocket = func() bool {
		return *unix
	}
}
diff --git a/vendor/github.com/fullstorydev/grpcurl/desc_source.go b/vendor/github.com/fullstorydev/grpcurl/desc_source.go
new file mode 100644
index 0000000..258c346
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/desc_source.go
@@ -0,0 +1,369 @@
+package grpcurl
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sync"
+
+ "github.com/golang/protobuf/proto" //lint:ignore SA1019 we have to import these because some of their types appear in exported API
+ "github.com/jhump/protoreflect/desc" //lint:ignore SA1019 same as above
+ "github.com/jhump/protoreflect/desc/protoparse" //lint:ignore SA1019 same as above
+ "github.com/jhump/protoreflect/desc/protoprint"
+ "github.com/jhump/protoreflect/dynamic" //lint:ignore SA1019 same as above
+ "github.com/jhump/protoreflect/grpcreflect"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+ "google.golang.org/protobuf/types/descriptorpb"
+)
+
// ErrReflectionNotSupported is returned by DescriptorSource operations that
// rely on interacting with the reflection service when the source does not
// actually expose the reflection service. When this occurs, an alternate source
// (like file descriptor sets) must be used.
var ErrReflectionNotSupported = errors.New("server does not support the reflection API")

// DescriptorSource is a source of protobuf descriptor information. It can be backed by a FileDescriptorSet
// proto (like a file generated by protoc) or a remote server that supports the reflection API.
type DescriptorSource interface {
	// ListServices returns a list of fully-qualified service names. It will be all services in a set of
	// descriptor files or the set of all services exposed by a gRPC server.
	ListServices() ([]string, error)
	// FindSymbol returns a descriptor for the given fully-qualified symbol name
	// (a service, method, message, enum, field, etc.).
	FindSymbol(fullyQualifiedName string) (desc.Descriptor, error)
	// AllExtensionsForType returns all known extension fields that extend the given message type name.
	AllExtensionsForType(typeName string) ([]*desc.FieldDescriptor, error)
}
+
+// DescriptorSourceFromProtoSets creates a DescriptorSource that is backed by the named files, whose contents
+// are encoded FileDescriptorSet protos.
+func DescriptorSourceFromProtoSets(fileNames ...string) (DescriptorSource, error) {
+ files := &descriptorpb.FileDescriptorSet{}
+ for _, fileName := range fileNames {
+ b, err := os.ReadFile(fileName)
+ if err != nil {
+ return nil, fmt.Errorf("could not load protoset file %q: %v", fileName, err)
+ }
+ var fs descriptorpb.FileDescriptorSet
+ err = proto.Unmarshal(b, &fs)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse contents of protoset file %q: %v", fileName, err)
+ }
+ files.File = append(files.File, fs.File...)
+ }
+ return DescriptorSourceFromFileDescriptorSet(files)
+}
+
+// DescriptorSourceFromProtoFiles creates a DescriptorSource that is backed by the named files,
+// whose contents are Protocol Buffer source files. The given importPaths are used to locate
+// any imported files.
+func DescriptorSourceFromProtoFiles(importPaths []string, fileNames ...string) (DescriptorSource, error) {
+ fileNames, err := protoparse.ResolveFilenames(importPaths, fileNames...)
+ if err != nil {
+ return nil, err
+ }
+ p := protoparse.Parser{
+ ImportPaths: importPaths,
+ InferImportPaths: len(importPaths) == 0,
+ IncludeSourceCodeInfo: true,
+ }
+ fds, err := p.ParseFiles(fileNames...)
+ if err != nil {
+ return nil, fmt.Errorf("could not parse given files: %v", err)
+ }
+ return DescriptorSourceFromFileDescriptors(fds...)
+}
+
+// DescriptorSourceFromFileDescriptorSet creates a DescriptorSource that is backed by the FileDescriptorSet.
+func DescriptorSourceFromFileDescriptorSet(files *descriptorpb.FileDescriptorSet) (DescriptorSource, error) {
+ unresolved := map[string]*descriptorpb.FileDescriptorProto{}
+ for _, fd := range files.File {
+ unresolved[fd.GetName()] = fd
+ }
+ resolved := map[string]*desc.FileDescriptor{}
+ for _, fd := range files.File {
+ _, err := resolveFileDescriptor(unresolved, resolved, fd.GetName())
+ if err != nil {
+ return nil, err
+ }
+ }
+ return &fileSource{files: resolved}, nil
+}
+
+func resolveFileDescriptor(unresolved map[string]*descriptorpb.FileDescriptorProto, resolved map[string]*desc.FileDescriptor, filename string) (*desc.FileDescriptor, error) {
+ if r, ok := resolved[filename]; ok {
+ return r, nil
+ }
+ fd, ok := unresolved[filename]
+ if !ok {
+ return nil, fmt.Errorf("no descriptor found for %q", filename)
+ }
+ deps := make([]*desc.FileDescriptor, 0, len(fd.GetDependency()))
+ for _, dep := range fd.GetDependency() {
+ depFd, err := resolveFileDescriptor(unresolved, resolved, dep)
+ if err != nil {
+ return nil, err
+ }
+ deps = append(deps, depFd)
+ }
+ result, err := desc.CreateFileDescriptor(fd, deps...)
+ if err != nil {
+ return nil, err
+ }
+ resolved[filename] = result
+ return result, nil
+}
+
+// DescriptorSourceFromFileDescriptors creates a DescriptorSource that is backed by the given
+// file descriptors
+func DescriptorSourceFromFileDescriptors(files ...*desc.FileDescriptor) (DescriptorSource, error) {
+ fds := map[string]*desc.FileDescriptor{}
+ for _, fd := range files {
+ if err := addFile(fd, fds); err != nil {
+ return nil, err
+ }
+ }
+ return &fileSource{files: fds}, nil
+}
+
+func addFile(fd *desc.FileDescriptor, fds map[string]*desc.FileDescriptor) error {
+ name := fd.GetName()
+ if existing, ok := fds[name]; ok {
+ // already added this file
+ if existing != fd {
+ // doh! duplicate files provided
+ return fmt.Errorf("given files include multiple copies of %q", name)
+ }
+ return nil
+ }
+ fds[name] = fd
+ for _, dep := range fd.GetDependencies() {
+ if err := addFile(dep, fds); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
// fileSource is a DescriptorSource backed by a fixed set of already-linked
// file descriptors.
type fileSource struct {
	files  map[string]*desc.FileDescriptor // keyed by file name
	er     *dynamic.ExtensionRegistry      // lazily built by AllExtensionsForType
	erInit sync.Once                       // guards the one-time construction of er
}
+
+func (fs *fileSource) ListServices() ([]string, error) {
+ set := map[string]bool{}
+ for _, fd := range fs.files {
+ for _, svc := range fd.GetServices() {
+ set[svc.GetFullyQualifiedName()] = true
+ }
+ }
+ sl := make([]string, 0, len(set))
+ for svc := range set {
+ sl = append(sl, svc)
+ }
+ return sl, nil
+}
+
+// GetAllFiles returns all of the underlying file descriptors. This is
+// more thorough and more efficient than the fallback strategy used by
+// the GetAllFiles package method, for enumerating all files from a
+// descriptor source.
+func (fs *fileSource) GetAllFiles() ([]*desc.FileDescriptor, error) {
+ files := make([]*desc.FileDescriptor, len(fs.files))
+ i := 0
+ for _, fd := range fs.files {
+ files[i] = fd
+ i++
+ }
+ return files, nil
+}
+
+func (fs *fileSource) FindSymbol(fullyQualifiedName string) (desc.Descriptor, error) {
+ for _, fd := range fs.files {
+ if dsc := fd.FindSymbol(fullyQualifiedName); dsc != nil {
+ return dsc, nil
+ }
+ }
+ return nil, notFound("Symbol", fullyQualifiedName)
+}
+
+func (fs *fileSource) AllExtensionsForType(typeName string) ([]*desc.FieldDescriptor, error) {
+ fs.erInit.Do(func() {
+ fs.er = &dynamic.ExtensionRegistry{}
+ for _, fd := range fs.files {
+ fs.er.AddExtensionsFromFile(fd)
+ }
+ })
+ return fs.er.AllExtensionsForType(typeName), nil
+}
+
// DescriptorSourceFromServer creates a DescriptorSource that uses the given gRPC reflection client
// to interrogate a server for descriptor information. If the server does not support the reflection
// API then the various DescriptorSource methods will return ErrReflectionNotSupported
func DescriptorSourceFromServer(_ context.Context, refClient *grpcreflect.Client) DescriptorSource {
	return serverSource{client: refClient}
}

// serverSource is a DescriptorSource backed by a server's reflection service,
// accessed through the wrapped grpcreflect client.
type serverSource struct {
	client *grpcreflect.Client
}
+
+func (ss serverSource) ListServices() ([]string, error) {
+ svcs, err := ss.client.ListServices()
+ return svcs, reflectionSupport(err)
+}
+
+func (ss serverSource) FindSymbol(fullyQualifiedName string) (desc.Descriptor, error) {
+ file, err := ss.client.FileContainingSymbol(fullyQualifiedName)
+ if err != nil {
+ return nil, reflectionSupport(err)
+ }
+ d := file.FindSymbol(fullyQualifiedName)
+ if d == nil {
+ return nil, notFound("Symbol", fullyQualifiedName)
+ }
+ return d, nil
+}
+
+func (ss serverSource) AllExtensionsForType(typeName string) ([]*desc.FieldDescriptor, error) {
+ var exts []*desc.FieldDescriptor
+ nums, err := ss.client.AllExtensionNumbersForType(typeName)
+ if err != nil {
+ return nil, reflectionSupport(err)
+ }
+ for _, fieldNum := range nums {
+ ext, err := ss.client.ResolveExtension(typeName, fieldNum)
+ if err != nil {
+ return nil, reflectionSupport(err)
+ }
+ exts = append(exts, ext)
+ }
+ return exts, nil
+}
+
+func reflectionSupport(err error) error {
+ if err == nil {
+ return nil
+ }
+ if stat, ok := status.FromError(err); ok && stat.Code() == codes.Unimplemented {
+ return ErrReflectionNotSupported
+ }
+ return err
+}
+
+// WriteProtoset will use the given descriptor source to resolve all of the given
+// symbols and write a proto file descriptor set with their definitions to the
+// given output. The output will include descriptors for all files in which the
+// symbols are defined as well as their transitive dependencies.
+func WriteProtoset(out io.Writer, descSource DescriptorSource, symbols ...string) error {
+ filenames, fds, err := getFileDescriptors(symbols, descSource)
+ if err != nil {
+ return err
+ }
+ // now expand that to include transitive dependencies in topologically sorted
+ // order (such that file always appears after its dependencies)
+ expandedFiles := make(map[string]struct{}, len(fds))
+ allFilesSlice := make([]*descriptorpb.FileDescriptorProto, 0, len(fds))
+ for _, filename := range filenames {
+ allFilesSlice = addFilesToSet(allFilesSlice, expandedFiles, fds[filename])
+ }
+ // now we can serialize to file
+ b, err := proto.Marshal(&descriptorpb.FileDescriptorSet{File: allFilesSlice})
+ if err != nil {
+ return fmt.Errorf("failed to serialize file descriptor set: %v", err)
+ }
+ if _, err := out.Write(b); err != nil {
+ return fmt.Errorf("failed to write file descriptor set: %v", err)
+ }
+ return nil
+}
+
+func addFilesToSet(allFiles []*descriptorpb.FileDescriptorProto, expanded map[string]struct{}, fd *desc.FileDescriptor) []*descriptorpb.FileDescriptorProto {
+ if _, ok := expanded[fd.GetName()]; ok {
+ // already seen this one
+ return allFiles
+ }
+ expanded[fd.GetName()] = struct{}{}
+ // add all dependencies first
+ for _, dep := range fd.GetDependencies() {
+ allFiles = addFilesToSet(allFiles, expanded, dep)
+ }
+ return append(allFiles, fd.AsFileDescriptorProto())
+}
+
+// WriteProtoFiles will use the given descriptor source to resolve all the given
+// symbols and write proto files with their definitions to the given output directory.
+func WriteProtoFiles(outProtoDirPath string, descSource DescriptorSource, symbols ...string) error {
+ filenames, fds, err := getFileDescriptors(symbols, descSource)
+ if err != nil {
+ return err
+ }
+ // now expand that to include transitive dependencies in topologically sorted
+ // order (such that file always appears after its dependencies)
+ expandedFiles := make(map[string]struct{}, len(fds))
+ allFileDescriptors := make([]*desc.FileDescriptor, 0, len(fds))
+ for _, filename := range filenames {
+ allFileDescriptors = addFilesToFileDescriptorList(allFileDescriptors, expandedFiles, fds[filename])
+ }
+ pr := protoprint.Printer{}
+ // now we can serialize to files
+ for i := range allFileDescriptors {
+ if err := writeProtoFile(outProtoDirPath, allFileDescriptors[i], &pr); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeProtoFile(outProtoDirPath string, fd *desc.FileDescriptor, pr *protoprint.Printer) error {
+ outFile := filepath.Join(outProtoDirPath, fd.GetFullyQualifiedName())
+ outDir := filepath.Dir(outFile)
+ if err := os.MkdirAll(outDir, 0777); err != nil {
+ return fmt.Errorf("failed to create directory %q: %w", outDir, err)
+ }
+
+ f, err := os.Create(outFile)
+ if err != nil {
+ return fmt.Errorf("failed to create proto file %q: %w", outFile, err)
+ }
+ defer f.Close()
+ if err := pr.PrintProtoFile(fd, f); err != nil {
+ return fmt.Errorf("failed to write proto file %q: %w", outFile, err)
+ }
+ return nil
+}
+
+func getFileDescriptors(symbols []string, descSource DescriptorSource) ([]string, map[string]*desc.FileDescriptor, error) {
+ // compute set of file descriptors
+ filenames := make([]string, 0, len(symbols))
+ fds := make(map[string]*desc.FileDescriptor, len(symbols))
+ for _, sym := range symbols {
+ d, err := descSource.FindSymbol(sym)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to find descriptor for %q: %v", sym, err)
+ }
+ fd := d.GetFile()
+ if _, ok := fds[fd.GetName()]; !ok {
+ fds[fd.GetName()] = fd
+ filenames = append(filenames, fd.GetName())
+ }
+ }
+ return filenames, fds, nil
+}
+
+func addFilesToFileDescriptorList(allFiles []*desc.FileDescriptor, expanded map[string]struct{}, fd *desc.FileDescriptor) []*desc.FileDescriptor {
+ if _, ok := expanded[fd.GetName()]; ok {
+ // already seen this one
+ return allFiles
+ }
+ expanded[fd.GetName()] = struct{}{}
+ // add all dependencies first
+ for _, dep := range fd.GetDependencies() {
+ allFiles = addFilesToFileDescriptorList(allFiles, expanded, dep)
+ }
+ return append(allFiles, fd)
+}
diff --git a/vendor/github.com/fullstorydev/grpcurl/download_protoc.sh b/vendor/github.com/fullstorydev/grpcurl/download_protoc.sh
new file mode 100644
index 0000000..6bbd135
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/download_protoc.sh
@@ -0,0 +1,35 @@
#!/usr/bin/env bash

# Downloads the protoc release matching $PROTOC_VERSION into ./.tmp/protoc,
# skipping the download when the cached binary already reports that version.
set -euo pipefail

cd "$(dirname "$0")"

if [[ -z "${PROTOC_VERSION:-}" ]]; then
  echo "Set PROTOC_VERSION env var to indicate the version to download" >&2
  exit 1
fi
PROTOC_OS="$(uname -s)"
PROTOC_ARCH="$(uname -m)"
case "${PROTOC_OS}" in
  Darwin) PROTOC_OS="osx" ;;
  Linux) PROTOC_OS="linux" ;;
  *)
    echo "Invalid value for uname -s: ${PROTOC_OS}" >&2
    exit 1
    ;;
esac

# This is for macs with M1 chips. Precompiled binaries for osx/amd64 are not available for download, so for that case
# we download the x86_64 version instead. This will work as long as rosetta2 is installed.
if [ "$PROTOC_OS" = "osx" ] && [ "$PROTOC_ARCH" = "arm64" ]; then
  PROTOC_ARCH="x86_64"
fi

PROTOC="${PWD}/.tmp/protoc/bin/protoc"

if [[ "$(${PROTOC} --version 2>/dev/null)" != "libprotoc 3.${PROTOC_VERSION}" ]]; then
  rm -rf ./.tmp/protoc
  mkdir -p .tmp/protoc
  # -f: fail on HTTP errors instead of saving an HTML error page as the zip
  # (a bare redirect would exit 0 and leave a corrupt archive behind).
  curl -fsSL "https://github.com/google/protobuf/releases/download/v${PROTOC_VERSION}/protoc-${PROTOC_VERSION}-${PROTOC_OS}-${PROTOC_ARCH}.zip" -o .tmp/protoc/protoc.zip
  (cd ./.tmp/protoc && unzip protoc.zip)
fi
+
diff --git a/vendor/github.com/fullstorydev/grpcurl/format.go b/vendor/github.com/fullstorydev/grpcurl/format.go
new file mode 100644
index 0000000..e7f576b
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/format.go
@@ -0,0 +1,554 @@
+package grpcurl
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "sync"
+
+ "github.com/golang/protobuf/jsonpb" //lint:ignore SA1019 we have to import these because some of their types appear in exported API
+ "github.com/golang/protobuf/proto" //lint:ignore SA1019 same as above
+ "github.com/jhump/protoreflect/desc" //lint:ignore SA1019 same as above
+ "github.com/jhump/protoreflect/dynamic" //lint:ignore SA1019 same as above
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+)
+
// RequestParser processes input into messages.
type RequestParser interface {
	// Next parses input data into the given request message. If called after
	// input is exhausted, it returns io.EOF. If the caller re-uses the same
	// instance in multiple calls to Next, it should call msg.Reset() in between
	// each call.
	Next(msg proto.Message) error
	// NumRequests returns the number of messages that have been parsed and
	// returned by a call to Next.
	//
	// Note: the implementations in this file count a message as soon as it is
	// framed from the input, so a message whose unmarshaling fails is still
	// included in the count.
	NumRequests() int
}
+
// jsonRequestParser frames one JSON value at a time off a stream (via
// encoding/json) and converts it to a protobuf message (via jsonpb).
type jsonRequestParser struct {
	dec          *json.Decoder      // framing: yields one JSON value per Decode call
	unmarshaler  jsonpb.Unmarshaler // JSON -> proto conversion (carries the Any resolver)
	requestCount int                // messages framed so far (see NumRequests)
}

// NewJSONRequestParser returns a RequestParser that reads data in JSON format
// from the given reader. The given resolver is used to assist with decoding of
// google.protobuf.Any messages.
//
// Input data that contains more than one message should just include all
// messages concatenated (though whitespace is necessary to separate some kinds
// of values in JSON).
//
// If the given reader has no data, the returned parser will return io.EOF on
// the very first call.
func NewJSONRequestParser(in io.Reader, resolver jsonpb.AnyResolver) RequestParser {
	return &jsonRequestParser{
		dec:         json.NewDecoder(in),
		unmarshaler: jsonpb.Unmarshaler{AnyResolver: resolver},
	}
}

// NewJSONRequestParserWithUnmarshaler is like NewJSONRequestParser but
// accepts a protobuf jsonpb.Unmarshaler instead of jsonpb.AnyResolver.
func NewJSONRequestParserWithUnmarshaler(in io.Reader, unmarshaler jsonpb.Unmarshaler) RequestParser {
	return &jsonRequestParser{
		dec:         json.NewDecoder(in),
		unmarshaler: unmarshaler,
	}
}

// Next decodes the next JSON value from the stream into m. Returns io.EOF
// when the input is exhausted.
func (f *jsonRequestParser) Next(m proto.Message) error {
	var msg json.RawMessage
	if err := f.dec.Decode(&msg); err != nil {
		return err
	}
	// Count the message once framing succeeds, even if the protobuf
	// conversion below fails.
	f.requestCount++
	return f.unmarshaler.Unmarshal(bytes.NewReader(msg), m)
}

// NumRequests reports how many JSON values have been consumed so far.
func (f *jsonRequestParser) NumRequests() int {
	return f.requestCount
}
+
const (
	// textSeparatorChar is the ASCII 'Record Separator' (0x1E) used to delimit
	// consecutive messages in protobuf-text-format input and output.
	textSeparatorChar = '\x1e'
)

// textRequestParser reads record-separator-delimited protobuf text messages.
type textRequestParser struct {
	r            *bufio.Reader
	err          error // sticky read error (including io.EOF) surfaced on the call after it occurs
	requestCount int   // messages parsed so far (see NumRequests)
}

// NewTextRequestParser returns a RequestParser that reads data in the protobuf
// text format from the given reader.
//
// Input data that contains more than one message should include an ASCII
// 'Record Separator' character (0x1E) between each message.
//
// Empty text is a valid text format and represents an empty message. So if the
// given reader has no data, the returned parser will yield an empty message
// for the first call to Next and then return io.EOF thereafter. This also means
// that if the input data ends with a record separator, then a final empty
// message will be parsed *after* the separator.
func NewTextRequestParser(in io.Reader) RequestParser {
	return &textRequestParser{r: bufio.NewReader(in)}
}

// Next parses the next record-separator-delimited message into m. On io.EOF
// it still parses whatever bytes were read (possibly an empty message) and
// only returns the stored EOF on the following call.
func (f *textRequestParser) Next(m proto.Message) error {
	if f.err != nil {
		return f.err
	}

	var b []byte
	// f.err is stored (not returned) when it is io.EOF so the bytes read
	// before EOF are still parsed below.
	b, f.err = f.r.ReadBytes(textSeparatorChar)
	if f.err != nil && f.err != io.EOF {
		return f.err
	}
	// remove delimiter
	if len(b) > 0 && b[len(b)-1] == textSeparatorChar {
		b = b[:len(b)-1]
	}

	f.requestCount++

	return proto.UnmarshalText(string(b), m)
}

// NumRequests reports how many messages have been parsed so far.
func (f *textRequestParser) NumRequests() int {
	return f.requestCount
}
+
+// Formatter translates messages into string representations.
+type Formatter func(proto.Message) (string, error)
+
+// NewJSONFormatter returns a formatter that returns JSON strings. The JSON will
+// include empty/default values (instead of just omitted them) if emitDefaults
+// is true. The given resolver is used to assist with encoding of
+// google.protobuf.Any messages.
+func NewJSONFormatter(emitDefaults bool, resolver jsonpb.AnyResolver) Formatter {
+ marshaler := jsonpb.Marshaler{
+ EmitDefaults: emitDefaults,
+ AnyResolver: resolver,
+ }
+ // Workaround for indentation issue in jsonpb with Any messages.
+ // Bug was originally fixed in https://github.com/golang/protobuf/pull/834
+ // but later re-introduced before the module was deprecated and frozen.
+ // If jsonpb is ever replaced with google.golang.org/protobuf/encoding/protojson
+ // this workaround will no longer be needed.
+ formatter := func(message proto.Message) (string, error) {
+ output, err := marshaler.MarshalToString(message)
+ if err != nil {
+ return "", err
+ }
+ var buf bytes.Buffer
+ if err := json.Indent(&buf, []byte(output), "", " "); err != nil {
+ return "", err
+ }
+ return buf.String(), nil
+ }
+ return formatter
+}
+
+// NewTextFormatter returns a formatter that returns strings in the protobuf
+// text format. If includeSeparator is true then, when invoked to format
+// multiple messages, all messages after the first one will be prefixed with the
+// ASCII 'Record Separator' character (0x1E).
+func NewTextFormatter(includeSeparator bool) Formatter {
+ tf := textFormatter{useSeparator: includeSeparator}
+ return tf.format
+}
+
+type textFormatter struct {
+ useSeparator bool
+ numFormatted int
+}
+
+var protoTextMarshaler = proto.TextMarshaler{ExpandAny: true}
+
+func (tf *textFormatter) format(m proto.Message) (string, error) {
+ var buf bytes.Buffer
+ if tf.useSeparator && tf.numFormatted > 0 {
+ if err := buf.WriteByte(textSeparatorChar); err != nil {
+ return "", err
+ }
+ }
+
+ // If message implements MarshalText method (such as a *dynamic.Message),
+ // it won't get details about whether or not to format to text compactly
+ // or with indentation. So first see if the message also implements a
+ // MarshalTextIndent method and use that instead if available.
+ type indentMarshaler interface {
+ MarshalTextIndent() ([]byte, error)
+ }
+
+ if indenter, ok := m.(indentMarshaler); ok {
+ b, err := indenter.MarshalTextIndent()
+ if err != nil {
+ return "", err
+ }
+ if _, err := buf.Write(b); err != nil {
+ return "", err
+ }
+ } else if err := protoTextMarshaler.Marshal(&buf, m); err != nil {
+ return "", err
+ }
+
+ // no trailing newline needed
+ str := buf.String()
+ if len(str) > 0 && str[len(str)-1] == '\n' {
+ str = str[:len(str)-1]
+ }
+
+ tf.numFormatted++
+
+ return str, nil
+}
+
// Format of request data. The allowed values are 'json' or 'text'. These are
// the values accepted by RequestParserAndFormatter.
type Format string

const (
	// FormatJSON specifies input data in JSON format. Multiple request values
	// may be concatenated (messages with a JSON representation other than
	// object must be separated by whitespace, such as a newline)
	FormatJSON = Format("json")

	// FormatText specifies input data must be in the protobuf text format.
	// Multiple request values must be separated by the "record separator"
	// ASCII character: 0x1E. The stream should not end in a record separator.
	// If it does, it will be interpreted as a final, blank message after the
	// separator.
	FormatText = Format("text")
)
+
// AnyResolverFromDescriptorSource returns an AnyResolver that will search for
// types using the given descriptor source.
func AnyResolverFromDescriptorSource(source DescriptorSource) jsonpb.AnyResolver {
	return &anyResolver{source: source}
}

// AnyResolverFromDescriptorSourceWithFallback returns an AnyResolver that will
// search for types using the given descriptor source and then fallback to a
// special message if the type is not found. The fallback type will render to
// JSON with a "@type" property, just like an Any message, but also with a
// custom "@value" property that includes the binary encoded payload.
func AnyResolverFromDescriptorSourceWithFallback(source DescriptorSource) jsonpb.AnyResolver {
	res := anyResolver{source: source}
	return &anyResolverWithFallback{AnyResolver: &res}
}

// anyResolver resolves Any type URLs via a DescriptorSource, caching the
// resulting message factories so each type is looked up at most once.
type anyResolver struct {
	source DescriptorSource

	// er accumulates extensions discovered for resolved types; it is only
	// mutated while mu is write-locked (see Resolve).
	er dynamic.ExtensionRegistry

	mu sync.RWMutex
	// mf is lazily created on first resolution; guarded by mu.
	mf *dynamic.MessageFactory
	// resolved caches message-name -> factory; guarded by mu.
	resolved map[string]func() proto.Message
}
+
// Resolve returns a new, empty message instance for the given Any type URL.
// The message name is taken from the final path segment of the URL. Results
// are cached (double-checked locking), and extensions known to the descriptor
// source for the resolved type are registered so they can be parsed.
func (r *anyResolver) Resolve(typeUrl string) (proto.Message, error) {
	mname := typeUrl
	if slash := strings.LastIndex(mname, "/"); slash >= 0 {
		mname = mname[slash+1:]
	}

	// fast path: consult the cache under the read lock
	r.mu.RLock()
	factory := r.resolved[mname]
	r.mu.RUnlock()

	// already resolved?
	if factory != nil {
		return factory(), nil
	}

	r.mu.Lock()
	defer r.mu.Unlock()

	// double-check, in case we were racing with another goroutine
	// that resolved this one
	factory = r.resolved[mname]
	if factory != nil {
		return factory(), nil
	}

	// use descriptor source to resolve message type
	d, err := r.source.FindSymbol(mname)
	if err != nil {
		return nil, err
	}
	md, ok := d.(*desc.MessageDescriptor)
	if !ok {
		return nil, fmt.Errorf("unknown message: %s", typeUrl)
	}
	// populate any extensions for this message, too (if there are any);
	// a failure to *query* extensions is deliberately ignored (best-effort),
	// while a failure to *register* them is reported to the caller
	if exts, err := r.source.AllExtensionsForType(mname); err == nil {
		if err := r.er.AddExtension(exts...); err != nil {
			return nil, err
		}
	}

	if r.mf == nil {
		r.mf = dynamic.NewMessageFactoryWithExtensionRegistry(&r.er)
	}

	factory = func() proto.Message {
		return r.mf.NewMessage(md)
	}
	if r.resolved == nil {
		r.resolved = map[string]func() proto.Message{}
	}
	r.resolved[mname] = factory
	return factory(), nil
}
+
// anyResolverWithFallback can provide a fallback value for unknown
// messages that will format itself to JSON using an "@value" field
// that has the base64-encoded data for the unknown message value.
type anyResolverWithFallback struct {
	jsonpb.AnyResolver
}

// Resolve first delegates to the wrapped resolver, then tries the types
// registered with the proto package, and finally falls back to an unknownAny
// placeholder. It never returns a non-nil error.
func (r anyResolverWithFallback) Resolve(typeUrl string) (proto.Message, error) {
	msg, err := r.AnyResolver.Resolve(typeUrl)
	if err == nil {
		return msg, err
	}

	// Try "default" resolution logic. This mirrors the default behavior
	// of jsonpb, which checks to see if the given message name is registered
	// in the proto package.
	mname := typeUrl
	if slash := strings.LastIndex(mname, "/"); slash >= 0 {
		mname = mname[slash+1:]
	}
	//lint:ignore SA1019 new non-deprecated API requires other code changes; deferring...
	mt := proto.MessageType(mname)
	if mt != nil {
		return reflect.New(mt.Elem()).Interface().(proto.Message), nil
	}

	// finally, fallback to a special placeholder that can marshal itself
	// to JSON using a special "@value" property to show base64-encoded
	// data for the embedded message
	return &unknownAny{TypeUrl: typeUrl, Error: fmt.Sprintf("%s is not recognized; see @value for raw binary message data", mname)}, nil
}

// unknownAny is a placeholder proto.Message for Any payloads whose type could
// not be resolved. Unmarshal captures the raw bytes (base64-encoded) and
// MarshalJSONPB renders them under "@value" alongside the original "@type".
type unknownAny struct {
	TypeUrl string `json:"@type"`
	Error   string `json:"@error"`
	Value   string `json:"@value"`
}

// MarshalJSONPB renders the placeholder as plain JSON, honoring the
// marshaler's indent setting.
func (a *unknownAny) MarshalJSONPB(jsm *jsonpb.Marshaler) ([]byte, error) {
	if jsm.Indent != "" {
		return json.MarshalIndent(a, "", jsm.Indent)
	}
	return json.Marshal(a)
}

// Unmarshal stores the raw message bytes, base64-encoded, instead of parsing them.
func (a *unknownAny) Unmarshal(b []byte) error {
	a.Value = base64.StdEncoding.EncodeToString(b)
	return nil
}

// Reset clears the captured payload.
func (a *unknownAny) Reset() {
	a.Value = ""
}

// String returns the JSON rendering of the placeholder.
func (a *unknownAny) String() string {
	b, err := a.MarshalJSONPB(&jsonpb.Marshaler{})
	if err != nil {
		return fmt.Sprintf("ERROR: %v", err.Error())
	}
	return string(b)
}

// ProtoMessage marks unknownAny as implementing proto.Message.
func (a *unknownAny) ProtoMessage() {
}

var _ proto.Message = (*unknownAny)(nil)
+
// FormatOptions is a set of flags that are passed to a JSON or text formatter.
type FormatOptions struct {
	// EmitJSONDefaultFields flag, when true, includes empty/default values in the output.
	// FormatJSON only flag.
	EmitJSONDefaultFields bool

	// AllowUnknownFields is an option for the parser. When true,
	// it accepts input which includes unknown fields. These unknown fields
	// are skipped instead of returning an error.
	// FormatJSON only flag.
	AllowUnknownFields bool

	// IncludeTextSeparator, when true, causes all messages after the first
	// (when formatting multiple messages) to be prefixed with the ASCII
	// 'Record Separator' character (0x1E).
	// It might be useful when the output is piped to another grpcurl process.
	// FormatText only flag.
	IncludeTextSeparator bool
}
+
+// RequestParserAndFormatter returns a request parser and formatter for the
+// given format. The given descriptor source may be used for parsing message
+// data (if needed by the format).
+// It accepts a set of options. The field EmitJSONDefaultFields and IncludeTextSeparator
+// are options for JSON and protobuf text formats, respectively. The AllowUnknownFields field
+// is a JSON-only format flag.
+// Requests will be parsed from the given in.
+func RequestParserAndFormatter(format Format, descSource DescriptorSource, in io.Reader, opts FormatOptions) (RequestParser, Formatter, error) {
+ switch format {
+ case FormatJSON:
+ resolver := AnyResolverFromDescriptorSource(descSource)
+ unmarshaler := jsonpb.Unmarshaler{AnyResolver: resolver, AllowUnknownFields: opts.AllowUnknownFields}
+ return NewJSONRequestParserWithUnmarshaler(in, unmarshaler), NewJSONFormatter(opts.EmitJSONDefaultFields, anyResolverWithFallback{AnyResolver: resolver}), nil
+ case FormatText:
+ return NewTextRequestParser(in), NewTextFormatter(opts.IncludeTextSeparator), nil
+ default:
+ return nil, nil, fmt.Errorf("unknown format: %s", format)
+ }
+}
+
// RequestParserAndFormatterFor returns a request parser and formatter for the
// given format. The given descriptor source may be used for parsing message
// data (if needed by the format). The flags emitJSONDefaultFields and
// includeTextSeparator are options for JSON and protobuf text formats,
// respectively. Requests will be parsed from the given in.
//
// Deprecated: Use RequestParserAndFormatter instead.
func RequestParserAndFormatterFor(format Format, descSource DescriptorSource, emitJSONDefaultFields, includeTextSeparator bool, in io.Reader) (RequestParser, Formatter, error) {
	return RequestParserAndFormatter(format, descSource, in, FormatOptions{
		EmitJSONDefaultFields: emitJSONDefaultFields,
		IncludeTextSeparator:  includeTextSeparator,
	})
}
+
// DefaultEventHandler logs events to a writer. This is not thread-safe, but is
// safe for use with InvokeRPC as long as NumResponses and Status are not read
// until the call to InvokeRPC completes.
type DefaultEventHandler struct {
	Out       io.Writer
	Formatter Formatter
	// 0 = default
	// 1 = verbose
	// 2 = very verbose
	VerbosityLevel int

	// NumResponses is the number of responses that have been received.
	// It is incremented by OnReceiveResponse.
	NumResponses int
	// Status is the status that was received at the end of an RPC. It is
	// nil if the RPC is still in progress. It is set by OnReceiveTrailers.
	Status *status.Status
}
+
+// NewDefaultEventHandler returns an InvocationEventHandler that logs events to
+// the given output. If verbose is true, all events are logged. Otherwise, only
+// response messages are logged.
+//
+// Deprecated: NewDefaultEventHandler exists for compatibility.
+// It doesn't allow fine control over the `VerbosityLevel`
+// and provides only 0 and 1 options (which corresponds to the `verbose` argument).
+// Use DefaultEventHandler{} initializer directly.
+func NewDefaultEventHandler(out io.Writer, descSource DescriptorSource, formatter Formatter, verbose bool) *DefaultEventHandler {
+ verbosityLevel := 0
+ if verbose {
+ verbosityLevel = 1
+ }
+ return &DefaultEventHandler{
+ Out: out,
+ Formatter: formatter,
+ VerbosityLevel: verbosityLevel,
+ }
+}
+
+var _ InvocationEventHandler = (*DefaultEventHandler)(nil)
+
+func (h *DefaultEventHandler) OnResolveMethod(md *desc.MethodDescriptor) {
+ if h.VerbosityLevel > 0 {
+ txt, err := GetDescriptorText(md, nil)
+ if err == nil {
+ fmt.Fprintf(h.Out, "\nResolved method descriptor:\n%s\n", txt)
+ }
+ }
+}
+
+func (h *DefaultEventHandler) OnSendHeaders(md metadata.MD) {
+ if h.VerbosityLevel > 0 {
+ fmt.Fprintf(h.Out, "\nRequest metadata to send:\n%s\n", MetadataToString(md))
+ }
+}
+
+func (h *DefaultEventHandler) OnReceiveHeaders(md metadata.MD) {
+ if h.VerbosityLevel > 0 {
+ fmt.Fprintf(h.Out, "\nResponse headers received:\n%s\n", MetadataToString(md))
+ }
+}
+
+func (h *DefaultEventHandler) OnReceiveResponse(resp proto.Message) {
+ h.NumResponses++
+ if h.VerbosityLevel > 1 {
+ fmt.Fprintf(h.Out, "\nEstimated response size: %d bytes\n", proto.Size(resp))
+ }
+ if h.VerbosityLevel > 0 {
+ fmt.Fprint(h.Out, "\nResponse contents:\n")
+ }
+ if respStr, err := h.Formatter(resp); err != nil {
+ fmt.Fprintf(h.Out, "Failed to format response message %d: %v\n", h.NumResponses, err)
+ } else {
+ fmt.Fprintln(h.Out, respStr)
+ }
+}
+
+func (h *DefaultEventHandler) OnReceiveTrailers(stat *status.Status, md metadata.MD) {
+ h.Status = stat
+ if h.VerbosityLevel > 0 {
+ fmt.Fprintf(h.Out, "\nResponse trailers received:\n%s\n", MetadataToString(md))
+ }
+}
+
// PrintStatus prints details about the given status to the given writer. The given
// formatter is used to print any detail messages that may be included in the status.
// If the given status has a code of OK, "OK" is printed and that is all. Otherwise,
// "ERROR:" is printed along with a line showing the code, one showing the message
// string, and each detail message if any are present. The detail messages will be
// printed as proto text format or JSON, depending on the given formatter.
func PrintStatus(w io.Writer, stat *status.Status, formatter Formatter) {
	if stat.Code() == codes.OK {
		fmt.Fprintln(w, "OK")
		return
	}
	fmt.Fprintf(w, "ERROR:\n  Code: %s\n  Message: %s\n", stat.Code().String(), stat.Message())

	statpb := stat.Proto()
	if len(statpb.Details) > 0 {
		fmt.Fprintf(w, "  Details:\n")
		for i, det := range statpb.Details {
			// each detail is numbered ("  1)", "  2)", ...) and its formatted
			// body is indented to line up under the number
			prefix := fmt.Sprintf("  %d)", i+1)
			fmt.Fprintf(w, "%s\t", prefix)
			// continuation lines get whitespace of the same width as the
			// numeric prefix so multi-line details stay aligned
			prefix = strings.Repeat(" ", len(prefix)) + "\t"

			output, err := formatter(det)
			if err != nil {
				fmt.Fprintf(w, "Error parsing detail message: %v\n", err)
			} else {
				lines := strings.Split(output, "\n")
				for i, line := range lines {
					if i == 0 {
						// first line is already indented
						fmt.Fprintf(w, "%s\n", line)
					} else {
						fmt.Fprintf(w, "%s%s\n", prefix, line)
					}
				}
			}
		}
	}
}
diff --git a/vendor/github.com/fullstorydev/grpcurl/grpcurl.go b/vendor/github.com/fullstorydev/grpcurl/grpcurl.go
new file mode 100644
index 0000000..4407d8a
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/grpcurl.go
@@ -0,0 +1,694 @@
+// Package grpcurl provides the core functionality exposed by the grpcurl command, for
+// dynamically connecting to a server, using the reflection service to inspect the server,
+// and invoking RPCs. The grpcurl command-line tool constructs a DescriptorSource, based
+// on the command-line parameters, and supplies an InvocationEventHandler to supply request
+// data (which can come from command-line args or the process's stdin) and to log the
+// events (to the process's stdout).
+package grpcurl
+
+import (
+ "bytes"
+ "context"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "net"
+ "os"
+ "regexp"
+ "sort"
+ "strings"
+
+ "github.com/golang/protobuf/proto" //lint:ignore SA1019 we have to import these because some of their types appear in exported API
+ "github.com/jhump/protoreflect/desc" //lint:ignore SA1019 same as above
+ "github.com/jhump/protoreflect/desc/protoprint"
+ "github.com/jhump/protoreflect/dynamic" //lint:ignore SA1019 same as above
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/credentials/insecure"
+ xdsCredentials "google.golang.org/grpc/credentials/xds"
+ _ "google.golang.org/grpc/health" // import grpc/health to enable transparent client side checking
+ "google.golang.org/grpc/metadata"
+ protov2 "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/types/descriptorpb"
+ "google.golang.org/protobuf/types/known/anypb"
+ "google.golang.org/protobuf/types/known/emptypb"
+ "google.golang.org/protobuf/types/known/structpb"
+)
+
+// ListServices uses the given descriptor source to return a sorted list of fully-qualified
+// service names.
+func ListServices(source DescriptorSource) ([]string, error) {
+ svcs, err := source.ListServices()
+ if err != nil {
+ return nil, err
+ }
+ sort.Strings(svcs)
+ return svcs, nil
+}
+
// sourceWithFiles is implemented by descriptor sources that can enumerate
// every file descriptor they know about directly.
type sourceWithFiles interface {
	GetAllFiles() ([]*desc.FileDescriptor, error)
}

// compile-time check that fileSource supports direct file enumeration
var _ sourceWithFiles = (*fileSource)(nil)
+
+// GetAllFiles uses the given descriptor source to return a list of file descriptors.
+func GetAllFiles(source DescriptorSource) ([]*desc.FileDescriptor, error) {
+ var files []*desc.FileDescriptor
+ srcFiles, ok := source.(sourceWithFiles)
+
+ // If an error occurs, we still try to load as many files as we can, so that
+ // caller can decide whether to ignore error or not.
+ var firstError error
+ if ok {
+ files, firstError = srcFiles.GetAllFiles()
+ } else {
+ // Source does not implement GetAllFiles method, so use ListServices
+ // and grab files from there.
+ svcNames, err := source.ListServices()
+ if err != nil {
+ firstError = err
+ } else {
+ allFiles := map[string]*desc.FileDescriptor{}
+ for _, name := range svcNames {
+ d, err := source.FindSymbol(name)
+ if err != nil {
+ if firstError == nil {
+ firstError = err
+ }
+ } else {
+ addAllFilesToSet(d.GetFile(), allFiles)
+ }
+ }
+ files = make([]*desc.FileDescriptor, len(allFiles))
+ i := 0
+ for _, fd := range allFiles {
+ files[i] = fd
+ i++
+ }
+ }
+ }
+
+ sort.Sort(filesByName(files))
+ return files, firstError
+}
+
// filesByName implements sort.Interface, ordering file descriptors by file name.
type filesByName []*desc.FileDescriptor

func (f filesByName) Len() int {
	return len(f)
}

func (f filesByName) Less(i, j int) bool {
	return f[i].GetName() < f[j].GetName()
}

func (f filesByName) Swap(i, j int) {
	f[i], f[j] = f[j], f[i]
}
+
+func addAllFilesToSet(fd *desc.FileDescriptor, all map[string]*desc.FileDescriptor) {
+ if _, ok := all[fd.GetName()]; ok {
+ // already added
+ return
+ }
+ all[fd.GetName()] = fd
+ for _, dep := range fd.GetDependencies() {
+ addAllFilesToSet(dep, all)
+ }
+}
+
+// ListMethods uses the given descriptor source to return a sorted list of method names
+// for the specified fully-qualified service name.
+func ListMethods(source DescriptorSource, serviceName string) ([]string, error) {
+ dsc, err := source.FindSymbol(serviceName)
+ if err != nil {
+ return nil, err
+ }
+ if sd, ok := dsc.(*desc.ServiceDescriptor); !ok {
+ return nil, notFound("Service", serviceName)
+ } else {
+ methods := make([]string, 0, len(sd.GetMethods()))
+ for _, method := range sd.GetMethods() {
+ methods = append(methods, method.GetFullyQualifiedName())
+ }
+ sort.Strings(methods)
+ return methods, nil
+ }
+}
+
+// MetadataFromHeaders converts a list of header strings (each string in
+// "Header-Name: Header-Value" form) into metadata. If a string has a header
+// name without a value (e.g. does not contain a colon), the value is assumed
+// to be blank. Binary headers (those whose names end in "-bin") should be
+// base64-encoded. But if they cannot be base64-decoded, they will be assumed to
+// be in raw form and used as is.
+func MetadataFromHeaders(headers []string) metadata.MD {
+ md := make(metadata.MD)
+ for _, part := range headers {
+ if part != "" {
+ pieces := strings.SplitN(part, ":", 2)
+ if len(pieces) == 1 {
+ pieces = append(pieces, "") // if no value was specified, just make it "" (maybe the header value doesn't matter)
+ }
+ headerName := strings.ToLower(strings.TrimSpace(pieces[0]))
+ val := strings.TrimSpace(pieces[1])
+ if strings.HasSuffix(headerName, "-bin") {
+ if v, err := decode(val); err == nil {
+ val = v
+ }
+ }
+ md[headerName] = append(md[headerName], val)
+ }
+ }
+ return md
+}
+
+var envVarRegex = regexp.MustCompile(`\${\w+}`)
+
+// ExpandHeaders expands environment variables contained in the header string.
+// If no corresponding environment variable is found an error is returned.
+// TODO: Add escaping for `${`
+func ExpandHeaders(headers []string) ([]string, error) {
+ expandedHeaders := make([]string, len(headers))
+ for idx, header := range headers {
+ if header == "" {
+ continue
+ }
+ results := envVarRegex.FindAllString(header, -1)
+ if len(results) == 0 {
+ expandedHeaders[idx] = headers[idx]
+ continue
+ }
+ expandedHeader := header
+ for _, result := range results {
+ envVarName := result[2 : len(result)-1] // strip leading `${` and trailing `}`
+ envVarValue, ok := os.LookupEnv(envVarName)
+ if !ok {
+ return nil, fmt.Errorf("header %q refers to missing environment variable %q", header, envVarName)
+ }
+ expandedHeader = strings.Replace(expandedHeader, result, envVarValue, -1)
+ }
+ expandedHeaders[idx] = expandedHeader
+ }
+ return expandedHeaders, nil
+}
+
// base64Codecs lists the base64 variants we accept, tried in order.
var base64Codecs = []*base64.Encoding{base64.StdEncoding, base64.URLEncoding, base64.RawStdEncoding, base64.RawURLEncoding}

// decode attempts to base64-decode val, leniently accepting any of the
// standard alphabets with or without padding. On success the decoded string
// is returned; otherwise the error from the first codec tried.
func decode(val string) (string, error) {
	var firstErr error
	for _, enc := range base64Codecs {
		decoded, err := enc.DecodeString(val)
		if err == nil {
			return string(decoded), nil
		}
		if firstErr == nil {
			firstErr = err
		}
	}
	return "", firstErr
}
+
+// MetadataToString returns a string representation of the given metadata, for
+// displaying to users.
+func MetadataToString(md metadata.MD) string {
+ if len(md) == 0 {
+ return "(empty)"
+ }
+
+ keys := make([]string, 0, len(md))
+ for k := range md {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+
+ var b bytes.Buffer
+ first := true
+ for _, k := range keys {
+ vs := md[k]
+ for _, v := range vs {
+ if first {
+ first = false
+ } else {
+ b.WriteString("\n")
+ }
+ b.WriteString(k)
+ b.WriteString(": ")
+ if strings.HasSuffix(k, "-bin") {
+ v = base64.StdEncoding.EncodeToString([]byte(v))
+ }
+ b.WriteString(v)
+ }
+ }
+ return b.String()
+}
+
+var printer = &protoprint.Printer{
+ Compact: true,
+ OmitComments: protoprint.CommentsNonDoc,
+ SortElements: true,
+ ForceFullyQualifiedNames: true,
+}
+
+// GetDescriptorText returns a string representation of the given descriptor.
+// This returns a snippet of proto source that describes the given element.
+func GetDescriptorText(dsc desc.Descriptor, _ DescriptorSource) (string, error) {
+ // Note: DescriptorSource is not used, but remains an argument for backwards
+ // compatibility with previous implementation.
+ txt, err := printer.PrintProtoToString(dsc)
+ if err != nil {
+ return "", err
+ }
+ // callers don't expect trailing newlines
+ if txt[len(txt)-1] == '\n' {
+ txt = txt[:len(txt)-1]
+ }
+ return txt, nil
+}
+
// EnsureExtensions uses the given descriptor source to download extensions for
// the given message. It returns a copy of the given message, but as a dynamic
// message that knows about all extensions known to the given descriptor source.
// This is best-effort: if any step fails, the original message is returned
// unchanged.
func EnsureExtensions(source DescriptorSource, msg proto.Message) proto.Message {
	// load any server extensions so we can properly describe custom options
	dsc, err := desc.LoadMessageDescriptorForMessage(msg)
	if err != nil {
		// cannot describe this message; fall back to the original
		return msg
	}

	var ext dynamic.ExtensionRegistry
	if err = fetchAllExtensions(source, &ext, dsc, map[string]bool{}); err != nil {
		// extension download failed; fall back to the original
		return msg
	}

	// convert message into dynamic message that knows about applicable extensions
	// (that way we can show meaningful info for custom options instead of printing as unknown)
	msgFactory := dynamic.NewMessageFactoryWithExtensionRegistry(&ext)
	dm, err := fullyConvertToDynamic(msgFactory, msg)
	if err != nil {
		// conversion failed; fall back to the original
		return msg
	}
	return dm
}
+
+// fetchAllExtensions recursively fetches from the server extensions for the given message type as well as
+// for all message types of nested fields. The extensions are added to the given dynamic registry of extensions
+// so that all server-known extensions can be correctly parsed by grpcurl.
+func fetchAllExtensions(source DescriptorSource, ext *dynamic.ExtensionRegistry, md *desc.MessageDescriptor, alreadyFetched map[string]bool) error {
+ msgTypeName := md.GetFullyQualifiedName()
+ if alreadyFetched[msgTypeName] {
+ return nil
+ }
+ alreadyFetched[msgTypeName] = true
+ if len(md.GetExtensionRanges()) > 0 {
+ fds, err := source.AllExtensionsForType(msgTypeName)
+ if err != nil {
+ return fmt.Errorf("failed to query for extensions of type %s: %v", msgTypeName, err)
+ }
+ for _, fd := range fds {
+ if err := ext.AddExtension(fd); err != nil {
+ return fmt.Errorf("could not register extension %s of type %s: %v", fd.GetFullyQualifiedName(), msgTypeName, err)
+ }
+ }
+ }
+ // recursively fetch extensions for the types of any message fields
+ for _, fd := range md.GetFields() {
+ if fd.GetMessageType() != nil {
+ err := fetchAllExtensions(source, ext, fd.GetMessageType(), alreadyFetched)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
// fullyConvertToDynamic attempts to convert the given message to a dynamic message as well
// as any nested messages it may contain as field values. If the given message factory has
// extensions registered that were not known when the given message was parsed, this effectively
// allows re-parsing to identify those extensions.
func fullyConvertToDynamic(msgFact *dynamic.MessageFactory, msg proto.Message) (proto.Message, error) {
	if _, ok := msg.(*dynamic.Message); ok {
		return msg, nil // already a dynamic message
	}
	md, err := desc.LoadMessageDescriptorForMessage(msg)
	if err != nil {
		return nil, err
	}
	newMsg := msgFact.NewMessage(md)
	dm, ok := newMsg.(*dynamic.Message)
	if !ok {
		// if message factory didn't produce a dynamic message, then we should leave msg as is
		return msg, nil
	}

	// copy all field values from the source message into the dynamic copy
	if err := dm.ConvertFrom(msg); err != nil {
		return nil, err
	}

	// recursively convert all field values, too
	for _, fd := range md.GetFields() {
		if fd.IsMap() {
			if fd.GetMapValueType().GetMessageType() != nil {
				m := dm.GetField(fd).(map[interface{}]interface{})
				for k, v := range m {
					// keys can't be nested messages; so we only need to recurse through map values, not keys
					newVal, err := fullyConvertToDynamic(msgFact, v.(proto.Message))
					if err != nil {
						return nil, err
					}
					dm.PutMapField(fd, k, newVal)
				}
			}
		} else if fd.IsRepeated() {
			if fd.GetMessageType() != nil {
				// convert each element of a repeated message field in place
				s := dm.GetField(fd).([]interface{})
				for i, e := range s {
					newVal, err := fullyConvertToDynamic(msgFact, e.(proto.Message))
					if err != nil {
						return nil, err
					}
					dm.SetRepeatedField(fd, i, newVal)
				}
			}
		} else {
			if fd.GetMessageType() != nil {
				// singular message field: convert its value in place
				v := dm.GetField(fd)
				newVal, err := fullyConvertToDynamic(msgFact, v.(proto.Message))
				if err != nil {
					return nil, err
				}
				dm.SetField(fd, newVal)
			}
		}
	}
	return dm, nil
}
+
+// MakeTemplate returns a message instance for the given descriptor that is a
+// suitable template for creating an instance of that message in JSON. In
+// particular, it ensures that any repeated fields (which include map fields)
+// are not empty, so they will render with a single element (to show the types
+// and optionally nested fields). It also ensures that nested messages are not
+// nil by setting them to a message that is also fleshed out as a template
+// message.
+func MakeTemplate(md *desc.MessageDescriptor) proto.Message {
+	// nil path: no enclosing message types have been visited yet
+	return makeTemplate(md, nil)
+}
+
+// makeTemplate is the recursive implementation of MakeTemplate. The path
+// argument tracks the message descriptors currently being fleshed out, so
+// that recursive message types do not cause unbounded recursion.
+func makeTemplate(md *desc.MessageDescriptor, path []*desc.MessageDescriptor) proto.Message {
+	// some well-known types have special JSON representations that need
+	// concrete placeholder values to render as valid JSON
+	switch md.GetFullyQualifiedName() {
+	case "google.protobuf.Any":
+		// empty type URL is not allowed by JSON representation
+		// so we must give it a dummy type
+		var anyVal anypb.Any
+		_ = anypb.MarshalFrom(&anyVal, &emptypb.Empty{}, protov2.MarshalOptions{})
+		return &anyVal
+	case "google.protobuf.Value":
+		// unset kind is not allowed by JSON representation
+		// so we must give it something
+		return &structpb.Value{
+			Kind: &structpb.Value_StructValue{StructValue: &structpb.Struct{
+				Fields: map[string]*structpb.Value{
+					"google.protobuf.Value": {Kind: &structpb.Value_StringValue{
+						StringValue: "supports arbitrary JSON",
+					}},
+				},
+			}},
+		}
+	case "google.protobuf.ListValue":
+		return &structpb.ListValue{
+			Values: []*structpb.Value{
+				{
+					Kind: &structpb.Value_StructValue{StructValue: &structpb.Struct{
+						Fields: map[string]*structpb.Value{
+							"google.protobuf.ListValue": {Kind: &structpb.Value_StringValue{
+								StringValue: "is an array of arbitrary JSON values",
+							}},
+						},
+					}},
+				},
+			},
+		}
+	case "google.protobuf.Struct":
+		return &structpb.Struct{
+			Fields: map[string]*structpb.Value{
+				"google.protobuf.Struct": {Kind: &structpb.Value_StringValue{
+					StringValue: "supports arbitrary JSON objects",
+				}},
+			},
+		}
+	}
+
+	dm := dynamic.NewMessage(md)
+
+	// if the message is a recursive structure, we don't want to blow the stack
+	for _, seen := range path {
+		if seen == md {
+			// already visited this type; avoid infinite recursion
+			return dm
+		}
+	}
+	path = append(path, dm.GetMessageDescriptor())
+
+	// for repeated fields, add a single element with default value
+	// and for message fields, add a message with all default fields
+	// that also has non-nil message and non-empty repeated fields
+
+	for _, fd := range dm.GetMessageDescriptor().GetFields() {
+		if fd.IsRepeated() {
+			// the placeholder element's Go type must match the field's wire type
+			switch fd.GetType() {
+			case descriptorpb.FieldDescriptorProto_TYPE_FIXED32,
+				descriptorpb.FieldDescriptorProto_TYPE_UINT32:
+				dm.AddRepeatedField(fd, uint32(0))
+
+			case descriptorpb.FieldDescriptorProto_TYPE_SFIXED32,
+				descriptorpb.FieldDescriptorProto_TYPE_SINT32,
+				descriptorpb.FieldDescriptorProto_TYPE_INT32,
+				descriptorpb.FieldDescriptorProto_TYPE_ENUM:
+				dm.AddRepeatedField(fd, int32(0))
+
+			case descriptorpb.FieldDescriptorProto_TYPE_FIXED64,
+				descriptorpb.FieldDescriptorProto_TYPE_UINT64:
+				dm.AddRepeatedField(fd, uint64(0))
+
+			case descriptorpb.FieldDescriptorProto_TYPE_SFIXED64,
+				descriptorpb.FieldDescriptorProto_TYPE_SINT64,
+				descriptorpb.FieldDescriptorProto_TYPE_INT64:
+				dm.AddRepeatedField(fd, int64(0))
+
+			case descriptorpb.FieldDescriptorProto_TYPE_STRING:
+				dm.AddRepeatedField(fd, "")
+
+			case descriptorpb.FieldDescriptorProto_TYPE_BYTES:
+				dm.AddRepeatedField(fd, []byte{})
+
+			case descriptorpb.FieldDescriptorProto_TYPE_BOOL:
+				dm.AddRepeatedField(fd, false)
+
+			case descriptorpb.FieldDescriptorProto_TYPE_FLOAT:
+				dm.AddRepeatedField(fd, float32(0))
+
+			case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE:
+				dm.AddRepeatedField(fd, float64(0))
+
+			case descriptorpb.FieldDescriptorProto_TYPE_MESSAGE,
+				descriptorpb.FieldDescriptorProto_TYPE_GROUP:
+				dm.AddRepeatedField(fd, makeTemplate(fd.GetMessageType(), path))
+			}
+		} else if fd.GetMessageType() != nil {
+			dm.SetField(fd, makeTemplate(fd.GetMessageType(), path))
+		}
+	}
+	return dm
+}
+
+// ClientTransportCredentials is a helper function that constructs a TLS config with
+// the given properties (see ClientTLSConfig) and then constructs and returns gRPC
+// transport credentials using that config.
+//
+// Deprecated: Use grpcurl.ClientTLSConfig and credentials.NewTLS instead.
+func ClientTransportCredentials(insecureSkipVerify bool, cacertFile, clientCertFile, clientKeyFile string) (credentials.TransportCredentials, error) {
+	// delegate config construction; any file-loading error is propagated as-is
+	tlsConf, err := ClientTLSConfig(insecureSkipVerify, cacertFile, clientCertFile, clientKeyFile)
+	if err != nil {
+		return nil, err
+	}
+
+	return credentials.NewTLS(tlsConf), nil
+}
+
+// ClientTLSConfig builds transport-layer config for a gRPC client using the
+// given properties. If cacertFile is blank, only standard trusted certs are used to
+// verify the server certs. If clientCertFile is blank, the client will not use a client
+// certificate. If clientCertFile is not blank then clientKeyFile must not be blank.
+func ClientTLSConfig(insecureSkipVerify bool, cacertFile, clientCertFile, clientKeyFile string) (*tls.Config, error) {
+	var tlsConf tls.Config
+
+	if clientCertFile != "" {
+		// Load the client certificates from disk
+		// (a blank clientKeyFile here will surface as a LoadX509KeyPair error)
+		certificate, err := tls.LoadX509KeyPair(clientCertFile, clientKeyFile)
+		if err != nil {
+			return nil, fmt.Errorf("could not load client key pair: %v", err)
+		}
+		tlsConf.Certificates = []tls.Certificate{certificate}
+	}
+
+	// insecureSkipVerify takes precedence: the CA file is ignored when set
+	if insecureSkipVerify {
+		tlsConf.InsecureSkipVerify = true
+	} else if cacertFile != "" {
+		// Create a certificate pool from the certificate authority
+		certPool := x509.NewCertPool()
+		ca, err := os.ReadFile(cacertFile)
+		if err != nil {
+			return nil, fmt.Errorf("could not read ca certificate: %v", err)
+		}
+
+		// Append the certificates from the CA
+		if ok := certPool.AppendCertsFromPEM(ca); !ok {
+			return nil, errors.New("failed to append ca certs")
+		}
+
+		tlsConf.RootCAs = certPool
+	}
+
+	return &tlsConf, nil
+}
+
+// ServerTransportCredentials builds transport credentials for a gRPC server using the
+// given properties. If cacertFile is blank, the server will not request client certs
+// unless requireClientCerts is true. When requireClientCerts is false and cacertFile is
+// not blank, the server will verify client certs when presented, but will not require
+// client certs. The serverCertFile and serverKeyFile must both not be blank.
+func ServerTransportCredentials(cacertFile, serverCertFile, serverKeyFile string, requireClientCerts bool) (credentials.TransportCredentials, error) {
+	var tlsConf tls.Config
+	// TODO(jh): Remove this line once https://github.com/golang/go/issues/28779 is fixed
+	// in Go tip. Until then, the recently merged TLS 1.3 support breaks the TLS tests.
+	tlsConf.MaxVersion = tls.VersionTLS12
+
+	// Load the server certificates from disk
+	certificate, err := tls.LoadX509KeyPair(serverCertFile, serverKeyFile)
+	if err != nil {
+		return nil, fmt.Errorf("could not load key pair: %v", err)
+	}
+	tlsConf.Certificates = []tls.Certificate{certificate}
+
+	if cacertFile != "" {
+		// Create a certificate pool from the certificate authority
+		certPool := x509.NewCertPool()
+		ca, err := os.ReadFile(cacertFile)
+		if err != nil {
+			return nil, fmt.Errorf("could not read ca certificate: %v", err)
+		}
+
+		// Append the certificates from the CA
+		if ok := certPool.AppendCertsFromPEM(ca); !ok {
+			return nil, errors.New("failed to append ca certs")
+		}
+
+		tlsConf.ClientCAs = certPool
+	}
+
+	// map the (requireClientCerts, cacertFile) combination onto the
+	// corresponding tls.ClientAuthType, as documented above
+	if requireClientCerts {
+		tlsConf.ClientAuth = tls.RequireAndVerifyClientCert
+	} else if cacertFile != "" {
+		tlsConf.ClientAuth = tls.VerifyClientCertIfGiven
+	} else {
+		tlsConf.ClientAuth = tls.NoClientCert
+	}
+
+	return credentials.NewTLS(&tlsConf), nil
+}
+
+// BlockingDial is a helper method to dial the given address, using optional TLS credentials,
+// and blocking until the returned connection is ready. If the given credentials are nil, the
+// connection will be insecure (plain-text).
+//
+// NOTE(review): the network parameter is not referenced in this function's
+// visible body — presumably kept for backward compatibility; confirm against callers.
+func BlockingDial(ctx context.Context, network, address string, creds credentials.TransportCredentials, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
+	if creds == nil {
+		creds = insecure.NewCredentials()
+	}
+
+	var err error
+	if strings.HasPrefix(address, "xds:///") {
+		// The xds:/// prefix is used to signal to the gRPC client to use an xDS server to resolve the
+		// target. The relevant credentials will be automatically pulled from the GRPC_XDS_BOOTSTRAP or
+		// GRPC_XDS_BOOTSTRAP_CONFIG env vars.
+		creds, err = xdsCredentials.NewClientCredentials(xdsCredentials.ClientOptions{FallbackCreds: creds})
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// grpc.Dial doesn't provide any information on permanent connection errors (like
+	// TLS handshake failures). So in order to provide good error messages, we need a
+	// custom dialer that can provide that info. That means we manage the TLS handshake.
+	result := make(chan interface{}, 1)
+
+	writeResult := func(res interface{}) {
+		// non-blocking write: we only need the first result
+		select {
+		case result <- res:
+		default:
+		}
+	}
+
+	// custom credentials and dialer will notify on error via the
+	// writeResult function
+	creds = &errSignalingCreds{
+		TransportCredentials: creds,
+		writeResult:          writeResult,
+	}
+
+	// Even with grpc.FailOnNonTempDialError, this call will usually timeout in
+	// the face of TLS handshake errors. So we can't rely on grpc.WithBlock() to
+	// know when we're done. So we run it in a goroutine and then use result
+	// channel to either get the connection or fail-fast.
+	go func() {
+		// We put grpc.FailOnNonTempDialError *before* the explicitly provided
+		// options so that it could be overridden.
+		opts = append([]grpc.DialOption{grpc.FailOnNonTempDialError(true)}, opts...)
+		// But we don't want caller to be able to override these two, so we put
+		// them *after* the explicitly provided options.
+		opts = append(opts, grpc.WithBlock(), grpc.WithTransportCredentials(creds))
+
+		conn, err := grpc.DialContext(ctx, address, opts...)
+		var res interface{}
+		if err != nil {
+			res = err
+		} else {
+			res = conn
+		}
+		writeResult(res)
+	}()
+
+	// first result wins: either a ready connection, a handshake/dial error,
+	// or cancellation/expiry of the caller's context
+	select {
+	case res := <-result:
+		if conn, ok := res.(*grpc.ClientConn); ok {
+			return conn, nil
+		}
+		return nil, res.(error)
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	}
+}
+
+// errSignalingCreds is a wrapper around a TransportCredentials value, but
+// it will use the writeResult function to notify on error.
+type errSignalingCreds struct {
+	credentials.TransportCredentials
+	writeResult func(res interface{})
+}
+
+// ClientHandshake delegates to the wrapped credentials and, on failure,
+// reports the handshake error through writeResult so BlockingDial can
+// fail fast instead of waiting for a dial timeout.
+func (c *errSignalingCreds) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
+	conn, auth, err := c.TransportCredentials.ClientHandshake(ctx, addr, rawConn)
+	if err != nil {
+		c.writeResult(err)
+	}
+	return conn, auth, err
+}
diff --git a/vendor/github.com/fullstorydev/grpcurl/invoke.go b/vendor/github.com/fullstorydev/grpcurl/invoke.go
new file mode 100644
index 0000000..860dae5
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/invoke.go
@@ -0,0 +1,409 @@
+package grpcurl
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "strings"
+ "sync"
+ "sync/atomic"
+
+ "github.com/golang/protobuf/jsonpb" //lint:ignore SA1019 we have to import these because some of their types appear in exported API
+ "github.com/golang/protobuf/proto" //lint:ignore SA1019 same as above
+ "github.com/jhump/protoreflect/desc" //lint:ignore SA1019 same as above
+ "github.com/jhump/protoreflect/dynamic" //lint:ignore SA1019 same as above
+ "github.com/jhump/protoreflect/dynamic/grpcdynamic"
+ "github.com/jhump/protoreflect/grpcreflect"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+)
+
+// InvocationEventHandler is a bag of callbacks for handling events that occur in the course
+// of invoking an RPC. The handler also provides request data that is sent. The callbacks are
+// generally called in the order they are listed below.
+//
+// See the InvokeRPC documentation regarding thread-safety when the handler
+// shares state with the request supplier.
+type InvocationEventHandler interface {
+	// OnResolveMethod is called with a descriptor of the method that is being invoked.
+	OnResolveMethod(*desc.MethodDescriptor)
+	// OnSendHeaders is called with the request metadata that is being sent.
+	OnSendHeaders(metadata.MD)
+	// OnReceiveHeaders is called when response headers have been received.
+	OnReceiveHeaders(metadata.MD)
+	// OnReceiveResponse is called for each response message received.
+	OnReceiveResponse(proto.Message)
+	// OnReceiveTrailers is called when response trailers and final RPC status have been received.
+	OnReceiveTrailers(*status.Status, metadata.MD)
+}
+
+// RequestMessageSupplier is a function that is called to retrieve request
+// messages for a GRPC operation. The returned bytes are expected to be a
+// JSON-encoded request message (InvokeRpc parses them with jsonpb). This
+// type is deprecated and will be removed in a future release.
+//
+// Deprecated: This is only used with the deprecated InvokeRpc. Instead, use
+// RequestSupplier with InvokeRPC.
+type RequestMessageSupplier func() ([]byte, error)
+
+// InvokeRpc uses the given gRPC connection to invoke the given method. This function is deprecated
+// and will be removed in a future release. It just delegates to the similarly named InvokeRPC
+// method, whose signature is only slightly different.
+//
+// Deprecated: use InvokeRPC instead.
+func InvokeRpc(ctx context.Context, source DescriptorSource, cc *grpc.ClientConn, methodName string,
+	headers []string, handler InvocationEventHandler, requestData RequestMessageSupplier) error {
+
+	return InvokeRPC(ctx, source, cc, methodName, headers, handler, func(m proto.Message) error {
+		// New function is almost identical, but the request supplier function works differently.
+		// So we adapt the logic here to maintain compatibility.
+		data, err := requestData()
+		if err != nil {
+			return err
+		}
+		// parse the raw JSON bytes into the message the new API expects
+		return jsonpb.Unmarshal(bytes.NewReader(data), m)
+	})
+}
+
+// RequestSupplier is a function that is called to populate messages for a gRPC operation. The
+// function should populate the given message or return a non-nil error. If the supplier has no
+// more messages, it should return io.EOF. When it returns io.EOF, it should not in any way
+// modify the given message argument.
+//
+// Note: the same message instance may be passed repeatedly (it is Reset
+// between sends by the streaming invokers in this file).
+type RequestSupplier func(proto.Message) error
+
+// InvokeRPC uses the given gRPC channel to invoke the given method. The given descriptor source
+// is used to determine the type of method and the type of request and response message. The given
+// headers are sent as request metadata. Methods on the given event handler are called as the
+// invocation proceeds.
+//
+// The given requestData function supplies the actual data to send. It should return io.EOF when
+// there is no more request data. If the method being invoked is a unary or server-streaming RPC
+// (e.g. exactly one request message) and there is no request data (e.g. the first invocation of
+// the function returns io.EOF), then an empty request message is sent.
+//
+// If the requestData function and the given event handler coordinate or share any state, they should
+// be thread-safe. This is because the requestData function may be called from a different goroutine
+// than the one invoking event callbacks. (This only happens for bi-directional streaming RPCs, where
+// one goroutine sends request messages and another consumes the response messages).
+func InvokeRPC(ctx context.Context, source DescriptorSource, ch grpcdynamic.Channel, methodName string,
+	headers []string, handler InvocationEventHandler, requestData RequestSupplier) error {
+
+	md := MetadataFromHeaders(headers)
+
+	// resolve "service/method" (or "service.method") into its descriptor
+	svc, mth := parseSymbol(methodName)
+	if svc == "" || mth == "" {
+		return fmt.Errorf("given method name %q is not in expected format: 'service/method' or 'service.method'", methodName)
+	}
+
+	dsc, err := source.FindSymbol(svc)
+	if err != nil {
+		// return a gRPC status error if hasStatus is true
+		errStatus, hasStatus := status.FromError(err)
+		switch {
+		case hasStatus && isNotFoundError(err):
+			return status.Errorf(errStatus.Code(), "target server does not expose service %q: %s", svc, errStatus.Message())
+		case hasStatus:
+			return status.Errorf(errStatus.Code(), "failed to query for service descriptor %q: %s", svc, errStatus.Message())
+		case isNotFoundError(err):
+			return fmt.Errorf("target server does not expose service %q", svc)
+		}
+		return fmt.Errorf("failed to query for service descriptor %q: %v", svc, err)
+	}
+	sd, ok := dsc.(*desc.ServiceDescriptor)
+	if !ok {
+		return fmt.Errorf("target server does not expose service %q", svc)
+	}
+	mtd := sd.FindMethodByName(mth)
+	if mtd == nil {
+		return fmt.Errorf("service %q does not include a method named %q", svc, mth)
+	}
+
+	handler.OnResolveMethod(mtd)
+
+	// we also download any applicable extensions so we can provide full support for parsing user-provided data
+	var ext dynamic.ExtensionRegistry
+	alreadyFetched := map[string]bool{}
+	if err = fetchAllExtensions(source, &ext, mtd.GetInputType(), alreadyFetched); err != nil {
+		return fmt.Errorf("error resolving server extensions for message %s: %v", mtd.GetInputType().GetFullyQualifiedName(), err)
+	}
+	if err = fetchAllExtensions(source, &ext, mtd.GetOutputType(), alreadyFetched); err != nil {
+		return fmt.Errorf("error resolving server extensions for message %s: %v", mtd.GetOutputType().GetFullyQualifiedName(), err)
+	}
+
+	msgFactory := dynamic.NewMessageFactoryWithExtensionRegistry(&ext)
+	req := msgFactory.NewMessage(mtd.GetInputType())
+
+	handler.OnSendHeaders(md)
+	ctx = metadata.NewOutgoingContext(ctx, md)
+
+	stub := grpcdynamic.NewStubWithMessageFactory(ch, msgFactory)
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	// dispatch to the invoker matching the method's streaming shape
+	if mtd.IsClientStreaming() && mtd.IsServerStreaming() {
+		return invokeBidi(ctx, stub, mtd, handler, requestData, req)
+	} else if mtd.IsClientStreaming() {
+		return invokeClientStream(ctx, stub, mtd, handler, requestData, req)
+	} else if mtd.IsServerStreaming() {
+		return invokeServerStream(ctx, stub, mtd, handler, requestData, req)
+	} else {
+		return invokeUnary(ctx, stub, mtd, handler, requestData, req)
+	}
+}
+
+// invokeUnary performs a unary RPC: it reads at most one request message from
+// requestData (io.EOF on the first call means "send an empty request"), invokes
+// the method, and reports headers, response, and trailers through the handler.
+func invokeUnary(ctx context.Context, stub grpcdynamic.Stub, md *desc.MethodDescriptor, handler InvocationEventHandler,
+	requestData RequestSupplier, req proto.Message) error {
+
+	err := requestData(req)
+	if err != nil && err != io.EOF {
+		return fmt.Errorf("error getting request data: %v", err)
+	}
+	if err != io.EOF {
+		// verify there is no second message, which is a usage error
+		err := requestData(req)
+		if err == nil {
+			return fmt.Errorf("method %q is a unary RPC, but request data contained more than 1 message", md.GetFullyQualifiedName())
+		} else if err != io.EOF {
+			return fmt.Errorf("error getting request data: %v", err)
+		}
+	}
+
+	// Now we can actually invoke the RPC!
+	var respHeaders metadata.MD
+	var respTrailers metadata.MD
+	resp, err := stub.InvokeRpc(ctx, md, req, grpc.Trailer(&respTrailers), grpc.Header(&respHeaders))
+
+	stat, ok := status.FromError(err)
+	if !ok {
+		// Error codes sent from the server will get printed differently below.
+		// So just bail for other kinds of errors here.
+		return fmt.Errorf("grpc call for %q failed: %v", md.GetFullyQualifiedName(), err)
+	}
+
+	handler.OnReceiveHeaders(respHeaders)
+
+	// only deliver a response message on success; errors surface via trailers/status
+	if stat.Code() == codes.OK {
+		handler.OnReceiveResponse(resp)
+	}
+
+	handler.OnReceiveTrailers(stat, respTrailers)
+
+	return nil
+}
+
+// invokeClientStream performs a client-streaming RPC: it sends every message
+// produced by requestData, then closes the stream and receives the single
+// response, reporting progress through the handler.
+func invokeClientStream(ctx context.Context, stub grpcdynamic.Stub, md *desc.MethodDescriptor, handler InvocationEventHandler,
+	requestData RequestSupplier, req proto.Message) error {
+
+	// invoke the RPC!
+	str, err := stub.InvokeRpcClientStream(ctx, md)
+
+	// Upload each request message in the stream
+	var resp proto.Message
+	for err == nil {
+		err = requestData(req)
+		if err == io.EOF {
+			// no more request data: close our side and wait for the response
+			resp, err = str.CloseAndReceive()
+			break
+		}
+		if err != nil {
+			return fmt.Errorf("error getting request data: %v", err)
+		}
+
+		err = str.SendMsg(req)
+		if err == io.EOF {
+			// We get EOF on send if the server says "go away"
+			// We have to use CloseAndReceive to get the actual code
+			resp, err = str.CloseAndReceive()
+			break
+		}
+
+		// reuse the same message instance for the next supplied request
+		req.Reset()
+	}
+
+	// finally, process response data
+	stat, ok := status.FromError(err)
+	if !ok {
+		// Error codes sent from the server will get printed differently below.
+		// So just bail for other kinds of errors here.
+		return fmt.Errorf("grpc call for %q failed: %v", md.GetFullyQualifiedName(), err)
+	}
+
+	// str can be nil if the initial InvokeRpcClientStream call failed
+	if str != nil {
+		if respHeaders, err := str.Header(); err == nil {
+			handler.OnReceiveHeaders(respHeaders)
+		}
+	}
+
+	if stat.Code() == codes.OK {
+		handler.OnReceiveResponse(resp)
+	}
+
+	if str != nil {
+		handler.OnReceiveTrailers(stat, str.Trailer())
+	}
+
+	return nil
+}
+
+// invokeServerStream performs a server-streaming RPC: it sends at most one
+// request message (io.EOF on the first requestData call means "send an empty
+// request") and then delivers every response message to the handler.
+func invokeServerStream(ctx context.Context, stub grpcdynamic.Stub, md *desc.MethodDescriptor, handler InvocationEventHandler,
+	requestData RequestSupplier, req proto.Message) error {
+
+	err := requestData(req)
+	if err != nil && err != io.EOF {
+		return fmt.Errorf("error getting request data: %v", err)
+	}
+	if err != io.EOF {
+		// verify there is no second message, which is a usage error
+		err := requestData(req)
+		if err == nil {
+			return fmt.Errorf("method %q is a server-streaming RPC, but request data contained more than 1 message", md.GetFullyQualifiedName())
+		} else if err != io.EOF {
+			return fmt.Errorf("error getting request data: %v", err)
+		}
+	}
+
+	// Now we can actually invoke the RPC!
+	str, err := stub.InvokeRpcServerStream(ctx, md, req)
+
+	// str can be nil if the invocation itself failed
+	if str != nil {
+		if respHeaders, err := str.Header(); err == nil {
+			handler.OnReceiveHeaders(respHeaders)
+		}
+	}
+
+	// Download each response message
+	for err == nil {
+		var resp proto.Message
+		resp, err = str.RecvMsg()
+		if err != nil {
+			if err == io.EOF {
+				// normal end of the response stream
+				err = nil
+			}
+			break
+		}
+		handler.OnReceiveResponse(resp)
+	}
+
+	stat, ok := status.FromError(err)
+	if !ok {
+		// Error codes sent from the server will get printed differently below.
+		// So just bail for other kinds of errors here.
+		return fmt.Errorf("grpc call for %q failed: %v", md.GetFullyQualifiedName(), err)
+	}
+
+	if str != nil {
+		handler.OnReceiveTrailers(stat, str.Trailer())
+	}
+
+	return nil
+}
+
+// invokeBidi performs a bi-directional streaming RPC. Request messages are
+// uploaded from a separate goroutine while this goroutine consumes responses;
+// this is why requestData and the handler must be thread-safe if they share
+// state (see InvokeRPC). A send-side error is captured in sendErr and takes
+// precedence over a receive-side io.EOF.
+func invokeBidi(ctx context.Context, stub grpcdynamic.Stub, md *desc.MethodDescriptor, handler InvocationEventHandler,
+	requestData RequestSupplier, req proto.Message) error {
+
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	// invoke the RPC!
+	str, err := stub.InvokeRpcBidiStream(ctx, md)
+
+	var wg sync.WaitGroup
+	var sendErr atomic.Value
+
+	// make sure the sender goroutine has finished before we return
+	defer wg.Wait()
+
+	if err == nil {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+
+			// Concurrently upload each request message in the stream
+			var err error
+			for err == nil {
+				err = requestData(req)
+
+				if err == io.EOF {
+					// no more requests: half-close our side of the stream
+					err = str.CloseSend()
+					break
+				}
+				if err != nil {
+					err = fmt.Errorf("error getting request data: %v", err)
+					// abort the whole RPC so the receive loop unblocks
+					cancel()
+					break
+				}
+
+				err = str.SendMsg(req)
+
+				// reuse the same message instance for the next supplied request
+				req.Reset()
+			}
+
+			if err != nil {
+				sendErr.Store(err)
+			}
+		}()
+	}
+
+	// str can be nil if the initial InvokeRpcBidiStream call failed
+	if str != nil {
+		if respHeaders, err := str.Header(); err == nil {
+			handler.OnReceiveHeaders(respHeaders)
+		}
+	}
+
+	// Download each response message
+	for err == nil {
+		var resp proto.Message
+		resp, err = str.RecvMsg()
+		if err != nil {
+			if err == io.EOF {
+				// normal end of the response stream
+				err = nil
+			}
+			break
+		}
+		handler.OnReceiveResponse(resp)
+	}
+
+	// a send-side failure (other than EOF) is the more useful error to report
+	if se, ok := sendErr.Load().(error); ok && se != io.EOF {
+		err = se
+	}
+
+	stat, ok := status.FromError(err)
+	if !ok {
+		// Error codes sent from the server will get printed differently below.
+		// So just bail for other kinds of errors here.
+		return fmt.Errorf("grpc call for %q failed: %v", md.GetFullyQualifiedName(), err)
+	}
+
+	if str != nil {
+		handler.OnReceiveTrailers(stat, str.Trailer())
+	}
+
+	return nil
+}
+
+// notFoundError is a sentinel error type indicating a named element (service,
+// method, message, etc.) could not be found by a descriptor source.
+type notFoundError string
+
+// notFound constructs a notFoundError for the given kind and name,
+// e.g. notFound("Service", "foo.Bar").
+func notFound(kind, name string) error {
+	return notFoundError(fmt.Sprintf("%s not found: %s", kind, name))
+}
+
+// Error implements the error interface.
+func (e notFoundError) Error() string {
+	return string(e)
+}
+
+// isNotFoundError reports whether err represents a "not found" condition,
+// either from the reflection library or from this package's notFoundError.
+func isNotFoundError(err error) bool {
+	if grpcreflect.IsElementNotFoundError(err) {
+		return true
+	}
+	_, ok := err.(notFoundError)
+	return ok
+}
+
+// parseSymbol splits a "service/method" or "service.method" string into its
+// service and method parts, splitting on the last separator so that
+// dot-qualified service names are preserved. It returns two empty strings
+// if no separator is present.
+func parseSymbol(svcAndMethod string) (string, string) {
+	pos := strings.LastIndex(svcAndMethod, "/")
+	if pos < 0 {
+		// fall back to dot-separated form
+		pos = strings.LastIndex(svcAndMethod, ".")
+		if pos < 0 {
+			return "", ""
+		}
+	}
+	return svcAndMethod[:pos], svcAndMethod[pos+1:]
+}
diff --git a/vendor/github.com/fullstorydev/grpcurl/mk-test-files.sh b/vendor/github.com/fullstorydev/grpcurl/mk-test-files.sh
new file mode 100644
index 0000000..51db6f4
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/mk-test-files.sh
@@ -0,0 +1,57 @@
+#!/bin/bash
+
+set -e
+
+# run from the directory containing this script so relative paths resolve
+# NOTE(review): $0 is unquoted inside the substitution — paths with spaces
+# would break; confirm whether that matters for this repo.
+cd "$(dirname $0)"
+
+# Run this script to generate files used by tests.
+
+echo "Creating protosets..."
+protoc testing/test.proto \
+	--include_imports \
+	--descriptor_set_out=testing/test.protoset
+
+protoc testing/example.proto \
+	--include_imports \
+	--descriptor_set_out=testing/example.protoset
+
+protoc testing/jsonpb_test_proto/test_objects.proto \
+	--go_out=paths=source_relative:.
+
+echo "Creating certs for TLS testing..."
+if ! hash certstrap 2>/dev/null; then
+	# certstrap not found: try to install it
+	go get github.com/square/certstrap
+	go install github.com/square/certstrap
+fi
+
+# cs: wrapper around certstrap that pins the depot path and an empty passphrase
+function cs() {
+	certstrap --depot-path testing/tls "$@" --passphrase ""
+}
+
+# start from a clean slate each run
+rm -rf testing/tls
+
+# Create CA
+cs init --years 10 --common-name ca
+
+# Create client cert
+cs request-cert --common-name client
+cs sign client --years 10 --CA ca
+
+# Create server cert
+cs request-cert --common-name server --ip 127.0.0.1 --domain localhost
+cs sign server --years 10 --CA ca
+
+# Create another server cert for error testing
+cs request-cert --common-name other --ip 1.2.3.4 --domain foobar.com
+cs sign other --years 10 --CA ca
+
+# Create another CA and client cert for more
+# error testing
+cs init --years 10 --common-name wrong-ca
+cs request-cert --common-name wrong-client
+cs sign wrong-client --years 10 --CA wrong-ca
+
+# Create expired cert
+cs request-cert --common-name expired --ip 127.0.0.1 --domain localhost
+cs sign expired --years 0 --CA ca