From a0de68067e3077481d526ef42bbb1c5708b044c8 Mon Sep 17 00:00:00 2001 From: mo khan Date: Mon, 14 Jul 2025 11:15:51 -0600 Subject: docs: add notes on SpiceDB --- share/man/spicedb/README.md | 91 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 91 insertions(+) create mode 100644 share/man/spicedb/README.md diff --git a/share/man/spicedb/README.md b/share/man/spicedb/README.md new file mode 100644 index 00000000..bd6d7798 --- /dev/null +++ b/share/man/spicedb/README.md @@ -0,0 +1,91 @@ +# Spice DB + +SpiceDB is an open-source re-implementation of [Google Zanzibar][1], Google's global authorization system. + +## Components + +* `zed`: Command line client +* `spicedb`: The Server + +### zed + +```bash +$ zed --help +A command-line client for managing SpiceDB clusters. + +Usage: + zed [command] + +Available Commands: + backup Create, restore, and inspect permissions system backups + completion Generate the autocompletion script for the specified shell + context Manage configurations for connecting to SpiceDB deployments + help Help about any command + import Imports schema and relationships from a file or url + permission Query the permissions in a permissions system + preview Experimental commands that have been made available for preview + relationship Query and mutate the relationships in a permissions system + schema Manage schema for a permissions system + use Alias for `zed context use` + validate Validates the given validation file (.yaml, .zaml) or schema file (.zed) + version Display zed and SpiceDB version information + +Flags: + --certificate-path string path to certificate authority used to verify secure connections + --endpoint string spicedb gRPC API endpoint + -h, --help help for zed + --hostname-override string override the hostname used in the connection to the endpoint + --insecure connect over a plaintext connection + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") 
(default "info") + --max-message-size int maximum size *in bytes* (defaults to 4_194_304 bytes ~= 4MB) of a gRPC message that can be sent or received by zed + --max-retries uint maximum number of sequential retries to attempt when a request fails (default 10) + --no-verify-ca do not attempt to verify the server's certificate chain and host name + --permissions-system string permissions system to query + --proxy string specify a SOCKS5 proxy address + --request-id string optional id to send along with SpiceDB requests for tracing + --skip-version-check if true, no version check is performed against the server + --token string token used to authenticate to SpiceDB + +Use "zed [command] --help" for more information about a command. +``` + + +### server + +```bash +$ spicedb --help +A database that stores, computes, and validates application permissions + +Usage: + spicedb [command] + +Examples: + No TLS and in-memory: + spicedb serve --grpc-preshared-key "somerandomkeyhere" + + TLS and a real datastore: + spicedb serve --grpc-preshared-key "realkeyhere" --grpc-tls-cert-path path/to/tls/cert --grpc-tls-key-path path/to/tls/key \ + --http-tls-cert-path path/to/tls/cert --http-tls-key-path path/to/tls/key \ + --datastore-engine postgres --datastore-conn-uri "postgres-connection-string-here" + + +Available Commands: + completion Generate the autocompletion script for the specified shell + datastore datastore operations + help Help about any command + lsp serve language server protocol + serve serve the permissions database + serve-testing test server with an in-memory datastore + version displays the version of SpiceDB + +Flags: + -h, --help help for spicedb + --log-format string format of logs ("auto", "console", "json") (default "auto") + --log-level string verbosity of logging ("trace", "debug", "info", "warn", "error") (default "info") + --skip-release-check if true, skips checking for new SpiceDB releases + +Use "spicedb [command] --help" for more information about a 
command. +``` + +[1]: https://authzed.com/blog/what-is-google-zanzibar -- cgit v1.2.3 From fa92e7d1b3a61deb3d16db2f6546677040e395cd Mon Sep 17 00:00:00 2001 From: mo khan Date: Mon, 14 Jul 2025 11:34:18 -0600 Subject: chore: add make targets to setup spice schema --- Makefile | 10 ++++++++++ etc/authzd/spice.schema | 7 +++++++ 2 files changed, 17 insertions(+) create mode 100644 etc/authzd/spice.schema diff --git a/Makefile b/Makefile index 82d6f5a7..35886d64 100644 --- a/Makefile +++ b/Makefile @@ -78,3 +78,13 @@ production-entities: $(AUTHZD_BIN) check-gitlab-token @$(AUTHZD_BIN) generate --project gitlab-org/gitlab --output etc/authzd/gitlab.com/gitlab-org/gitlab/entities.json @$(AUTHZD_BIN) generate --project gitlab-org/software-supply-chain-security/authorization/authzd --output etc/authzd/gitlab.com/gitlab-org/software-supply-chain-security/authorization/authzd/entities.json @$(AUTHZD_BIN) generate --project gitlab-org/software-supply-chain-security/authorization/sparkled --output etc/authzd/gitlab.com/gitlab-org/software-supply-chain-security/authorization/sparkled/entities.json + +# spice target +run-spicedb: + @spicedb serve --grpc-preshared-key "secret" + +run-spice-schema-load: + @zed --endpoint ":50051" --token "secret" --insecure schema write etc/authzd/spice.schema + +run-spice-schema-read: + @zed --endpoint ":50051" --token "secret" --insecure schema read diff --git a/etc/authzd/spice.schema b/etc/authzd/spice.schema new file mode 100644 index 00000000..74d0a7d6 --- /dev/null +++ b/etc/authzd/spice.schema @@ -0,0 +1,7 @@ +definition user {} +definition post { + relation reader: user + relation writer: user + permission read = reader + writer + permission write = writer +} -- cgit v1.2.3 From 30ffd692e3749e38f9ab05d04a15d0dcdf186610 Mon Sep 17 00:00:00 2001 From: mo khan Date: Mon, 14 Jul 2025 11:45:00 -0600 Subject: chore: rework examples to use project and gitlab roles --- Makefile | 12 +++++++++--- etc/authzd/spice.schema | 10 +++++----- 2 files 
changed, 14 insertions(+), 8 deletions(-) diff --git a/Makefile b/Makefile index 35886d64..8eeadc86 100644 --- a/Makefile +++ b/Makefile @@ -83,8 +83,14 @@ production-entities: $(AUTHZD_BIN) check-gitlab-token run-spicedb: @spicedb serve --grpc-preshared-key "secret" -run-spice-schema-load: +run-spicedb-setup: @zed --endpoint ":50051" --token "secret" --insecure schema write etc/authzd/spice.schema - -run-spice-schema-read: @zed --endpoint ":50051" --token "secret" --insecure schema read + @zed --endpoint ":50051" --token "secret" --insecure relationship create project:1 maintainer user:mokhax + @zed --endpoint ":50051" --token "secret" --insecure relationship create project:1 developer user:tanuki + +run-spicedb-permission-check: + @zed --endpoint ":50051" --token "secret" --insecure permission check project:1 read user:mokhax + @zed --endpoint ":50051" --token "secret" --insecure permission check project:1 write user:mokhax + @zed --endpoint ":50051" --token "secret" --insecure permission check project:1 read user:tanuki + @zed --endpoint ":50051" --token "secret" --insecure permission check project:1 write user:tanuki diff --git a/etc/authzd/spice.schema b/etc/authzd/spice.schema index 74d0a7d6..0d6a6482 100644 --- a/etc/authzd/spice.schema +++ b/etc/authzd/spice.schema @@ -1,7 +1,7 @@ definition user {} -definition post { - relation reader: user - relation writer: user - permission read = reader + writer - permission write = writer +definition project { + relation developer: user + relation maintainer: user + permission read = developer + maintainer + permission write = maintainer } -- cgit v1.2.3 From 5c9e5b297cdf5d761740e6604b1e36d390aa86a8 Mon Sep 17 00:00:00 2001 From: mo khan Date: Mon, 14 Jul 2025 12:46:58 -0600 Subject: chore: run spicedb via Procfile --- Makefile | 5 +---- Procfile | 1 + 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 8eeadc86..8cc74a9b 100644 --- a/Makefile +++ b/Makefile @@ -79,10 +79,7 @@ 
production-entities: $(AUTHZD_BIN) check-gitlab-token @$(AUTHZD_BIN) generate --project gitlab-org/software-supply-chain-security/authorization/authzd --output etc/authzd/gitlab.com/gitlab-org/software-supply-chain-security/authorization/authzd/entities.json @$(AUTHZD_BIN) generate --project gitlab-org/software-supply-chain-security/authorization/sparkled --output etc/authzd/gitlab.com/gitlab-org/software-supply-chain-security/authorization/sparkled/entities.json -# spice target -run-spicedb: - @spicedb serve --grpc-preshared-key "secret" - +# spice targets run-spicedb-setup: @zed --endpoint ":50051" --token "secret" --insecure schema write etc/authzd/spice.schema @zed --endpoint ":50051" --token "secret" --insecure schema read diff --git a/Procfile b/Procfile index 39bbbeaf..02173f12 100644 --- a/Procfile +++ b/Procfile @@ -1,2 +1,3 @@ authzd: ./bin/authzd server envoy: ./bin/envoy -c ./etc/envoy/envoy.yaml --base-id 1 --log-level warn --component-log-level admin:warn,connection:warn,grpc:warn,http:warn,http2:warn,router:warn,upstream:warn +spicedb: spicedb serve --grpc-preshared-key "secret" -- cgit v1.2.3 From 8a16f4388896e3be4e9994d82ca002156b2381de Mon Sep 17 00:00:00 2001 From: mo khan Date: Mon, 14 Jul 2025 13:56:11 -0600 Subject: chore: install spicedb binary in docker image --- Dockerfile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Dockerfile b/Dockerfile index c088574e..3bfeb657 100644 --- a/Dockerfile +++ b/Dockerfile @@ -20,6 +20,9 @@ RUN apt-get update && apt-get install -y wget && \ wget -O /usr/bin/dumb-init https://github.com/Yelp/dumb-init/releases/download/v1.2.5/dumb-init_1.2.5_x86_64 && \ chmod +x /usr/bin/dumb-init +# Build stage for getting SpiceDB binary +FROM ghcr.io/authzed/spicedb:latest AS spicedb-binary + # Final stage FROM gcr.io/distroless/base-debian12:nonroot EXPOSE 20000 @@ -31,6 +34,7 @@ COPY --from=authzd-builder /app/target/x86_64-unknown-linux-musl/release/authzd COPY --from=envoy-binary /usr/local/bin/envoy /bin/envoy 
COPY --from=minit-builder /go/bin/minit /bin/minit COPY --from=dumb-init-builder /usr/bin/dumb-init /usr/bin/dumb-init +COPY --from=spicedb-binary /usr/local/bin/spicedb /bin/spicedb ENTRYPOINT ["/usr/bin/dumb-init", "--"] CMD ["/bin/minit"] -- cgit v1.2.3 From 66ad68976185bba6953790c1587ef52a1b456431 Mon Sep 17 00:00:00 2001 From: mo khan Date: Mon, 14 Jul 2025 13:57:21 -0600 Subject: chore: specify spicedb flags --- Procfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Procfile b/Procfile index 02173f12..6483045c 100644 --- a/Procfile +++ b/Procfile @@ -1,3 +1,3 @@ authzd: ./bin/authzd server envoy: ./bin/envoy -c ./etc/envoy/envoy.yaml --base-id 1 --log-level warn --component-log-level admin:warn,connection:warn,grpc:warn,http:warn,http2:warn,router:warn,upstream:warn -spicedb: spicedb serve --grpc-preshared-key "secret" +spicedb: spicedb serve --grpc-preshared-key "secret" --http-addr :8080 --grpc-addr :50051 --datastore-engine memory -- cgit v1.2.3 From 2e6d541b2d182f3750dd7033d0e60b849b3e23f8 Mon Sep 17 00:00:00 2001 From: mo khan Date: Mon, 14 Jul 2025 14:02:17 -0600 Subject: chore: run authzd on 50052 instead of 50051 to prevent port collision --- Procfile | 2 +- etc/envoy/envoy.yaml | 2 +- src/bin/cli.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Procfile b/Procfile index 6483045c..4db49ea7 100644 --- a/Procfile +++ b/Procfile @@ -1,3 +1,3 @@ -authzd: ./bin/authzd server +authzd: ./bin/authzd server --addr 127.0.0.1:50052 envoy: ./bin/envoy -c ./etc/envoy/envoy.yaml --base-id 1 --log-level warn --component-log-level admin:warn,connection:warn,grpc:warn,http:warn,http2:warn,router:warn,upstream:warn spicedb: spicedb serve --grpc-preshared-key "secret" --http-addr :8080 --grpc-addr :50051 --datastore-engine memory diff --git a/etc/envoy/envoy.yaml b/etc/envoy/envoy.yaml index 19df6a4f..9594c9e4 100644 --- a/etc/envoy/envoy.yaml +++ b/etc/envoy/envoy.yaml @@ -34,7 +34,7 @@ static_resources: address: 
socket_address: address: 127.0.0.1 - port_value: 50051 + port_value: 50052 typed_extension_protocol_options: envoy.extensions.upstreams.http.v3.HttpProtocolOptions: "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions diff --git a/src/bin/cli.rs b/src/bin/cli.rs index 837ef80f..78aa1ba1 100644 --- a/src/bin/cli.rs +++ b/src/bin/cli.rs @@ -40,7 +40,7 @@ enum Commands { }, Server { /// Address to bind to - #[arg(short, long, env = "BIND_ADDR", default_value = "127.0.0.1:50051")] + #[arg(short, long, env = "BIND_ADDR", default_value = "127.0.0.1:50052")] addr: String, }, } -- cgit v1.2.3 From d3b876c7181731a8596d58750d1c2046bad8e8a5 Mon Sep 17 00:00:00 2001 From: mo khan Date: Mon, 14 Jul 2025 14:23:07 -0600 Subject: chore: update envoy to proxy requests directly to spicedb --- etc/envoy/envoy.yaml | 42 ++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 40 insertions(+), 2 deletions(-) diff --git a/etc/envoy/envoy.yaml b/etc/envoy/envoy.yaml index 9594c9e4..bfe2ce16 100644 --- a/etc/envoy/envoy.yaml +++ b/etc/envoy/envoy.yaml @@ -53,6 +53,37 @@ static_resources: max_pending_requests: 1024 max_requests: 1024 max_retries: 3 + - name: spicedb + connect_timeout: 5s + type: STATIC + lb_policy: ROUND_ROBIN + load_assignment: + cluster_name: spicedb + endpoints: + - lb_endpoints: + - endpoint: + address: + socket_address: + address: 127.0.0.1 + port_value: 50051 + typed_extension_protocol_options: + envoy.extensions.upstreams.http.v3.HttpProtocolOptions: + "@type": type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions + explicit_http_config: + http2_protocol_options: {} + health_checks: + - timeout: 3s + interval: 5s + unhealthy_threshold: 2 + healthy_threshold: 2 + grpc_health_check: {} + circuit_breakers: + thresholds: + - priority: DEFAULT + max_connections: 1024 + max_pending_requests: 1024 + max_requests: 1024 + max_retries: 3 listeners: - name: main_listener address: @@ -120,14 +151,21 @@ static_resources: 
key: "x-xss-protection" value: "1; mode=block" virtual_hosts: - - name: backend + - name: grpc_services domains: ["*"] routes: + # Route ext_authz to authzd - match: - prefix: "/" + prefix: "/envoy.service.auth.v3.Authorization/" route: cluster: authzd timeout: 30s + # Default route - everything else goes to SpiceDB + - match: + prefix: "/" + route: + cluster: spicedb + timeout: 30s retry_policy: retry_on: "5xx,reset,connect-failure,retriable-status-codes" num_retries: 3 -- cgit v1.2.3 From d90cc880aedbcea0f8678ed0c906a51a56c749e5 Mon Sep 17 00:00:00 2001 From: mo khan Date: Mon, 14 Jul 2025 14:24:43 -0600 Subject: docs: update README to include link to install spicedb --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 065aa7ef..6de81523 100644 --- a/README.md +++ b/README.md @@ -45,8 +45,9 @@ It integrates with an identity provider (IdP) and uses message queues to stay in ### Prerequisites -- [mise](https://mise.jdx.dev/) - [make](https://www.gnu.org/software/make/) +- [mise](https://mise.jdx.dev/) +- [spicedb](https://authzed.com/docs/spicedb/getting-started/installing-spicedb) 1. 
Install tools: -- cgit v1.2.3 From debf8403c595c98213bf17913824b081262c15e2 Mon Sep 17 00:00:00 2001 From: mo khan Date: Mon, 14 Jul 2025 14:32:53 -0600 Subject: chore: handle health checks in envoy rather than authzd --- Cargo.toml | 2 -- etc/envoy/envoy.yaml | 3 +++ src/authorization/server.rs | 16 +--------------- 3 files changed, 4 insertions(+), 17 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index c99f5625..0a3f3483 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -22,8 +22,6 @@ serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" tokio = { version = "1.0.0", features = ["macros", "rt-multi-thread"] } tonic = "0.13.1" -tonic-health = "0.13.1" -tonic-reflection = "0.13.1" tracing = "0.1" tracing-subscriber = { version = "0.3", features = ["json"] } urlencoding = "2.1" diff --git a/etc/envoy/envoy.yaml b/etc/envoy/envoy.yaml index bfe2ce16..62f8345b 100644 --- a/etc/envoy/envoy.yaml +++ b/etc/envoy/envoy.yaml @@ -131,6 +131,9 @@ static_resources: - name: ":path" string_match: exact: "/health" + cluster_min_healthy_percentages: + authzd: 100.0 + spicedb: 100.0 - name: envoy.filters.http.router typed_config: "@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router diff --git a/src/authorization/server.rs b/src/authorization/server.rs index 90d3edf6..31bf2af8 100644 --- a/src/authorization/server.rs +++ b/src/authorization/server.rs @@ -9,25 +9,11 @@ pub struct Server { impl Server { pub fn new(authorizer: T) -> Result> { - let (health_reporter, health_service) = tonic_health::server::health_reporter(); - std::mem::drop( - health_reporter.set_service_status("", tonic_health::ServingStatus::Serving), - ); let authorization_service = AuthorizationServer::new(CheckService::new(Arc::new(authorizer))); - let reflection_service = tonic_reflection::server::Builder::configure() - .register_encoded_file_descriptor_set(tonic_health::pb::FILE_DESCRIPTOR_SET) - .register_encoded_file_descriptor_set(include_bytes!( - 
"../../vendor/envoy-types/src/generated/types.bin" - )) - .build_v1()?; - Ok(Self::new_with(|mut builder| { - builder - .add_service(authorization_service) - .add_service(health_service) - .add_service(reflection_service) + builder.add_service(authorization_service) })) } -- cgit v1.2.3 From 73e7880ca39983a386955b783e1a73fd947b2534 Mon Sep 17 00:00:00 2001 From: mo khan Date: Mon, 14 Jul 2025 14:39:05 -0600 Subject: test: update integration test to test ext_authz service --- Cargo.lock | 39 +------------------------------------- tests/authorization/server_test.rs | 18 ++++++++---------- 2 files changed, 9 insertions(+), 48 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d9f7eca9..63ea22e8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -168,8 +168,6 @@ dependencies = [ "tokio-test", "tonic", "tonic-build", - "tonic-health", - "tonic-reflection", "tracing", "tracing-subscriber", "urlencoding", @@ -1697,7 +1695,7 @@ dependencies = [ "petgraph 0.6.5", "prettyplease", "prost 0.12.6", - "prost-types 0.12.6", + "prost-types", "regex", "syn", "tempfile", @@ -1738,15 +1736,6 @@ dependencies = [ "prost 0.12.6", ] -[[package]] -name = "prost-types" -version = "0.13.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52c2c1bf36ddb1a1c396b3601a3cec27c2462e45f07c386894ec3ccf5332bd16" -dependencies = [ - "prost 0.13.5", -] - [[package]] name = "psm" version = "0.1.26" @@ -2428,7 +2417,6 @@ dependencies = [ "futures-core", "pin-project-lite", "tokio", - "tokio-util", ] [[package]] @@ -2499,31 +2487,6 @@ dependencies = [ "syn", ] -[[package]] -name = "tonic-health" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb87334d340313fefa513b6e60794d44a86d5f039b523229c99c323e4e19ca4b" -dependencies = [ - "prost 0.13.5", - "tokio", - "tokio-stream", - "tonic", -] - -[[package]] -name = "tonic-reflection" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f9687bd5bfeafebdded2356950f278bba8226f0b32109537c4253406e09aafe1" -dependencies = [ - "prost 0.13.5", - "prost-types 0.13.5", - "tokio", - "tokio-stream", - "tonic", -] - [[package]] name = "tower" version = "0.5.2" diff --git a/tests/authorization/server_test.rs b/tests/authorization/server_test.rs index fe8c8a73..5a92dcff 100644 --- a/tests/authorization/server_test.rs +++ b/tests/authorization/server_test.rs @@ -27,21 +27,19 @@ mod tests { } #[tokio::test] - async fn test_health_check_service() { + async fn test_health_ext_authz_service() { let (addr, server) = start_server().await; - let mut client = - build_rpc_client(addr, tonic_health::pb::health_client::HealthClient::new).await; - let request = tonic::Request::new(tonic_health::pb::HealthCheckRequest { - service: String::new(), - }); + let mut client = build_rpc_client( + addr, + envoy_types::pb::envoy::service::auth::v3::authorization_client::AuthorizationClient::new, + ) + .await; + + let request = tonic::Request::new(envoy_types::ext_authz::v3::pb::CheckRequest::default()); let response = client.check(request).await; assert!(response.is_ok()); - assert_eq!( - response.unwrap().into_inner().status(), - tonic_health::pb::health_check_response::ServingStatus::Serving - ); server.abort(); } -- cgit v1.2.3 From 24d5c2cfbec143a47f1128472389e09e54435f27 Mon Sep 17 00:00:00 2001 From: mo khan Date: Mon, 14 Jul 2025 14:40:51 -0600 Subject: chore: add shell script to shim request to spicedb --- Procfile | 2 +- bin/spicedb | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100755 bin/spicedb diff --git a/Procfile b/Procfile index 4db49ea7..923aa187 100644 --- a/Procfile +++ b/Procfile @@ -1,3 +1,3 @@ authzd: ./bin/authzd server --addr 127.0.0.1:50052 envoy: ./bin/envoy -c ./etc/envoy/envoy.yaml --base-id 1 --log-level warn --component-log-level admin:warn,connection:warn,grpc:warn,http:warn,http2:warn,router:warn,upstream:warn -spicedb: spicedb serve --grpc-preshared-key "secret" --http-addr :8080 
--grpc-addr :50051 --datastore-engine memory +spicedb: ./bin/spicedb serve --grpc-preshared-key "secret" --http-addr :8080 --grpc-addr :50051 --datastore-engine memory diff --git a/bin/spicedb b/bin/spicedb new file mode 100755 index 00000000..acd1ee09 --- /dev/null +++ b/bin/spicedb @@ -0,0 +1,3 @@ +#!/bin/sh + +exec spiced $@ -- cgit v1.2.3 From 34fa928a67dd25e55e8ee292e0b10635b49b73bd Mon Sep 17 00:00:00 2001 From: mo khan Date: Mon, 14 Jul 2025 14:44:34 -0600 Subject: chore: provide error message when spicedb is not installed --- bin/spicedb | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/bin/spicedb b/bin/spicedb index acd1ee09..6c43a153 100755 --- a/bin/spicedb +++ b/bin/spicedb @@ -1,3 +1,8 @@ #!/bin/sh -exec spiced $@ +if ! command -v spicedb >/dev/null 2>&1; then + echo "Install spicedb: https://authzed.com/docs/spicedb/getting-started/installing-spicedb" + exit 1 +fi + +exec spicedb "$@" -- cgit v1.2.3 From 0cdde3e8b16d8a9b03ac8e5ba1214508fc8931b4 Mon Sep 17 00:00:00 2001 From: mo khan Date: Mon, 14 Jul 2025 14:44:51 -0600 Subject: chore: removed vendored code for removed crates --- vendor/prost-types-0.12.6/.cargo-checksum.json | 1 - vendor/prost-types-0.12.6/Cargo.toml | 42 - vendor/prost-types-0.12.6/LICENSE | 201 -- vendor/prost-types-0.12.6/README.md | 21 - vendor/prost-types-0.12.6/src/any.rs | 69 - vendor/prost-types-0.12.6/src/compiler.rs | 174 -- vendor/prost-types-0.12.6/src/datetime.rs | 864 -------- vendor/prost-types-0.12.6/src/duration.rs | 333 --- vendor/prost-types-0.12.6/src/lib.rs | 55 - vendor/prost-types-0.12.6/src/protobuf.rs | 2309 -------------------- vendor/prost-types-0.12.6/src/timestamp.rs | 416 ---- vendor/prost-types-0.12.6/src/type_url.rs | 70 - vendor/prost-types/.cargo-checksum.json | 2 +- vendor/prost-types/Cargo.lock | 471 ---- vendor/prost-types/Cargo.toml | 32 +- vendor/prost-types/README.md | 2 +- vendor/prost-types/src/compiler.rs | 13 +- vendor/prost-types/src/conversions.rs | 62 - 
vendor/prost-types/src/datetime.rs | 205 +- vendor/prost-types/src/duration.rs | 228 +- vendor/prost-types/src/lib.rs | 33 +- vendor/prost-types/src/protobuf.rs | 224 +- vendor/prost-types/src/timestamp.rs | 81 +- vendor/tonic-health/.cargo-checksum.json | 1 - vendor/tonic-health/Cargo.lock | 474 ---- vendor/tonic-health/Cargo.toml | 97 - vendor/tonic-health/LICENSE | 19 - vendor/tonic-health/README.md | 14 - vendor/tonic-health/proto/health.proto | 63 - .../tonic-health/src/generated/grpc_health_v1.rs | 459 ---- .../src/generated/grpc_health_v1_fds.rs | 63 - vendor/tonic-health/src/lib.rs | 76 - vendor/tonic-health/src/server.rs | 353 --- vendor/tonic-reflection/.cargo-checksum.json | 1 - vendor/tonic-reflection/Cargo.lock | 728 ------ vendor/tonic-reflection/Cargo.toml | 128 -- vendor/tonic-reflection/LICENSE | 19 - vendor/tonic-reflection/README.md | 3 - vendor/tonic-reflection/proto/reflection_v1.proto | 147 -- .../proto/reflection_v1alpha.proto | 136 -- .../src/generated/grpc_reflection_v1.rs | 461 ---- .../src/generated/grpc_reflection_v1alpha.rs | 461 ---- .../src/generated/reflection_v1_fds.rs | 161 -- .../src/generated/reflection_v1alpha1_fds.rs | 153 -- vendor/tonic-reflection/src/lib.rs | 66 - vendor/tonic-reflection/src/server/mod.rs | 326 --- vendor/tonic-reflection/src/server/v1.rs | 138 -- vendor/tonic-reflection/src/server/v1alpha.rs | 138 -- vendor/tonic-reflection/tests/server.rs | 151 -- vendor/tonic-reflection/tests/versions.rs | 172 -- 50 files changed, 236 insertions(+), 10680 deletions(-) delete mode 100644 vendor/prost-types-0.12.6/.cargo-checksum.json delete mode 100644 vendor/prost-types-0.12.6/Cargo.toml delete mode 100644 vendor/prost-types-0.12.6/LICENSE delete mode 100644 vendor/prost-types-0.12.6/README.md delete mode 100644 vendor/prost-types-0.12.6/src/any.rs delete mode 100644 vendor/prost-types-0.12.6/src/compiler.rs delete mode 100644 vendor/prost-types-0.12.6/src/datetime.rs delete mode 100644 
vendor/prost-types-0.12.6/src/duration.rs delete mode 100644 vendor/prost-types-0.12.6/src/lib.rs delete mode 100644 vendor/prost-types-0.12.6/src/protobuf.rs delete mode 100644 vendor/prost-types-0.12.6/src/timestamp.rs delete mode 100644 vendor/prost-types-0.12.6/src/type_url.rs delete mode 100644 vendor/prost-types/Cargo.lock delete mode 100644 vendor/prost-types/src/conversions.rs delete mode 100644 vendor/tonic-health/.cargo-checksum.json delete mode 100644 vendor/tonic-health/Cargo.lock delete mode 100644 vendor/tonic-health/Cargo.toml delete mode 100644 vendor/tonic-health/LICENSE delete mode 100644 vendor/tonic-health/README.md delete mode 100644 vendor/tonic-health/proto/health.proto delete mode 100644 vendor/tonic-health/src/generated/grpc_health_v1.rs delete mode 100644 vendor/tonic-health/src/generated/grpc_health_v1_fds.rs delete mode 100644 vendor/tonic-health/src/lib.rs delete mode 100644 vendor/tonic-health/src/server.rs delete mode 100644 vendor/tonic-reflection/.cargo-checksum.json delete mode 100644 vendor/tonic-reflection/Cargo.lock delete mode 100644 vendor/tonic-reflection/Cargo.toml delete mode 100644 vendor/tonic-reflection/LICENSE delete mode 100644 vendor/tonic-reflection/README.md delete mode 100644 vendor/tonic-reflection/proto/reflection_v1.proto delete mode 100644 vendor/tonic-reflection/proto/reflection_v1alpha.proto delete mode 100644 vendor/tonic-reflection/src/generated/grpc_reflection_v1.rs delete mode 100644 vendor/tonic-reflection/src/generated/grpc_reflection_v1alpha.rs delete mode 100644 vendor/tonic-reflection/src/generated/reflection_v1_fds.rs delete mode 100644 vendor/tonic-reflection/src/generated/reflection_v1alpha1_fds.rs delete mode 100644 vendor/tonic-reflection/src/lib.rs delete mode 100644 vendor/tonic-reflection/src/server/mod.rs delete mode 100644 vendor/tonic-reflection/src/server/v1.rs delete mode 100644 vendor/tonic-reflection/src/server/v1alpha.rs delete mode 100644 vendor/tonic-reflection/tests/server.rs 
delete mode 100644 vendor/tonic-reflection/tests/versions.rs diff --git a/vendor/prost-types-0.12.6/.cargo-checksum.json b/vendor/prost-types-0.12.6/.cargo-checksum.json deleted file mode 100644 index 2cb8563d..00000000 --- a/vendor/prost-types-0.12.6/.cargo-checksum.json +++ /dev/null @@ -1 +0,0 @@ -{"files":{"Cargo.toml":"cadf2579e0a1e10bf59134e5341555e9c8557ccccf2f390e4ef2320bb76de718","LICENSE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","README.md":"05bf3eb034345e386d76f957e8ccdb26960cf5f78c050804b074ef3f01f92477","src/any.rs":"663ad6e55a0e15ace05ab66df21555e5fa81258ca5e9624e3cacb1ec56277b72","src/compiler.rs":"cdeb17a1df6f555c358dbfb0270f2a151ad759cae42be4a66af05b686f517d0f","src/datetime.rs":"df4fd7aee4d6fb5e28850d797cbd490ba9446a2e3fd6bbec015baf8a7ccfe4e4","src/duration.rs":"7378442f6ae52b9799fd114b4c6be6edc1bc41834b1f5b56f98e3c0b7037a6f2","src/lib.rs":"e3c05512b314b7a9b64d302f1a240830553cd1f28629b9ad439591f49935af41","src/protobuf.rs":"5d92f618bb6ad3ac3939a182a4ff8c106c90ec6588054738b0e65caaf1e90e76","src/timestamp.rs":"8eaa6dd53633f2a05839e5e5790da7adcb50ed67fb2ceb5358e2440080492be8","src/type_url.rs":"dc69abaa0ebaaaa58ea81dfba6712bc5be00c35bfff5a3da80b5df0c49c7725f"},"package":"9091c90b0a32608e984ff2fa4091273cbdd755d54935c51d520887f4a1dbd5b0"} \ No newline at end of file diff --git a/vendor/prost-types-0.12.6/Cargo.toml b/vendor/prost-types-0.12.6/Cargo.toml deleted file mode 100644 index 0c71fd73..00000000 --- a/vendor/prost-types-0.12.6/Cargo.toml +++ /dev/null @@ -1,42 +0,0 @@ -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO -# -# When uploading crates to the registry Cargo will automatically -# "normalize" Cargo.toml files for maximal compatibility -# with all versions of Cargo and also rewrite `path` dependencies -# to registry (e.g., crates.io) dependencies. -# -# If you are reading this file be aware that the original Cargo.toml -# will likely look very different (and much more reasonable). 
-# See Cargo.toml.orig for the original contents. - -[package] -edition = "2021" -rust-version = "1.70" -name = "prost-types" -version = "0.12.6" -authors = [ - "Dan Burkert ", - "Lucio Franco ", - "Casper Meijn ", - "Tokio Contributors ", -] -description = "Prost definitions of Protocol Buffers well known types." -documentation = "https://docs.rs/prost-types" -readme = "README.md" -license = "Apache-2.0" -repository = "https://github.com/tokio-rs/prost" - -[lib] -doctest = false - -[dependencies.prost] -version = "0.12.6" -features = ["prost-derive"] -default-features = false - -[dev-dependencies.proptest] -version = "1" - -[features] -default = ["std"] -std = ["prost/std"] diff --git a/vendor/prost-types-0.12.6/LICENSE b/vendor/prost-types-0.12.6/LICENSE deleted file mode 100644 index 16fe87b0..00000000 --- a/vendor/prost-types-0.12.6/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/prost-types-0.12.6/README.md b/vendor/prost-types-0.12.6/README.md deleted file mode 100644 index 8724577b..00000000 --- a/vendor/prost-types-0.12.6/README.md +++ /dev/null @@ -1,21 +0,0 @@ -[![Documentation](https://docs.rs/prost-types/badge.svg)](https://docs.rs/prost-types/) -[![Crate](https://img.shields.io/crates/v/prost-types.svg)](https://crates.io/crates/prost-types) - -# `prost-types` - -Prost definitions of Protocol Buffers well known types. See the [Protobuf reference][1] for more -information about well known types. - -[1]: https://developers.google.com/protocol-buffers/docs/reference/google.protobuf - -## License - -`prost-types` is distributed under the terms of the Apache License (Version 2.0). -`prost-types` includes code imported from the Protocol Buffers projet, which is -included under its original ([BSD][2]) license. - -[2]: https://github.com/google/protobuf/blob/master/LICENSE - -See [LICENSE](..LICENSE) for details. - -Copyright 2017 Dan Burkert diff --git a/vendor/prost-types-0.12.6/src/any.rs b/vendor/prost-types-0.12.6/src/any.rs deleted file mode 100644 index af3e0e4d..00000000 --- a/vendor/prost-types-0.12.6/src/any.rs +++ /dev/null @@ -1,69 +0,0 @@ -use super::*; - -impl Any { - /// Serialize the given message type `M` as [`Any`]. 
- pub fn from_msg(msg: &M) -> Result - where - M: Name, - { - let type_url = M::type_url(); - let mut value = Vec::new(); - Message::encode(msg, &mut value)?; - Ok(Any { type_url, value }) - } - - /// Decode the given message type `M` from [`Any`], validating that it has - /// the expected type URL. - pub fn to_msg(&self) -> Result - where - M: Default + Name + Sized, - { - let expected_type_url = M::type_url(); - - if let (Some(expected), Some(actual)) = ( - TypeUrl::new(&expected_type_url), - TypeUrl::new(&self.type_url), - ) { - if expected == actual { - return M::decode(self.value.as_slice()); - } - } - - let mut err = DecodeError::new(format!( - "expected type URL: \"{}\" (got: \"{}\")", - expected_type_url, &self.type_url - )); - err.push("unexpected type URL", "type_url"); - Err(err) - } -} - -impl Name for Any { - const PACKAGE: &'static str = PACKAGE; - const NAME: &'static str = "Any"; - - fn type_url() -> String { - type_url_for::() - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn check_any_serialization() { - let message = Timestamp::date(2000, 1, 1).unwrap(); - let any = Any::from_msg(&message).unwrap(); - assert_eq!( - &any.type_url, - "type.googleapis.com/google.protobuf.Timestamp" - ); - - let message2 = any.to_msg::().unwrap(); - assert_eq!(message, message2); - - // Wrong type URL - assert!(any.to_msg::().is_err()); - } -} diff --git a/vendor/prost-types-0.12.6/src/compiler.rs b/vendor/prost-types-0.12.6/src/compiler.rs deleted file mode 100644 index 0a3b4680..00000000 --- a/vendor/prost-types-0.12.6/src/compiler.rs +++ /dev/null @@ -1,174 +0,0 @@ -// This file is @generated by prost-build. -/// The version number of protocol compiler. 
-#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Version { - #[prost(int32, optional, tag = "1")] - pub major: ::core::option::Option, - #[prost(int32, optional, tag = "2")] - pub minor: ::core::option::Option, - #[prost(int32, optional, tag = "3")] - pub patch: ::core::option::Option, - /// A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should - /// be empty for mainline stable releases. - #[prost(string, optional, tag = "4")] - pub suffix: ::core::option::Option<::prost::alloc::string::String>, -} -/// An encoded CodeGeneratorRequest is written to the plugin's stdin. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CodeGeneratorRequest { - /// The .proto files that were explicitly listed on the command-line. The - /// code generator should generate code only for these files. Each file's - /// descriptor will be included in proto_file, below. - #[prost(string, repeated, tag = "1")] - pub file_to_generate: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// The generator parameter passed on the command-line. - #[prost(string, optional, tag = "2")] - pub parameter: ::core::option::Option<::prost::alloc::string::String>, - /// FileDescriptorProtos for all files in files_to_generate and everything - /// they import. The files will appear in topological order, so each file - /// appears before any file that imports it. - /// - /// protoc guarantees that all proto_files will be written after - /// the fields above, even though this is not technically guaranteed by the - /// protobuf wire format. This theoretically could allow a plugin to stream - /// in the FileDescriptorProtos and handle them one by one rather than read - /// the entire set into memory at once. 
However, as of this writing, this - /// is not similarly optimized on protoc's end -- it will store all fields in - /// memory at once before sending them to the plugin. - /// - /// Type names of fields and extensions in the FileDescriptorProto are always - /// fully qualified. - #[prost(message, repeated, tag = "15")] - pub proto_file: ::prost::alloc::vec::Vec, - /// The version number of protocol compiler. - #[prost(message, optional, tag = "3")] - pub compiler_version: ::core::option::Option, -} -/// The plugin writes an encoded CodeGeneratorResponse to stdout. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CodeGeneratorResponse { - /// Error message. If non-empty, code generation failed. The plugin process - /// should exit with status code zero even if it reports an error in this way. - /// - /// This should be used to indicate errors in .proto files which prevent the - /// code generator from generating correct code. Errors which indicate a - /// problem in protoc itself -- such as the input CodeGeneratorRequest being - /// unparseable -- should be reported by writing a message to stderr and - /// exiting with a non-zero status code. - #[prost(string, optional, tag = "1")] - pub error: ::core::option::Option<::prost::alloc::string::String>, - /// A bitmask of supported features that the code generator supports. - /// This is a bitwise "or" of values from the Feature enum. - #[prost(uint64, optional, tag = "2")] - pub supported_features: ::core::option::Option, - #[prost(message, repeated, tag = "15")] - pub file: ::prost::alloc::vec::Vec, -} -/// Nested message and enum types in `CodeGeneratorResponse`. -pub mod code_generator_response { - /// Represents a single generated file. - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct File { - /// The file name, relative to the output directory. The name must not - /// contain "." or ".." 
components and must be relative, not be absolute (so, - /// the file cannot lie outside the output directory). "/" must be used as - /// the path separator, not "". - /// - /// If the name is omitted, the content will be appended to the previous - /// file. This allows the generator to break large files into small chunks, - /// and allows the generated text to be streamed back to protoc so that large - /// files need not reside completely in memory at one time. Note that as of - /// this writing protoc does not optimize for this -- it will read the entire - /// CodeGeneratorResponse before writing files to disk. - #[prost(string, optional, tag = "1")] - pub name: ::core::option::Option<::prost::alloc::string::String>, - /// If non-empty, indicates that the named file should already exist, and the - /// content here is to be inserted into that file at a defined insertion - /// point. This feature allows a code generator to extend the output - /// produced by another code generator. The original generator may provide - /// insertion points by placing special annotations in the file that look - /// like: - /// @@protoc_insertion_point(NAME) - /// The annotation can have arbitrary text before and after it on the line, - /// which allows it to be placed in a comment. NAME should be replaced with - /// an identifier naming the point -- this is what other generators will use - /// as the insertion_point. Code inserted at this point will be placed - /// immediately above the line containing the insertion point (thus multiple - /// insertions to the same point will come out in the order they were added). - /// The double-@ is intended to make it unlikely that the generated code - /// could contain things that look like insertion points by accident. 
- /// - /// For example, the C++ code generator places the following line in the - /// .pb.h files that it generates: - /// // @@protoc_insertion_point(namespace_scope) - /// This line appears within the scope of the file's package namespace, but - /// outside of any particular class. Another plugin can then specify the - /// insertion_point "namespace_scope" to generate additional classes or - /// other declarations that should be placed in this scope. - /// - /// Note that if the line containing the insertion point begins with - /// whitespace, the same whitespace will be added to every line of the - /// inserted text. This is useful for languages like Python, where - /// indentation matters. In these languages, the insertion point comment - /// should be indented the same amount as any inserted code will need to be - /// in order to work correctly in that context. - /// - /// The code generator that generates the initial file and the one which - /// inserts into it must both run as part of a single invocation of protoc. - /// Code generators are executed in the order in which they appear on the - /// command line. - /// - /// If |insertion_point| is present, |name| must also be present. - #[prost(string, optional, tag = "2")] - pub insertion_point: ::core::option::Option<::prost::alloc::string::String>, - /// The file contents. - #[prost(string, optional, tag = "15")] - pub content: ::core::option::Option<::prost::alloc::string::String>, - /// Information describing the file content being inserted. If an insertion - /// point is used, this information will be appropriately offset and inserted - /// into the code generation metadata for the generated files. - #[prost(message, optional, tag = "16")] - pub generated_code_info: ::core::option::Option, - } - /// Sync with code_generator.h. 
- #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] - #[repr(i32)] - pub enum Feature { - None = 0, - Proto3Optional = 1, - } - impl Feature { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Feature::None => "FEATURE_NONE", - Feature::Proto3Optional => "FEATURE_PROTO3_OPTIONAL", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "FEATURE_NONE" => Some(Self::None), - "FEATURE_PROTO3_OPTIONAL" => Some(Self::Proto3Optional), - _ => None, - } - } - } -} diff --git a/vendor/prost-types-0.12.6/src/datetime.rs b/vendor/prost-types-0.12.6/src/datetime.rs deleted file mode 100644 index 2435ffe7..00000000 --- a/vendor/prost-types-0.12.6/src/datetime.rs +++ /dev/null @@ -1,864 +0,0 @@ -//! A date/time type which exists primarily to convert [`Timestamp`]s into an RFC 3339 formatted -//! string. - -use core::fmt; - -use crate::Duration; -use crate::Timestamp; - -/// A point in time, represented as a date and time in the UTC timezone. -#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] -pub(crate) struct DateTime { - /// The year. - pub(crate) year: i64, - /// The month of the year, from 1 to 12, inclusive. - pub(crate) month: u8, - /// The day of the month, from 1 to 31, inclusive. - pub(crate) day: u8, - /// The hour of the day, from 0 to 23, inclusive. - pub(crate) hour: u8, - /// The minute of the hour, from 0 to 59, inclusive. - pub(crate) minute: u8, - /// The second of the minute, from 0 to 59, inclusive. - pub(crate) second: u8, - /// The nanoseconds, from 0 to 999_999_999, inclusive. 
- pub(crate) nanos: u32, -} - -impl DateTime { - /// The minimum representable [`Timestamp`] as a `DateTime`. - pub(crate) const MIN: DateTime = DateTime { - year: -292_277_022_657, - month: 1, - day: 27, - hour: 8, - minute: 29, - second: 52, - nanos: 0, - }; - - /// The maximum representable [`Timestamp`] as a `DateTime`. - pub(crate) const MAX: DateTime = DateTime { - year: 292_277_026_596, - month: 12, - day: 4, - hour: 15, - minute: 30, - second: 7, - nanos: 999_999_999, - }; - - /// Returns `true` if the `DateTime` is a valid calendar date. - pub(crate) fn is_valid(&self) -> bool { - self >= &DateTime::MIN - && self <= &DateTime::MAX - && self.month > 0 - && self.month <= 12 - && self.day > 0 - && self.day <= days_in_month(self.year, self.month) - && self.hour < 24 - && self.minute < 60 - && self.second < 60 - && self.nanos < 1_000_000_000 - } -} - -impl fmt::Display for DateTime { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - // Pad years to at least 4 digits. - if self.year > 9999 { - write!(f, "+{}", self.year)?; - } else if self.year < 0 { - write!(f, "{:05}", self.year)?; - } else { - write!(f, "{:04}", self.year)?; - }; - - write!( - f, - "-{:02}-{:02}T{:02}:{:02}:{:02}", - self.month, self.day, self.hour, self.minute, self.second, - )?; - - // Format subseconds to either nothing, millis, micros, or nanos. - let nanos = self.nanos; - if nanos == 0 { - write!(f, "Z") - } else if nanos % 1_000_000 == 0 { - write!(f, ".{:03}Z", nanos / 1_000_000) - } else if nanos % 1_000 == 0 { - write!(f, ".{:06}Z", nanos / 1_000) - } else { - write!(f, ".{:09}Z", nanos) - } - } -} - -impl From for DateTime { - /// musl's [`__secs_to_tm`][1] converted to Rust via [c2rust][2] and then cleaned up by hand. - /// - /// All existing `strftime`-like APIs in Rust are unable to handle the full range of timestamps - /// representable by `Timestamp`, including `strftime` itself, since tm.tm_year is an int. 
- /// - /// [1]: http://git.musl-libc.org/cgit/musl/tree/src/time/__secs_to_tm.c - /// [2]: https://c2rust.com/ - fn from(mut timestamp: Timestamp) -> DateTime { - timestamp.normalize(); - - let t = timestamp.seconds; - let nanos = timestamp.nanos; - - // 2000-03-01 (mod 400 year, immediately after feb29 - const LEAPOCH: i64 = 946_684_800 + 86400 * (31 + 29); - const DAYS_PER_400Y: i32 = 365 * 400 + 97; - const DAYS_PER_100Y: i32 = 365 * 100 + 24; - const DAYS_PER_4Y: i32 = 365 * 4 + 1; - const DAYS_IN_MONTH: [u8; 12] = [31, 30, 31, 30, 31, 31, 30, 31, 30, 31, 31, 29]; - - // Note(dcb): this bit is rearranged slightly to avoid integer overflow. - let mut days: i64 = (t / 86_400) - (LEAPOCH / 86_400); - let mut remsecs: i32 = (t % 86_400) as i32; - if remsecs < 0i32 { - remsecs += 86_400; - days -= 1 - } - - let mut qc_cycles: i32 = (days / i64::from(DAYS_PER_400Y)) as i32; - let mut remdays: i32 = (days % i64::from(DAYS_PER_400Y)) as i32; - if remdays < 0 { - remdays += DAYS_PER_400Y; - qc_cycles -= 1; - } - - let mut c_cycles: i32 = remdays / DAYS_PER_100Y; - if c_cycles == 4 { - c_cycles -= 1; - } - remdays -= c_cycles * DAYS_PER_100Y; - - let mut q_cycles: i32 = remdays / DAYS_PER_4Y; - if q_cycles == 25 { - q_cycles -= 1; - } - remdays -= q_cycles * DAYS_PER_4Y; - - let mut remyears: i32 = remdays / 365; - if remyears == 4 { - remyears -= 1; - } - remdays -= remyears * 365; - - let mut years: i64 = i64::from(remyears) - + 4 * i64::from(q_cycles) - + 100 * i64::from(c_cycles) - + 400 * i64::from(qc_cycles); - - let mut months: i32 = 0; - while i32::from(DAYS_IN_MONTH[months as usize]) <= remdays { - remdays -= i32::from(DAYS_IN_MONTH[months as usize]); - months += 1 - } - - if months >= 10 { - months -= 12; - years += 1; - } - - let date_time = DateTime { - year: years + 2000, - month: (months + 3) as u8, - day: (remdays + 1) as u8, - hour: (remsecs / 3600) as u8, - minute: (remsecs / 60 % 60) as u8, - second: (remsecs % 60) as u8, - nanos: nanos as u32, - }; - 
debug_assert!(date_time.is_valid()); - date_time - } -} - -/// Returns the number of days in the month. -fn days_in_month(year: i64, month: u8) -> u8 { - const DAYS_IN_MONTH: [u8; 12] = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]; - let (_, is_leap) = year_to_seconds(year); - DAYS_IN_MONTH[usize::from(month - 1)] + u8::from(is_leap && month == 2) -} - -macro_rules! ensure { - ($expr:expr) => {{ - if !$expr { - return None; - } - }}; -} - -/// Parses a date in RFC 3339 format from ASCII string `b`, returning the year, month, day, and -/// remaining input. -/// -/// The date is not validated according to a calendar. -fn parse_date(s: &str) -> Option<(i64, u8, u8, &str)> { - debug_assert!(s.is_ascii()); - - // Smallest valid date is YYYY-MM-DD. - ensure!(s.len() >= 10); - - // Parse the year in one of three formats: - // * +YYYY[Y]+ - // * -[Y]+ - // * YYYY - let (year, s) = match s.as_bytes()[0] { - b'+' => { - let (digits, s) = parse_digits(&s[1..]); - ensure!(digits.len() >= 5); - let date: i64 = digits.parse().ok()?; - (date, s) - } - b'-' => { - let (digits, s) = parse_digits(&s[1..]); - ensure!(digits.len() >= 4); - let date: i64 = digits.parse().ok()?; - (-date, s) - } - _ => { - // Parse a 4 digit numeric. - let (n1, s) = parse_two_digit_numeric(s)?; - let (n2, s) = parse_two_digit_numeric(s)?; - (i64::from(n1) * 100 + i64::from(n2), s) - } - }; - - let s = parse_char(s, b'-')?; - let (month, s) = parse_two_digit_numeric(s)?; - let s = parse_char(s, b'-')?; - let (day, s) = parse_two_digit_numeric(s)?; - Some((year, month, day, s)) -} - -/// Parses a time in RFC 3339 format from ASCII string `s`, returning the hour, minute, second, and -/// nanos. -/// -/// The date is not validated according to a calendar. 
-fn parse_time(s: &str) -> Option<(u8, u8, u8, u32, &str)> { - debug_assert!(s.is_ascii()); - - let (hour, s) = parse_two_digit_numeric(s)?; - let s = parse_char(s, b':')?; - let (minute, s) = parse_two_digit_numeric(s)?; - let s = parse_char(s, b':')?; - let (second, s) = parse_two_digit_numeric(s)?; - - let (nanos, s) = parse_nanos(s)?; - - Some((hour, minute, second, nanos, s)) -} - -/// Parses an optional nanosecond time from ASCII string `s`, returning the nanos and remaining -/// string. -fn parse_nanos(s: &str) -> Option<(u32, &str)> { - debug_assert!(s.is_ascii()); - - // Parse the nanoseconds, if present. - let (nanos, s) = if let Some(s) = parse_char(s, b'.') { - let (digits, s) = parse_digits(s); - ensure!(digits.len() <= 9); - let nanos = 10u32.pow(9 - digits.len() as u32) * digits.parse::().ok()?; - (nanos, s) - } else { - (0, s) - }; - - Some((nanos, s)) -} - -/// Parses a timezone offset in RFC 3339 format from ASCII string `s`, returning the offset hour, -/// offset minute, and remaining input. -fn parse_offset(s: &str) -> Option<(i8, i8, &str)> { - debug_assert!(s.is_ascii()); - - if s.is_empty() { - // If no timezone specified, assume UTC. - return Some((0, 0, s)); - } - - // Snowflake's timestamp format contains a space separator before the offset. - let s = parse_char(s, b' ').unwrap_or(s); - - if let Some(s) = parse_char_ignore_case(s, b'Z') { - Some((0, 0, s)) - } else { - let (is_positive, s) = if let Some(s) = parse_char(s, b'+') { - (true, s) - } else if let Some(s) = parse_char(s, b'-') { - (false, s) - } else { - return None; - }; - - let (hour, s) = parse_two_digit_numeric(s)?; - - let (minute, s) = if s.is_empty() { - // No offset minutes are specified, e.g. +00 or +07. - (0, s) - } else { - // Optional colon separator between the hour and minute digits. - let s = parse_char(s, b':').unwrap_or(s); - let (minute, s) = parse_two_digit_numeric(s)?; - (minute, s) - }; - - // '-00:00' indicates an unknown local offset. 
- ensure!(is_positive || hour > 0 || minute > 0); - - ensure!(hour < 24 && minute < 60); - - let hour = hour as i8; - let minute = minute as i8; - - if is_positive { - Some((hour, minute, s)) - } else { - Some((-hour, -minute, s)) - } - } -} - -/// Parses a two-digit base-10 number from ASCII string `s`, returning the number and the remaining -/// string. -fn parse_two_digit_numeric(s: &str) -> Option<(u8, &str)> { - debug_assert!(s.is_ascii()); - - let (digits, s) = s.split_at(2); - Some((digits.parse().ok()?, s)) -} - -/// Splits ASCII string `s` at the first occurrence of a non-digit character. -fn parse_digits(s: &str) -> (&str, &str) { - debug_assert!(s.is_ascii()); - - let idx = s - .as_bytes() - .iter() - .position(|c| !c.is_ascii_digit()) - .unwrap_or(s.len()); - s.split_at(idx) -} - -/// Attempts to parse ASCII character `c` from ASCII string `s`, returning the remaining string. If -/// the character can not be parsed, returns `None`. -fn parse_char(s: &str, c: u8) -> Option<&str> { - debug_assert!(s.is_ascii()); - - ensure!(*s.as_bytes().first()? == c); - Some(&s[1..]) -} - -/// Attempts to parse ASCII character `c` from ASCII string `s`, ignoring ASCII case, returning the -/// remaining string. If the character can not be parsed, returns `None`. -fn parse_char_ignore_case(s: &str, c: u8) -> Option<&str> { - debug_assert!(s.is_ascii()); - - ensure!(s.as_bytes().first()?.eq_ignore_ascii_case(&c)); - Some(&s[1..]) -} - -/// Returns the offset in seconds from the Unix epoch of the date time. -/// -/// This is musl's [`__tm_to_secs`][1] converted to Rust via [c2rust[2] and then cleaned up by -/// hand. 
-/// -/// [1]: https://git.musl-libc.org/cgit/musl/tree/src/time/__tm_to_secs.c -/// [2]: https://c2rust.com/ -fn date_time_to_seconds(tm: &DateTime) -> i64 { - let (start_of_year, is_leap) = year_to_seconds(tm.year); - - let seconds_within_year = month_to_seconds(tm.month, is_leap) - + 86400 * u32::from(tm.day - 1) - + 3600 * u32::from(tm.hour) - + 60 * u32::from(tm.minute) - + u32::from(tm.second); - - (start_of_year + i128::from(seconds_within_year)) as i64 -} - -/// Returns the number of seconds in the year prior to the start of the provided month. -/// -/// This is musl's [`__month_to_secs`][1] converted to Rust via c2rust and then cleaned up by hand. -/// -/// [1]: https://git.musl-libc.org/cgit/musl/tree/src/time/__month_to_secs.c -fn month_to_seconds(month: u8, is_leap: bool) -> u32 { - const SECS_THROUGH_MONTH: [u32; 12] = [ - 0, - 31 * 86400, - 59 * 86400, - 90 * 86400, - 120 * 86400, - 151 * 86400, - 181 * 86400, - 212 * 86400, - 243 * 86400, - 273 * 86400, - 304 * 86400, - 334 * 86400, - ]; - let t = SECS_THROUGH_MONTH[usize::from(month - 1)]; - if is_leap && month > 2 { - t + 86400 - } else { - t - } -} - -/// Returns the offset in seconds from the Unix epoch of the start of a year. -/// -/// musl's [`__year_to_secs`][1] converted to Rust via c2rust and then cleaned up by hand. -/// -/// Returns an i128 because the start of the earliest supported year underflows i64. -/// -/// [1]: https://git.musl-libc.org/cgit/musl/tree/src/time/__year_to_secs.c -pub(crate) fn year_to_seconds(year: i64) -> (i128, bool) { - let is_leap; - let year = year - 1900; - - // Fast path for years 1900 - 2038. 
- if year as u64 <= 138 { - let mut leaps: i64 = (year - 68) >> 2; - if (year - 68).trailing_zeros() >= 2 { - leaps -= 1; - is_leap = true; - } else { - is_leap = false; - } - return ( - i128::from(31_536_000 * (year - 70) + 86400 * leaps), - is_leap, - ); - } - - let centuries: i64; - let mut leaps: i64; - - let mut cycles: i64 = (year - 100) / 400; - let mut rem: i64 = (year - 100) % 400; - - if rem < 0 { - cycles -= 1; - rem += 400 - } - if rem == 0 { - is_leap = true; - centuries = 0; - leaps = 0; - } else { - if rem >= 200 { - if rem >= 300 { - centuries = 3; - rem -= 300; - } else { - centuries = 2; - rem -= 200; - } - } else if rem >= 100 { - centuries = 1; - rem -= 100; - } else { - centuries = 0; - } - if rem == 0 { - is_leap = false; - leaps = 0; - } else { - leaps = rem / 4; - rem %= 4; - is_leap = rem == 0; - } - } - leaps += 97 * cycles + 24 * centuries - i64::from(is_leap); - - ( - i128::from((year - 100) * 31_536_000) + i128::from(leaps * 86400 + 946_684_800 + 86400), - is_leap, - ) -} - -/// Parses a timestamp in RFC 3339 format from `s`. -pub(crate) fn parse_timestamp(s: &str) -> Option { - // Check that the string is ASCII, since subsequent parsing steps use byte-level indexing. - ensure!(s.is_ascii()); - - let (year, month, day, s) = parse_date(s)?; - - if s.is_empty() { - // The string only contained a date. - let date_time = DateTime { - year, - month, - day, - ..DateTime::default() - }; - - ensure!(date_time.is_valid()); - - return Some(Timestamp::from(date_time)); - } - - // Accept either 'T' or ' ' as delimiter between date and time. - let s = parse_char_ignore_case(s, b'T').or_else(|| parse_char(s, b' '))?; - let (hour, minute, mut second, nanos, s) = parse_time(s)?; - let (offset_hour, offset_minute, s) = parse_offset(s)?; - - ensure!(s.is_empty()); - - // Detect whether the timestamp falls in a leap second. If this is the case, roll it back - // to the previous second. 
To be maximally conservative, this should be checking that the - // timestamp is the last second in the UTC day (23:59:60), and even potentially checking - // that it's the final day of the UTC month, however these checks are non-trivial because - // at this point we have, in effect, a local date time, since the offset has not been - // applied. - if second == 60 { - second = 59; - } - - let date_time = DateTime { - year, - month, - day, - hour, - minute, - second, - nanos, - }; - - ensure!(date_time.is_valid()); - - let Timestamp { seconds, nanos } = Timestamp::from(date_time); - - let seconds = - seconds.checked_sub(i64::from(offset_hour) * 3600 + i64::from(offset_minute) * 60)?; - - Some(Timestamp { seconds, nanos }) -} - -/// Parse a duration in the [Protobuf JSON encoding spec format][1]. -/// -/// [1]: https://developers.google.com/protocol-buffers/docs/proto3#json -pub(crate) fn parse_duration(s: &str) -> Option { - // Check that the string is ASCII, since subsequent parsing steps use byte-level indexing. - ensure!(s.is_ascii()); - - let (is_negative, s) = match parse_char(s, b'-') { - Some(s) => (true, s), - None => (false, s), - }; - - let (digits, s) = parse_digits(s); - let seconds = digits.parse::().ok()?; - - let (nanos, s) = parse_nanos(s)?; - - let s = parse_char(s, b's')?; - ensure!(s.is_empty()); - ensure!(nanos < crate::NANOS_PER_SECOND as u32); - - // If the duration is negative, also flip the nanos sign. 
- let (seconds, nanos) = if is_negative { - (-seconds, -(nanos as i32)) - } else { - (seconds, nanos as i32) - }; - - Some(Duration { seconds, nanos }) -} - -impl From for Timestamp { - fn from(date_time: DateTime) -> Timestamp { - let seconds = date_time_to_seconds(&date_time); - let nanos = date_time.nanos; - Timestamp { - seconds, - nanos: nanos as i32, - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use proptest::prelude::*; - - #[test] - fn test_min_max() { - assert_eq!( - DateTime::MIN, - DateTime::from(Timestamp { - seconds: i64::MIN, - nanos: 0 - }), - ); - assert_eq!( - DateTime::MAX, - DateTime::from(Timestamp { - seconds: i64::MAX, - nanos: 999_999_999 - }), - ); - } - - #[cfg(feature = "std")] - #[test] - fn test_datetime_from_timestamp() { - let case = |expected: &str, secs: i64, nanos: i32| { - let timestamp = Timestamp { - seconds: secs, - nanos, - }; - assert_eq!( - expected, - format!("{}", DateTime::from(timestamp.clone())), - "timestamp: {:?}", - timestamp - ); - }; - - // Mostly generated with: - // - date -jur +"%Y-%m-%dT%H:%M:%S.000000000Z" - // - http://unixtimestamp.50x.eu/ - - case("1970-01-01T00:00:00Z", 0, 0); - - case("1970-01-01T00:00:00.000000001Z", 0, 1); - case("1970-01-01T00:00:00.123450Z", 0, 123_450_000); - case("1970-01-01T00:00:00.050Z", 0, 50_000_000); - case("1970-01-01T00:00:01.000000001Z", 1, 1); - case("1970-01-01T00:01:01.000000001Z", 60 + 1, 1); - case("1970-01-01T01:01:01.000000001Z", 60 * 60 + 60 + 1, 1); - case( - "1970-01-02T01:01:01.000000001Z", - 24 * 60 * 60 + 60 * 60 + 60 + 1, - 1, - ); - - case("1969-12-31T23:59:59Z", -1, 0); - case("1969-12-31T23:59:59.000001Z", -1, 1_000); - case("1969-12-31T23:59:59.500Z", -1, 500_000_000); - case("1969-12-31T23:58:59.000001Z", -60 - 1, 1_000); - case("1969-12-31T22:58:59.000001Z", -60 * 60 - 60 - 1, 1_000); - case( - "1969-12-30T22:58:59.000000001Z", - -24 * 60 * 60 - 60 * 60 - 60 - 1, - 1, - ); - - case("2038-01-19T03:14:07Z", i32::MAX as i64, 0); - 
case("2038-01-19T03:14:08Z", i32::MAX as i64 + 1, 0); - case("1901-12-13T20:45:52Z", i32::MIN as i64, 0); - case("1901-12-13T20:45:51Z", i32::MIN as i64 - 1, 0); - - // Skipping these tests on windows as std::time::SystemTime range is low - // on Windows compared with that of Unix which can cause the following - // high date value tests to panic - #[cfg(not(target_os = "windows"))] - { - case("+292277026596-12-04T15:30:07Z", i64::MAX, 0); - case("+292277026596-12-04T15:30:06Z", i64::MAX - 1, 0); - case("-292277022657-01-27T08:29:53Z", i64::MIN + 1, 0); - } - - case("1900-01-01T00:00:00Z", -2_208_988_800, 0); - case("1899-12-31T23:59:59Z", -2_208_988_801, 0); - case("0000-01-01T00:00:00Z", -62_167_219_200, 0); - case("-0001-12-31T23:59:59Z", -62_167_219_201, 0); - - case("1234-05-06T07:08:09Z", -23_215_049_511, 0); - case("-1234-05-06T07:08:09Z", -101_097_651_111, 0); - case("2345-06-07T08:09:01Z", 11_847_456_541, 0); - case("-2345-06-07T08:09:01Z", -136_154_620_259, 0); - } - - #[test] - fn test_parse_timestamp() { - // RFC 3339 Section 5.8 Examples - assert_eq!( - "1985-04-12T23:20:50.52Z".parse::(), - Timestamp::date_time_nanos(1985, 4, 12, 23, 20, 50, 520_000_000), - ); - assert_eq!( - "1996-12-19T16:39:57-08:00".parse::(), - Timestamp::date_time(1996, 12, 20, 0, 39, 57), - ); - assert_eq!( - "1996-12-19T16:39:57-08:00".parse::(), - Timestamp::date_time(1996, 12, 20, 0, 39, 57), - ); - assert_eq!( - "1990-12-31T23:59:60Z".parse::(), - Timestamp::date_time(1990, 12, 31, 23, 59, 59), - ); - assert_eq!( - "1990-12-31T15:59:60-08:00".parse::(), - Timestamp::date_time(1990, 12, 31, 23, 59, 59), - ); - assert_eq!( - "1937-01-01T12:00:27.87+00:20".parse::(), - Timestamp::date_time_nanos(1937, 1, 1, 11, 40, 27, 870_000_000), - ); - - // Date - assert_eq!( - "1937-01-01".parse::(), - Timestamp::date(1937, 1, 1), - ); - - // Negative year - assert_eq!( - "-0008-01-01".parse::(), - Timestamp::date(-8, 1, 1), - ); - - // Plus year - assert_eq!( - "+19370-01-01".parse::(), - 
Timestamp::date(19370, 1, 1), - ); - - // Full nanos - assert_eq!( - "2020-02-03T01:02:03.123456789Z".parse::(), - Timestamp::date_time_nanos(2020, 2, 3, 1, 2, 3, 123_456_789), - ); - - // Leap day - assert_eq!( - "2020-02-29T01:02:03.00Z".parse::().unwrap(), - Timestamp::from(DateTime { - year: 2020, - month: 2, - day: 29, - hour: 1, - minute: 2, - second: 3, - nanos: 0, - }), - ); - - // Test extensions to RFC 3339. - // ' ' instead of 'T' as date/time separator. - assert_eq!( - "1985-04-12 23:20:50.52Z".parse::(), - Timestamp::date_time_nanos(1985, 4, 12, 23, 20, 50, 520_000_000), - ); - - // No time zone specified. - assert_eq!( - "1985-04-12T23:20:50.52".parse::(), - Timestamp::date_time_nanos(1985, 4, 12, 23, 20, 50, 520_000_000), - ); - - // Offset without minutes specified. - assert_eq!( - "1996-12-19T16:39:57-08".parse::(), - Timestamp::date_time(1996, 12, 20, 0, 39, 57), - ); - - // Snowflake stage style. - assert_eq!( - "2015-09-12 00:47:19.591 Z".parse::(), - Timestamp::date_time_nanos(2015, 9, 12, 0, 47, 19, 591_000_000), - ); - assert_eq!( - "2020-06-15 00:01:02.123 +0800".parse::(), - Timestamp::date_time_nanos(2020, 6, 14, 16, 1, 2, 123_000_000), - ); - } - - #[test] - fn test_parse_duration() { - let case = |s: &str, seconds: i64, nanos: i32| { - assert_eq!( - s.parse::().unwrap(), - Duration { seconds, nanos }, - "duration: {}", - s - ); - }; - - case("0s", 0, 0); - case("0.0s", 0, 0); - case("0.000s", 0, 0); - - case("-0s", 0, 0); - case("-0.0s", 0, 0); - case("-0.000s", 0, 0); - - case("-0s", 0, 0); - case("-0.0s", 0, 0); - case("-0.000s", 0, 0); - - case("0.05s", 0, 50_000_000); - case("0.050s", 0, 50_000_000); - - case("-0.05s", 0, -50_000_000); - case("-0.050s", 0, -50_000_000); - - case("1s", 1, 0); - case("1.0s", 1, 0); - case("1.000s", 1, 0); - - case("-1s", -1, 0); - case("-1.0s", -1, 0); - case("-1.000s", -1, 0); - - case("15s", 15, 0); - case("15.1s", 15, 100_000_000); - case("15.100s", 15, 100_000_000); - - case("-15s", -15, 0); - 
case("-15.1s", -15, -100_000_000); - case("-15.100s", -15, -100_000_000); - - case("100.000000009s", 100, 9); - case("-100.000000009s", -100, -9); - } - - #[test] - fn test_parse_non_ascii() { - assert!("2021️⃣-06-15 00:01:02.123 +0800" - .parse::() - .is_err()); - - assert!("1️⃣s".parse::().is_err()); - } - - proptest! { - #[cfg(feature = "std")] - #[test] - fn check_timestamp_parse_to_string_roundtrip( - system_time in std::time::SystemTime::arbitrary(), - ) { - - let ts = Timestamp::from(system_time); - - assert_eq!( - ts, - ts.to_string().parse::().unwrap(), - ) - } - - #[cfg(feature = "std")] - #[test] - fn check_duration_parse_to_string_roundtrip( - duration in core::time::Duration::arbitrary(), - ) { - let duration = match Duration::try_from(duration) { - Ok(duration) => duration, - Err(_) => return Err(TestCaseError::reject("duration out of range")), - }; - - prop_assert_eq!( - &duration, - &duration.to_string().parse::().unwrap(), - "{}", duration.to_string() - ); - } - } -} diff --git a/vendor/prost-types-0.12.6/src/duration.rs b/vendor/prost-types-0.12.6/src/duration.rs deleted file mode 100644 index 60071693..00000000 --- a/vendor/prost-types-0.12.6/src/duration.rs +++ /dev/null @@ -1,333 +0,0 @@ -use super::*; - -#[cfg(feature = "std")] -impl std::hash::Hash for Duration { - fn hash(&self, state: &mut H) { - self.seconds.hash(state); - self.nanos.hash(state); - } -} - -impl Duration { - /// Normalizes the duration to a canonical format. - /// - /// Based on [`google::protobuf::util::CreateNormalized`][1]. - /// - /// [1]: https://github.com/google/protobuf/blob/v3.3.2/src/google/protobuf/util/time_util.cc#L79-L100 - pub fn normalize(&mut self) { - // Make sure nanos is in the range. 
- if self.nanos <= -NANOS_PER_SECOND || self.nanos >= NANOS_PER_SECOND { - if let Some(seconds) = self - .seconds - .checked_add((self.nanos / NANOS_PER_SECOND) as i64) - { - self.seconds = seconds; - self.nanos %= NANOS_PER_SECOND; - } else if self.nanos < 0 { - // Negative overflow! Set to the least normal value. - self.seconds = i64::MIN; - self.nanos = -NANOS_MAX; - } else { - // Positive overflow! Set to the greatest normal value. - self.seconds = i64::MAX; - self.nanos = NANOS_MAX; - } - } - - // nanos should have the same sign as seconds. - if self.seconds < 0 && self.nanos > 0 { - if let Some(seconds) = self.seconds.checked_add(1) { - self.seconds = seconds; - self.nanos -= NANOS_PER_SECOND; - } else { - // Positive overflow! Set to the greatest normal value. - debug_assert_eq!(self.seconds, i64::MAX); - self.nanos = NANOS_MAX; - } - } else if self.seconds > 0 && self.nanos < 0 { - if let Some(seconds) = self.seconds.checked_sub(1) { - self.seconds = seconds; - self.nanos += NANOS_PER_SECOND; - } else { - // Negative overflow! Set to the least normal value. - debug_assert_eq!(self.seconds, i64::MIN); - self.nanos = -NANOS_MAX; - } - } - // TODO: should this be checked? - // debug_assert!(self.seconds >= -315_576_000_000 && self.seconds <= 315_576_000_000, - // "invalid duration: {:?}", self); - } -} - -impl Name for Duration { - const PACKAGE: &'static str = PACKAGE; - const NAME: &'static str = "Duration"; - - fn type_url() -> String { - type_url_for::() - } -} - -impl TryFrom for Duration { - type Error = DurationError; - - /// Converts a `std::time::Duration` to a `Duration`, failing if the duration is too large. 
- fn try_from(duration: time::Duration) -> Result { - let seconds = i64::try_from(duration.as_secs()).map_err(|_| DurationError::OutOfRange)?; - let nanos = duration.subsec_nanos() as i32; - - let mut duration = Duration { seconds, nanos }; - duration.normalize(); - Ok(duration) - } -} - -impl TryFrom for time::Duration { - type Error = DurationError; - - /// Converts a `Duration` to a `std::time::Duration`, failing if the duration is negative. - fn try_from(mut duration: Duration) -> Result { - duration.normalize(); - if duration.seconds >= 0 && duration.nanos >= 0 { - Ok(time::Duration::new( - duration.seconds as u64, - duration.nanos as u32, - )) - } else { - Err(DurationError::NegativeDuration(time::Duration::new( - (-duration.seconds) as u64, - (-duration.nanos) as u32, - ))) - } - } -} - -impl fmt::Display for Duration { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let mut d = self.clone(); - d.normalize(); - if self.seconds < 0 && self.nanos < 0 { - write!(f, "-")?; - } - write!(f, "{}", d.seconds.abs())?; - - // Format subseconds to either nothing, millis, micros, or nanos. - let nanos = d.nanos.abs(); - if nanos == 0 { - write!(f, "s") - } else if nanos % 1_000_000 == 0 { - write!(f, ".{:03}s", nanos / 1_000_000) - } else if nanos % 1_000 == 0 { - write!(f, ".{:06}s", nanos / 1_000) - } else { - write!(f, ".{:09}s", nanos) - } - } -} - -/// A duration handling error. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Debug, PartialEq)] -#[non_exhaustive] -pub enum DurationError { - /// Indicates failure to parse a [`Duration`] from a string. - /// - /// The [`Duration`] string format is specified in the [Protobuf JSON mapping specification][1]. - /// - /// [1]: https://developers.google.com/protocol-buffers/docs/proto3#json - ParseFailure, - - /// Indicates failure to convert a `prost_types::Duration` to a `std::time::Duration` because - /// the duration is negative. 
The included `std::time::Duration` matches the magnitude of the - /// original negative `prost_types::Duration`. - NegativeDuration(time::Duration), - - /// Indicates failure to convert a `std::time::Duration` to a `prost_types::Duration`. - /// - /// Converting a `std::time::Duration` to a `prost_types::Duration` fails if the magnitude - /// exceeds that representable by `prost_types::Duration`. - OutOfRange, -} - -impl fmt::Display for DurationError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - DurationError::ParseFailure => write!(f, "failed to parse duration"), - DurationError::NegativeDuration(duration) => { - write!(f, "failed to convert negative duration: {:?}", duration) - } - DurationError::OutOfRange => { - write!(f, "failed to convert duration out of range") - } - } - } -} - -#[cfg(feature = "std")] -impl std::error::Error for DurationError {} - -impl FromStr for Duration { - type Err = DurationError; - - fn from_str(s: &str) -> Result { - datetime::parse_duration(s).ok_or(DurationError::ParseFailure) - } -} -#[cfg(test)] -mod tests { - use super::*; - - #[cfg(feature = "std")] - use proptest::prelude::*; - - #[cfg(feature = "std")] - proptest! 
{ - #[test] - fn check_duration_roundtrip( - seconds in u64::arbitrary(), - nanos in 0u32..1_000_000_000u32, - ) { - let std_duration = time::Duration::new(seconds, nanos); - let prost_duration = match Duration::try_from(std_duration) { - Ok(duration) => duration, - Err(_) => return Err(TestCaseError::reject("duration out of range")), - }; - prop_assert_eq!(time::Duration::try_from(prost_duration.clone()).unwrap(), std_duration); - - if std_duration != time::Duration::default() { - let neg_prost_duration = Duration { - seconds: -prost_duration.seconds, - nanos: -prost_duration.nanos, - }; - - prop_assert!( - matches!( - time::Duration::try_from(neg_prost_duration), - Err(DurationError::NegativeDuration(d)) if d == std_duration, - ) - ) - } - } - - #[test] - fn check_duration_roundtrip_nanos( - nanos in u32::arbitrary(), - ) { - let seconds = 0; - let std_duration = std::time::Duration::new(seconds, nanos); - let prost_duration = match Duration::try_from(std_duration) { - Ok(duration) => duration, - Err(_) => return Err(TestCaseError::reject("duration out of range")), - }; - prop_assert_eq!(time::Duration::try_from(prost_duration.clone()).unwrap(), std_duration); - - if std_duration != time::Duration::default() { - let neg_prost_duration = Duration { - seconds: -prost_duration.seconds, - nanos: -prost_duration.nanos, - }; - - prop_assert!( - matches!( - time::Duration::try_from(neg_prost_duration), - Err(DurationError::NegativeDuration(d)) if d == std_duration, - ) - ) - } - } - } - - #[cfg(feature = "std")] - #[test] - fn check_duration_try_from_negative_nanos() { - let seconds: u64 = 0; - let nanos: u32 = 1; - let std_duration = std::time::Duration::new(seconds, nanos); - - let neg_prost_duration = Duration { - seconds: 0, - nanos: -1, - }; - - assert!(matches!( - time::Duration::try_from(neg_prost_duration), - Err(DurationError::NegativeDuration(d)) if d == std_duration, - )) - } - - #[test] - fn check_duration_normalize() { - #[rustfmt::skip] // Don't mangle the 
table formatting. - let cases = [ - // --- Table of test cases --- - // test seconds test nanos expected seconds expected nanos - (line!(), 0, 0, 0, 0), - (line!(), 1, 1, 1, 1), - (line!(), -1, -1, -1, -1), - (line!(), 0, 999_999_999, 0, 999_999_999), - (line!(), 0, -999_999_999, 0, -999_999_999), - (line!(), 0, 1_000_000_000, 1, 0), - (line!(), 0, -1_000_000_000, -1, 0), - (line!(), 0, 1_000_000_001, 1, 1), - (line!(), 0, -1_000_000_001, -1, -1), - (line!(), -1, 1, 0, -999_999_999), - (line!(), 1, -1, 0, 999_999_999), - (line!(), -1, 1_000_000_000, 0, 0), - (line!(), 1, -1_000_000_000, 0, 0), - (line!(), i64::MIN , 0, i64::MIN , 0), - (line!(), i64::MIN + 1, 0, i64::MIN + 1, 0), - (line!(), i64::MIN , 1, i64::MIN + 1, -999_999_999), - (line!(), i64::MIN , 1_000_000_000, i64::MIN + 1, 0), - (line!(), i64::MIN , -1_000_000_000, i64::MIN , -999_999_999), - (line!(), i64::MIN + 1, -1_000_000_000, i64::MIN , 0), - (line!(), i64::MIN + 2, -1_000_000_000, i64::MIN + 1, 0), - (line!(), i64::MIN , -1_999_999_998, i64::MIN , -999_999_999), - (line!(), i64::MIN + 1, -1_999_999_998, i64::MIN , -999_999_998), - (line!(), i64::MIN + 2, -1_999_999_998, i64::MIN + 1, -999_999_998), - (line!(), i64::MIN , -1_999_999_999, i64::MIN , -999_999_999), - (line!(), i64::MIN + 1, -1_999_999_999, i64::MIN , -999_999_999), - (line!(), i64::MIN + 2, -1_999_999_999, i64::MIN + 1, -999_999_999), - (line!(), i64::MIN , -2_000_000_000, i64::MIN , -999_999_999), - (line!(), i64::MIN + 1, -2_000_000_000, i64::MIN , -999_999_999), - (line!(), i64::MIN + 2, -2_000_000_000, i64::MIN , 0), - (line!(), i64::MIN , -999_999_998, i64::MIN , -999_999_998), - (line!(), i64::MIN + 1, -999_999_998, i64::MIN + 1, -999_999_998), - (line!(), i64::MAX , 0, i64::MAX , 0), - (line!(), i64::MAX - 1, 0, i64::MAX - 1, 0), - (line!(), i64::MAX , -1, i64::MAX - 1, 999_999_999), - (line!(), i64::MAX , 1_000_000_000, i64::MAX , 999_999_999), - (line!(), i64::MAX - 1, 1_000_000_000, i64::MAX , 0), - (line!(), i64::MAX - 2, 
1_000_000_000, i64::MAX - 1, 0), - (line!(), i64::MAX , 1_999_999_998, i64::MAX , 999_999_999), - (line!(), i64::MAX - 1, 1_999_999_998, i64::MAX , 999_999_998), - (line!(), i64::MAX - 2, 1_999_999_998, i64::MAX - 1, 999_999_998), - (line!(), i64::MAX , 1_999_999_999, i64::MAX , 999_999_999), - (line!(), i64::MAX - 1, 1_999_999_999, i64::MAX , 999_999_999), - (line!(), i64::MAX - 2, 1_999_999_999, i64::MAX - 1, 999_999_999), - (line!(), i64::MAX , 2_000_000_000, i64::MAX , 999_999_999), - (line!(), i64::MAX - 1, 2_000_000_000, i64::MAX , 999_999_999), - (line!(), i64::MAX - 2, 2_000_000_000, i64::MAX , 0), - (line!(), i64::MAX , 999_999_998, i64::MAX , 999_999_998), - (line!(), i64::MAX - 1, 999_999_998, i64::MAX - 1, 999_999_998), - ]; - - for case in cases.iter() { - let mut test_duration = Duration { - seconds: case.1, - nanos: case.2, - }; - test_duration.normalize(); - - assert_eq!( - test_duration, - Duration { - seconds: case.3, - nanos: case.4, - }, - "test case on line {} doesn't match", - case.0, - ); - } - } -} diff --git a/vendor/prost-types-0.12.6/src/lib.rs b/vendor/prost-types-0.12.6/src/lib.rs deleted file mode 100644 index a2a94d43..00000000 --- a/vendor/prost-types-0.12.6/src/lib.rs +++ /dev/null @@ -1,55 +0,0 @@ -#![doc(html_root_url = "https://docs.rs/prost-types/0.12.6")] - -//! Protocol Buffers well-known types. -//! -//! Note that the documentation for the types defined in this crate are generated from the Protobuf -//! definitions, so code examples are not in Rust. -//! -//! See the [Protobuf reference][1] for more information about well-known types. -//! -//! ## Feature Flags -//! - `std`: Enable integration with standard library. Disable this feature for `no_std` support. This feature is enabled by default. -//! -//! 
[1]: https://developers.google.com/protocol-buffers/docs/reference/google.protobuf - -#![cfg_attr(not(feature = "std"), no_std)] - -#[rustfmt::skip] -pub mod compiler; -mod datetime; -#[rustfmt::skip] -mod protobuf; - -use core::convert::TryFrom; -use core::fmt; -use core::i32; -use core::i64; -use core::str::FromStr; -use core::time; - -use prost::alloc::format; -use prost::alloc::string::String; -use prost::alloc::vec::Vec; -use prost::{DecodeError, EncodeError, Message, Name}; - -pub use protobuf::*; - -// The Protobuf `Duration` and `Timestamp` types can't delegate to the standard library equivalents -// because the Protobuf versions are signed. To make them easier to work with, `From` conversions -// are defined in both directions. - -const NANOS_PER_SECOND: i32 = 1_000_000_000; -const NANOS_MAX: i32 = NANOS_PER_SECOND - 1; - -const PACKAGE: &str = "google.protobuf"; - -mod any; - -mod duration; -pub use duration::DurationError; - -mod timestamp; -pub use timestamp::TimestampError; - -mod type_url; -pub(crate) use type_url::{type_url_for, TypeUrl}; diff --git a/vendor/prost-types-0.12.6/src/protobuf.rs b/vendor/prost-types-0.12.6/src/protobuf.rs deleted file mode 100644 index edc1361b..00000000 --- a/vendor/prost-types-0.12.6/src/protobuf.rs +++ /dev/null @@ -1,2309 +0,0 @@ -// This file is @generated by prost-build. -/// The protocol compiler can output a FileDescriptorSet containing the .proto -/// files it parses. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct FileDescriptorSet { - #[prost(message, repeated, tag = "1")] - pub file: ::prost::alloc::vec::Vec, -} -/// Describes a complete .proto file. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct FileDescriptorProto { - /// file name, relative to root of source tree - #[prost(string, optional, tag = "1")] - pub name: ::core::option::Option<::prost::alloc::string::String>, - /// e.g. 
"foo", "foo.bar", etc. - #[prost(string, optional, tag = "2")] - pub package: ::core::option::Option<::prost::alloc::string::String>, - /// Names of files imported by this file. - #[prost(string, repeated, tag = "3")] - pub dependency: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// Indexes of the public imported files in the dependency list above. - #[prost(int32, repeated, packed = "false", tag = "10")] - pub public_dependency: ::prost::alloc::vec::Vec, - /// Indexes of the weak imported files in the dependency list. - /// For Google-internal migration only. Do not use. - #[prost(int32, repeated, packed = "false", tag = "11")] - pub weak_dependency: ::prost::alloc::vec::Vec, - /// All top-level definitions in this file. - #[prost(message, repeated, tag = "4")] - pub message_type: ::prost::alloc::vec::Vec, - #[prost(message, repeated, tag = "5")] - pub enum_type: ::prost::alloc::vec::Vec, - #[prost(message, repeated, tag = "6")] - pub service: ::prost::alloc::vec::Vec, - #[prost(message, repeated, tag = "7")] - pub extension: ::prost::alloc::vec::Vec, - #[prost(message, optional, tag = "8")] - pub options: ::core::option::Option, - /// This field contains optional information about the original source code. - /// You may safely remove this entire field without harming runtime - /// functionality of the descriptors -- the information is needed only by - /// development tools. - #[prost(message, optional, tag = "9")] - pub source_code_info: ::core::option::Option, - /// The syntax of the proto file. - /// The supported values are "proto2" and "proto3". - #[prost(string, optional, tag = "12")] - pub syntax: ::core::option::Option<::prost::alloc::string::String>, -} -/// Describes a message type. 
-#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct DescriptorProto { - #[prost(string, optional, tag = "1")] - pub name: ::core::option::Option<::prost::alloc::string::String>, - #[prost(message, repeated, tag = "2")] - pub field: ::prost::alloc::vec::Vec, - #[prost(message, repeated, tag = "6")] - pub extension: ::prost::alloc::vec::Vec, - #[prost(message, repeated, tag = "3")] - pub nested_type: ::prost::alloc::vec::Vec, - #[prost(message, repeated, tag = "4")] - pub enum_type: ::prost::alloc::vec::Vec, - #[prost(message, repeated, tag = "5")] - pub extension_range: ::prost::alloc::vec::Vec, - #[prost(message, repeated, tag = "8")] - pub oneof_decl: ::prost::alloc::vec::Vec, - #[prost(message, optional, tag = "7")] - pub options: ::core::option::Option, - #[prost(message, repeated, tag = "9")] - pub reserved_range: ::prost::alloc::vec::Vec, - /// Reserved field names, which may not be used by fields in the same message. - /// A given name may only be reserved once. - #[prost(string, repeated, tag = "10")] - pub reserved_name: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, -} -/// Nested message and enum types in `DescriptorProto`. -pub mod descriptor_proto { - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct ExtensionRange { - /// Inclusive. - #[prost(int32, optional, tag = "1")] - pub start: ::core::option::Option, - /// Exclusive. - #[prost(int32, optional, tag = "2")] - pub end: ::core::option::Option, - #[prost(message, optional, tag = "3")] - pub options: ::core::option::Option, - } - /// Range of reserved tag numbers. Reserved tag numbers may not be used by - /// fields or extension ranges in the same message. Reserved ranges may - /// not overlap. - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct ReservedRange { - /// Inclusive. 
- #[prost(int32, optional, tag = "1")] - pub start: ::core::option::Option, - /// Exclusive. - #[prost(int32, optional, tag = "2")] - pub end: ::core::option::Option, - } -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ExtensionRangeOptions { - /// The parser stores options it doesn't recognize here. See above. - #[prost(message, repeated, tag = "999")] - pub uninterpreted_option: ::prost::alloc::vec::Vec, -} -/// Describes a field within a message. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct FieldDescriptorProto { - #[prost(string, optional, tag = "1")] - pub name: ::core::option::Option<::prost::alloc::string::String>, - #[prost(int32, optional, tag = "3")] - pub number: ::core::option::Option, - #[prost(enumeration = "field_descriptor_proto::Label", optional, tag = "4")] - pub label: ::core::option::Option, - /// If type_name is set, this need not be set. If both this and type_name - /// are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. - #[prost(enumeration = "field_descriptor_proto::Type", optional, tag = "5")] - pub r#type: ::core::option::Option, - /// For message and enum types, this is the name of the type. If the name - /// starts with a '.', it is fully-qualified. Otherwise, C++-like scoping - /// rules are used to find the type (i.e. first the nested types within this - /// message are searched, then within the parent, on up to the root - /// namespace). - #[prost(string, optional, tag = "6")] - pub type_name: ::core::option::Option<::prost::alloc::string::String>, - /// For extensions, this is the name of the type being extended. It is - /// resolved in the same manner as type_name. - #[prost(string, optional, tag = "2")] - pub extendee: ::core::option::Option<::prost::alloc::string::String>, - /// For numeric types, contains the original text representation of the value. - /// For booleans, "true" or "false". 
- /// For strings, contains the default text contents (not escaped in any way). - /// For bytes, contains the C escaped value. All bytes >= 128 are escaped. - /// TODO(kenton): Base-64 encode? - #[prost(string, optional, tag = "7")] - pub default_value: ::core::option::Option<::prost::alloc::string::String>, - /// If set, gives the index of a oneof in the containing type's oneof_decl - /// list. This field is a member of that oneof. - #[prost(int32, optional, tag = "9")] - pub oneof_index: ::core::option::Option, - /// JSON name of this field. The value is set by protocol compiler. If the - /// user has set a "json_name" option on this field, that option's value - /// will be used. Otherwise, it's deduced from the field's name by converting - /// it to camelCase. - #[prost(string, optional, tag = "10")] - pub json_name: ::core::option::Option<::prost::alloc::string::String>, - #[prost(message, optional, tag = "8")] - pub options: ::core::option::Option, - /// If true, this is a proto3 "optional". When a proto3 field is optional, it - /// tracks presence regardless of field type. - /// - /// When proto3_optional is true, this field must be belong to a oneof to - /// signal to old proto3 clients that presence is tracked for this field. This - /// oneof is known as a "synthetic" oneof, and this field must be its sole - /// member (each proto3 optional field gets its own synthetic oneof). Synthetic - /// oneofs exist in the descriptor only, and do not generate any API. Synthetic - /// oneofs must be ordered after all "real" oneofs. - /// - /// For message fields, proto3_optional doesn't create any semantic change, - /// since non-repeated message fields always track presence. However it still - /// indicates the semantic detail of whether the user wrote "optional" or not. - /// This can be useful for round-tripping the .proto file. For consistency we - /// give message fields a synthetic oneof also, even though it is not required - /// to track presence. 
This is especially important because the parser can't - /// tell if a field is a message or an enum, so it must always create a - /// synthetic oneof. - /// - /// Proto2 optional fields do not set this flag, because they already indicate - /// optional with `LABEL_OPTIONAL`. - #[prost(bool, optional, tag = "17")] - pub proto3_optional: ::core::option::Option, -} -/// Nested message and enum types in `FieldDescriptorProto`. -pub mod field_descriptor_proto { - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] - #[repr(i32)] - pub enum Type { - /// 0 is reserved for errors. - /// Order is weird for historical reasons. - Double = 1, - Float = 2, - /// Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if - /// negative values are likely. - Int64 = 3, - Uint64 = 4, - /// Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if - /// negative values are likely. - Int32 = 5, - Fixed64 = 6, - Fixed32 = 7, - Bool = 8, - String = 9, - /// Tag-delimited aggregate. - /// Group type is deprecated and not supported in proto3. However, Proto3 - /// implementations should still be able to parse the group wire format and - /// treat group fields as unknown fields. - Group = 10, - /// Length-delimited aggregate. - Message = 11, - /// New in version 2. - Bytes = 12, - Uint32 = 13, - Enum = 14, - Sfixed32 = 15, - Sfixed64 = 16, - /// Uses ZigZag encoding. - Sint32 = 17, - /// Uses ZigZag encoding. - Sint64 = 18, - } - impl Type { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
- pub fn as_str_name(&self) -> &'static str { - match self { - Type::Double => "TYPE_DOUBLE", - Type::Float => "TYPE_FLOAT", - Type::Int64 => "TYPE_INT64", - Type::Uint64 => "TYPE_UINT64", - Type::Int32 => "TYPE_INT32", - Type::Fixed64 => "TYPE_FIXED64", - Type::Fixed32 => "TYPE_FIXED32", - Type::Bool => "TYPE_BOOL", - Type::String => "TYPE_STRING", - Type::Group => "TYPE_GROUP", - Type::Message => "TYPE_MESSAGE", - Type::Bytes => "TYPE_BYTES", - Type::Uint32 => "TYPE_UINT32", - Type::Enum => "TYPE_ENUM", - Type::Sfixed32 => "TYPE_SFIXED32", - Type::Sfixed64 => "TYPE_SFIXED64", - Type::Sint32 => "TYPE_SINT32", - Type::Sint64 => "TYPE_SINT64", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "TYPE_DOUBLE" => Some(Self::Double), - "TYPE_FLOAT" => Some(Self::Float), - "TYPE_INT64" => Some(Self::Int64), - "TYPE_UINT64" => Some(Self::Uint64), - "TYPE_INT32" => Some(Self::Int32), - "TYPE_FIXED64" => Some(Self::Fixed64), - "TYPE_FIXED32" => Some(Self::Fixed32), - "TYPE_BOOL" => Some(Self::Bool), - "TYPE_STRING" => Some(Self::String), - "TYPE_GROUP" => Some(Self::Group), - "TYPE_MESSAGE" => Some(Self::Message), - "TYPE_BYTES" => Some(Self::Bytes), - "TYPE_UINT32" => Some(Self::Uint32), - "TYPE_ENUM" => Some(Self::Enum), - "TYPE_SFIXED32" => Some(Self::Sfixed32), - "TYPE_SFIXED64" => Some(Self::Sfixed64), - "TYPE_SINT32" => Some(Self::Sint32), - "TYPE_SINT64" => Some(Self::Sint64), - _ => None, - } - } - } - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] - #[repr(i32)] - pub enum Label { - /// 0 is reserved for errors - Optional = 1, - Required = 2, - Repeated = 3, - } - impl Label { - /// String value of the enum field names used in the ProtoBuf definition. 
- /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Label::Optional => "LABEL_OPTIONAL", - Label::Required => "LABEL_REQUIRED", - Label::Repeated => "LABEL_REPEATED", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "LABEL_OPTIONAL" => Some(Self::Optional), - "LABEL_REQUIRED" => Some(Self::Required), - "LABEL_REPEATED" => Some(Self::Repeated), - _ => None, - } - } - } -} -/// Describes a oneof. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct OneofDescriptorProto { - #[prost(string, optional, tag = "1")] - pub name: ::core::option::Option<::prost::alloc::string::String>, - #[prost(message, optional, tag = "2")] - pub options: ::core::option::Option, -} -/// Describes an enum type. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct EnumDescriptorProto { - #[prost(string, optional, tag = "1")] - pub name: ::core::option::Option<::prost::alloc::string::String>, - #[prost(message, repeated, tag = "2")] - pub value: ::prost::alloc::vec::Vec, - #[prost(message, optional, tag = "3")] - pub options: ::core::option::Option, - /// Range of reserved numeric values. Reserved numeric values may not be used - /// by enum values in the same enum declaration. Reserved ranges may not - /// overlap. - #[prost(message, repeated, tag = "4")] - pub reserved_range: ::prost::alloc::vec::Vec< - enum_descriptor_proto::EnumReservedRange, - >, - /// Reserved enum value names, which may not be reused. A given name may only - /// be reserved once. 
- #[prost(string, repeated, tag = "5")] - pub reserved_name: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, -} -/// Nested message and enum types in `EnumDescriptorProto`. -pub mod enum_descriptor_proto { - /// Range of reserved numeric values. Reserved values may not be used by - /// entries in the same enum. Reserved ranges may not overlap. - /// - /// Note that this is distinct from DescriptorProto.ReservedRange in that it - /// is inclusive such that it can appropriately represent the entire int32 - /// domain. - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct EnumReservedRange { - /// Inclusive. - #[prost(int32, optional, tag = "1")] - pub start: ::core::option::Option, - /// Inclusive. - #[prost(int32, optional, tag = "2")] - pub end: ::core::option::Option, - } -} -/// Describes a value within an enum. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct EnumValueDescriptorProto { - #[prost(string, optional, tag = "1")] - pub name: ::core::option::Option<::prost::alloc::string::String>, - #[prost(int32, optional, tag = "2")] - pub number: ::core::option::Option, - #[prost(message, optional, tag = "3")] - pub options: ::core::option::Option, -} -/// Describes a service. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ServiceDescriptorProto { - #[prost(string, optional, tag = "1")] - pub name: ::core::option::Option<::prost::alloc::string::String>, - #[prost(message, repeated, tag = "2")] - pub method: ::prost::alloc::vec::Vec, - #[prost(message, optional, tag = "3")] - pub options: ::core::option::Option, -} -/// Describes a method of a service. 
-#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct MethodDescriptorProto { - #[prost(string, optional, tag = "1")] - pub name: ::core::option::Option<::prost::alloc::string::String>, - /// Input and output type names. These are resolved in the same way as - /// FieldDescriptorProto.type_name, but must refer to a message type. - #[prost(string, optional, tag = "2")] - pub input_type: ::core::option::Option<::prost::alloc::string::String>, - #[prost(string, optional, tag = "3")] - pub output_type: ::core::option::Option<::prost::alloc::string::String>, - #[prost(message, optional, tag = "4")] - pub options: ::core::option::Option, - /// Identifies if client streams multiple client messages - #[prost(bool, optional, tag = "5", default = "false")] - pub client_streaming: ::core::option::Option, - /// Identifies if server streams multiple server messages - #[prost(bool, optional, tag = "6", default = "false")] - pub server_streaming: ::core::option::Option, -} -/// Each of the definitions above may have "options" attached. These are -/// just annotations which may cause code to be generated slightly differently -/// or may contain hints for code that manipulates protocol messages. -/// -/// Clients may define custom options as extensions of the \*Options messages. -/// These extensions may not yet be known at parsing time, so the parser cannot -/// store the values in them. Instead it stores them in a field in the \*Options -/// message called uninterpreted_option. This field must have the same name -/// across all \*Options messages. We then use this field to populate the -/// extensions when we build a descriptor, at which point all protos have been -/// parsed and so all extensions are known. 
-/// -/// Extension numbers for custom options may be chosen as follows: -/// -/// * For options which will only be used within a single application or -/// organization, or for experimental options, use field numbers 50000 -/// through 99999. It is up to you to ensure that you do not use the -/// same number for multiple options. -/// * For options which will be published and used publicly by multiple -/// independent entities, e-mail protobuf-global-extension-registry@google.com -/// to reserve extension numbers. Simply provide your project name (e.g. -/// Objective-C plugin) and your project website (if available) -- there's no -/// need to explain how you intend to use them. Usually you only need one -/// extension number. You can declare multiple options with only one extension -/// number by putting them in a sub-message. See the Custom Options section of -/// the docs for examples: -/// -/// If this turns out to be popular, a web service will be set up -/// to automatically assign option numbers. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct FileOptions { - /// Sets the Java package where classes generated from this .proto will be - /// placed. By default, the proto package is used, but this is often - /// inappropriate because proto packages do not normally start with backwards - /// domain names. - #[prost(string, optional, tag = "1")] - pub java_package: ::core::option::Option<::prost::alloc::string::String>, - /// Controls the name of the wrapper Java class generated for the .proto file. - /// That class will always contain the .proto file's getDescriptor() method as - /// well as any top-level extensions defined in the .proto file. - /// If java_multiple_files is disabled, then all the other classes from the - /// .proto file will be nested inside the single wrapper outer class. 
- #[prost(string, optional, tag = "8")] - pub java_outer_classname: ::core::option::Option<::prost::alloc::string::String>, - /// If enabled, then the Java code generator will generate a separate .java - /// file for each top-level message, enum, and service defined in the .proto - /// file. Thus, these types will *not* be nested inside the wrapper class - /// named by java_outer_classname. However, the wrapper class will still be - /// generated to contain the file's getDescriptor() method as well as any - /// top-level extensions defined in the file. - #[prost(bool, optional, tag = "10", default = "false")] - pub java_multiple_files: ::core::option::Option, - /// This option does nothing. - #[deprecated] - #[prost(bool, optional, tag = "20")] - pub java_generate_equals_and_hash: ::core::option::Option, - /// If set true, then the Java2 code generator will generate code that - /// throws an exception whenever an attempt is made to assign a non-UTF-8 - /// byte sequence to a string field. - /// Message reflection will do the same. - /// However, an extension field still accepts non-UTF-8 byte sequences. - /// This option has no effect on when used with the lite runtime. - #[prost(bool, optional, tag = "27", default = "false")] - pub java_string_check_utf8: ::core::option::Option, - #[prost( - enumeration = "file_options::OptimizeMode", - optional, - tag = "9", - default = "Speed" - )] - pub optimize_for: ::core::option::Option, - /// Sets the Go package where structs generated from this .proto will be - /// placed. If omitted, the Go package will be derived from the following: - /// - /// * The basename of the package import path, if provided. - /// * Otherwise, the package statement in the .proto file, if present. - /// * Otherwise, the basename of the .proto file, without extension. - #[prost(string, optional, tag = "11")] - pub go_package: ::core::option::Option<::prost::alloc::string::String>, - /// Should generic services be generated in each language? 
"Generic" services - /// are not specific to any particular RPC system. They are generated by the - /// main code generators in each language (without additional plugins). - /// Generic services were the only kind of service generation supported by - /// early versions of google.protobuf. - /// - /// Generic services are now considered deprecated in favor of using plugins - /// that generate code specific to your particular RPC system. Therefore, - /// these default to false. Old code which depends on generic services should - /// explicitly set them to true. - #[prost(bool, optional, tag = "16", default = "false")] - pub cc_generic_services: ::core::option::Option, - #[prost(bool, optional, tag = "17", default = "false")] - pub java_generic_services: ::core::option::Option, - #[prost(bool, optional, tag = "18", default = "false")] - pub py_generic_services: ::core::option::Option, - #[prost(bool, optional, tag = "42", default = "false")] - pub php_generic_services: ::core::option::Option, - /// Is this file deprecated? - /// Depending on the target platform, this can emit Deprecated annotations - /// for everything in the file, or it will be completely ignored; in the very - /// least, this is a formalization for deprecating files. - #[prost(bool, optional, tag = "23", default = "false")] - pub deprecated: ::core::option::Option, - /// Enables the use of arenas for the proto messages in this file. This applies - /// only to generated classes for C++. - #[prost(bool, optional, tag = "31", default = "true")] - pub cc_enable_arenas: ::core::option::Option, - /// Sets the objective c class prefix which is prepended to all objective c - /// generated classes from this .proto. There is no default. - #[prost(string, optional, tag = "36")] - pub objc_class_prefix: ::core::option::Option<::prost::alloc::string::String>, - /// Namespace for generated classes; defaults to the package. 
- #[prost(string, optional, tag = "37")] - pub csharp_namespace: ::core::option::Option<::prost::alloc::string::String>, - /// By default Swift generators will take the proto package and CamelCase it - /// replacing '.' with underscore and use that to prefix the types/symbols - /// defined. When this options is provided, they will use this value instead - /// to prefix the types/symbols defined. - #[prost(string, optional, tag = "39")] - pub swift_prefix: ::core::option::Option<::prost::alloc::string::String>, - /// Sets the php class prefix which is prepended to all php generated classes - /// from this .proto. Default is empty. - #[prost(string, optional, tag = "40")] - pub php_class_prefix: ::core::option::Option<::prost::alloc::string::String>, - /// Use this option to change the namespace of php generated classes. Default - /// is empty. When this option is empty, the package name will be used for - /// determining the namespace. - #[prost(string, optional, tag = "41")] - pub php_namespace: ::core::option::Option<::prost::alloc::string::String>, - /// Use this option to change the namespace of php generated metadata classes. - /// Default is empty. When this option is empty, the proto file name will be - /// used for determining the namespace. - #[prost(string, optional, tag = "44")] - pub php_metadata_namespace: ::core::option::Option<::prost::alloc::string::String>, - /// Use this option to change the package of ruby generated classes. Default - /// is empty. When this option is not set, the package name will be used for - /// determining the ruby package. - #[prost(string, optional, tag = "45")] - pub ruby_package: ::core::option::Option<::prost::alloc::string::String>, - /// The parser stores options it doesn't recognize here. - /// See the documentation for the "Options" section above. - #[prost(message, repeated, tag = "999")] - pub uninterpreted_option: ::prost::alloc::vec::Vec, -} -/// Nested message and enum types in `FileOptions`. 
-pub mod file_options { - /// Generated classes can be optimized for speed or code size. - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] - #[repr(i32)] - pub enum OptimizeMode { - /// Generate complete code for parsing, serialization, - Speed = 1, - /// etc. - /// - /// Use ReflectionOps to implement these methods. - CodeSize = 2, - /// Generate code using MessageLite and the lite runtime. - LiteRuntime = 3, - } - impl OptimizeMode { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - OptimizeMode::Speed => "SPEED", - OptimizeMode::CodeSize => "CODE_SIZE", - OptimizeMode::LiteRuntime => "LITE_RUNTIME", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "SPEED" => Some(Self::Speed), - "CODE_SIZE" => Some(Self::CodeSize), - "LITE_RUNTIME" => Some(Self::LiteRuntime), - _ => None, - } - } - } -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct MessageOptions { - /// Set true to use the old proto1 MessageSet wire format for extensions. - /// This is provided for backwards-compatibility with the MessageSet wire - /// format. You should not use this for any other reason: It's less - /// efficient, has fewer features, and is more complicated. - /// - /// The message must be defined exactly as follows: - /// message Foo { - /// option message_set_wire_format = true; - /// extensions 4 to max; - /// } - /// Note that the message cannot have any defined fields; MessageSets only - /// have extensions. - /// - /// All extensions of your type must be singular messages; e.g. 
they cannot - /// be int32s, enums, or repeated messages. - /// - /// Because this is an option, the above two restrictions are not enforced by - /// the protocol compiler. - #[prost(bool, optional, tag = "1", default = "false")] - pub message_set_wire_format: ::core::option::Option, - /// Disables the generation of the standard "descriptor()" accessor, which can - /// conflict with a field of the same name. This is meant to make migration - /// from proto1 easier; new code should avoid fields named "descriptor". - #[prost(bool, optional, tag = "2", default = "false")] - pub no_standard_descriptor_accessor: ::core::option::Option, - /// Is this message deprecated? - /// Depending on the target platform, this can emit Deprecated annotations - /// for the message, or it will be completely ignored; in the very least, - /// this is a formalization for deprecating messages. - #[prost(bool, optional, tag = "3", default = "false")] - pub deprecated: ::core::option::Option, - /// Whether the message is an automatically generated map entry type for the - /// maps field. - /// - /// For maps fields: - /// map\ map_field = 1; - /// The parsed descriptor looks like: - /// message MapFieldEntry { - /// option map_entry = true; - /// optional KeyType key = 1; - /// optional ValueType value = 2; - /// } - /// repeated MapFieldEntry map_field = 1; - /// - /// Implementations may choose not to generate the map_entry=true message, but - /// use a native map in the target language to hold the keys and values. - /// The reflection APIs in such implementations still need to work as - /// if the field is a repeated message field. - /// - /// NOTE: Do not set the option in .proto files. Always use the maps syntax - /// instead. The option should only be implicitly set by the proto compiler - /// parser. - #[prost(bool, optional, tag = "7")] - pub map_entry: ::core::option::Option, - /// The parser stores options it doesn't recognize here. See above. 
- #[prost(message, repeated, tag = "999")] - pub uninterpreted_option: ::prost::alloc::vec::Vec, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct FieldOptions { - /// The ctype option instructs the C++ code generator to use a different - /// representation of the field than it normally would. See the specific - /// options below. This option is not yet implemented in the open source - /// release -- sorry, we'll try to include it in a future version! - #[prost( - enumeration = "field_options::CType", - optional, - tag = "1", - default = "String" - )] - pub ctype: ::core::option::Option, - /// The packed option can be enabled for repeated primitive fields to enable - /// a more efficient representation on the wire. Rather than repeatedly - /// writing the tag and type for each element, the entire array is encoded as - /// a single length-delimited blob. In proto3, only explicit setting it to - /// false will avoid using packed encoding. - #[prost(bool, optional, tag = "2")] - pub packed: ::core::option::Option, - /// The jstype option determines the JavaScript type used for values of the - /// field. The option is permitted only for 64 bit integral and fixed types - /// (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING - /// is represented as JavaScript string, which avoids loss of precision that - /// can happen when a large value is converted to a floating point JavaScript. - /// Specifying JS_NUMBER for the jstype causes the generated JavaScript code to - /// use the JavaScript "number" type. The behavior of the default option - /// JS_NORMAL is implementation dependent. - /// - /// This option is an enum to permit additional types to be added, e.g. - /// goog.math.Integer. - #[prost( - enumeration = "field_options::JsType", - optional, - tag = "6", - default = "JsNormal" - )] - pub jstype: ::core::option::Option, - /// Should this field be parsed lazily? 
Lazy applies only to message-type - /// fields. It means that when the outer message is initially parsed, the - /// inner message's contents will not be parsed but instead stored in encoded - /// form. The inner message will actually be parsed when it is first accessed. - /// - /// This is only a hint. Implementations are free to choose whether to use - /// eager or lazy parsing regardless of the value of this option. However, - /// setting this option true suggests that the protocol author believes that - /// using lazy parsing on this field is worth the additional bookkeeping - /// overhead typically needed to implement it. - /// - /// This option does not affect the public interface of any generated code; - /// all method signatures remain the same. Furthermore, thread-safety of the - /// interface is not affected by this option; const methods remain safe to - /// call from multiple threads concurrently, while non-const methods continue - /// to require exclusive access. - /// - /// Note that implementations may choose not to check required fields within - /// a lazy sub-message. That is, calling IsInitialized() on the outer message - /// may return true even if the inner message has missing required fields. - /// This is necessary because otherwise the inner message would have to be - /// parsed in order to perform the check, defeating the purpose of lazy - /// parsing. An implementation which chooses not to check required fields - /// must be consistent about it. That is, for any particular sub-message, the - /// implementation must either *always* check its required fields, or *never* - /// check its required fields, regardless of whether or not the message has - /// been parsed. - #[prost(bool, optional, tag = "5", default = "false")] - pub lazy: ::core::option::Option, - /// Is this field deprecated? 
- /// Depending on the target platform, this can emit Deprecated annotations - /// for accessors, or it will be completely ignored; in the very least, this - /// is a formalization for deprecating fields. - #[prost(bool, optional, tag = "3", default = "false")] - pub deprecated: ::core::option::Option, - /// For Google-internal migration only. Do not use. - #[prost(bool, optional, tag = "10", default = "false")] - pub weak: ::core::option::Option, - /// The parser stores options it doesn't recognize here. See above. - #[prost(message, repeated, tag = "999")] - pub uninterpreted_option: ::prost::alloc::vec::Vec, -} -/// Nested message and enum types in `FieldOptions`. -pub mod field_options { - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] - #[repr(i32)] - pub enum CType { - /// Default mode. - String = 0, - Cord = 1, - StringPiece = 2, - } - impl CType { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - CType::String => "STRING", - CType::Cord => "CORD", - CType::StringPiece => "STRING_PIECE", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "STRING" => Some(Self::String), - "CORD" => Some(Self::Cord), - "STRING_PIECE" => Some(Self::StringPiece), - _ => None, - } - } - } - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] - #[repr(i32)] - pub enum JsType { - /// Use the default type. - JsNormal = 0, - /// Use JavaScript strings. - JsString = 1, - /// Use JavaScript numbers. 
- JsNumber = 2, - } - impl JsType { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - JsType::JsNormal => "JS_NORMAL", - JsType::JsString => "JS_STRING", - JsType::JsNumber => "JS_NUMBER", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "JS_NORMAL" => Some(Self::JsNormal), - "JS_STRING" => Some(Self::JsString), - "JS_NUMBER" => Some(Self::JsNumber), - _ => None, - } - } - } -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct OneofOptions { - /// The parser stores options it doesn't recognize here. See above. - #[prost(message, repeated, tag = "999")] - pub uninterpreted_option: ::prost::alloc::vec::Vec, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct EnumOptions { - /// Set this option to true to allow mapping different tag names to the same - /// value. - #[prost(bool, optional, tag = "2")] - pub allow_alias: ::core::option::Option, - /// Is this enum deprecated? - /// Depending on the target platform, this can emit Deprecated annotations - /// for the enum, or it will be completely ignored; in the very least, this - /// is a formalization for deprecating enums. - #[prost(bool, optional, tag = "3", default = "false")] - pub deprecated: ::core::option::Option, - /// The parser stores options it doesn't recognize here. See above. 
- #[prost(message, repeated, tag = "999")] - pub uninterpreted_option: ::prost::alloc::vec::Vec, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct EnumValueOptions { - /// Is this enum value deprecated? - /// Depending on the target platform, this can emit Deprecated annotations - /// for the enum value, or it will be completely ignored; in the very least, - /// this is a formalization for deprecating enum values. - #[prost(bool, optional, tag = "1", default = "false")] - pub deprecated: ::core::option::Option, - /// The parser stores options it doesn't recognize here. See above. - #[prost(message, repeated, tag = "999")] - pub uninterpreted_option: ::prost::alloc::vec::Vec, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ServiceOptions { - /// Is this service deprecated? - /// Depending on the target platform, this can emit Deprecated annotations - /// for the service, or it will be completely ignored; in the very least, - /// this is a formalization for deprecating services. - #[prost(bool, optional, tag = "33", default = "false")] - pub deprecated: ::core::option::Option, - /// The parser stores options it doesn't recognize here. See above. - #[prost(message, repeated, tag = "999")] - pub uninterpreted_option: ::prost::alloc::vec::Vec, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct MethodOptions { - /// Is this method deprecated? - /// Depending on the target platform, this can emit Deprecated annotations - /// for the method, or it will be completely ignored; in the very least, - /// this is a formalization for deprecating methods. 
- #[prost(bool, optional, tag = "33", default = "false")] - pub deprecated: ::core::option::Option, - #[prost( - enumeration = "method_options::IdempotencyLevel", - optional, - tag = "34", - default = "IdempotencyUnknown" - )] - pub idempotency_level: ::core::option::Option, - /// The parser stores options it doesn't recognize here. See above. - #[prost(message, repeated, tag = "999")] - pub uninterpreted_option: ::prost::alloc::vec::Vec, -} -/// Nested message and enum types in `MethodOptions`. -pub mod method_options { - /// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, - /// or neither? HTTP based RPC implementation may choose GET verb for safe - /// methods, and PUT verb for idempotent methods instead of the default POST. - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] - #[repr(i32)] - pub enum IdempotencyLevel { - IdempotencyUnknown = 0, - /// implies idempotent - NoSideEffects = 1, - /// idempotent, but may have side effects - Idempotent = 2, - } - impl IdempotencyLevel { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - IdempotencyLevel::IdempotencyUnknown => "IDEMPOTENCY_UNKNOWN", - IdempotencyLevel::NoSideEffects => "NO_SIDE_EFFECTS", - IdempotencyLevel::Idempotent => "IDEMPOTENT", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "IDEMPOTENCY_UNKNOWN" => Some(Self::IdempotencyUnknown), - "NO_SIDE_EFFECTS" => Some(Self::NoSideEffects), - "IDEMPOTENT" => Some(Self::Idempotent), - _ => None, - } - } - } -} -/// A message representing a option the parser does not recognize. 
This only -/// appears in options protos created by the compiler::Parser class. -/// DescriptorPool resolves these when building Descriptor objects. Therefore, -/// options protos in descriptor objects (e.g. returned by Descriptor::options(), -/// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions -/// in them. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct UninterpretedOption { - #[prost(message, repeated, tag = "2")] - pub name: ::prost::alloc::vec::Vec, - /// The value of the uninterpreted option, in whatever type the tokenizer - /// identified it as during parsing. Exactly one of these should be set. - #[prost(string, optional, tag = "3")] - pub identifier_value: ::core::option::Option<::prost::alloc::string::String>, - #[prost(uint64, optional, tag = "4")] - pub positive_int_value: ::core::option::Option, - #[prost(int64, optional, tag = "5")] - pub negative_int_value: ::core::option::Option, - #[prost(double, optional, tag = "6")] - pub double_value: ::core::option::Option, - #[prost(bytes = "vec", optional, tag = "7")] - pub string_value: ::core::option::Option<::prost::alloc::vec::Vec>, - #[prost(string, optional, tag = "8")] - pub aggregate_value: ::core::option::Option<::prost::alloc::string::String>, -} -/// Nested message and enum types in `UninterpretedOption`. -pub mod uninterpreted_option { - /// The name of the uninterpreted option. Each string represents a segment in - /// a dot-separated name. is_extension is true iff a segment represents an - /// extension (denoted with parentheses in options specs in .proto files). - /// E.g.,{ \["foo", false\], \["bar.baz", true\], \["qux", false\] } represents - /// "foo.(bar.baz).qux". 
- #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct NamePart { - #[prost(string, required, tag = "1")] - pub name_part: ::prost::alloc::string::String, - #[prost(bool, required, tag = "2")] - pub is_extension: bool, - } -} -/// Encapsulates information about the original source file from which a -/// FileDescriptorProto was generated. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SourceCodeInfo { - /// A Location identifies a piece of source code in a .proto file which - /// corresponds to a particular definition. This information is intended - /// to be useful to IDEs, code indexers, documentation generators, and similar - /// tools. - /// - /// For example, say we have a file like: - /// message Foo { - /// optional string foo = 1; - /// } - /// Let's look at just the field definition: - /// optional string foo = 1; - /// ^ ^^ ^^ ^ ^^^ - /// a bc de f ghi - /// We have the following locations: - /// span path represents - /// \[a,i) \[ 4, 0, 2, 0 \] The whole field definition. - /// \[a,b) \[ 4, 0, 2, 0, 4 \] The label (optional). - /// \[c,d) \[ 4, 0, 2, 0, 5 \] The type (string). - /// \[e,f) \[ 4, 0, 2, 0, 1 \] The name (foo). - /// \[g,h) \[ 4, 0, 2, 0, 3 \] The number (1). - /// - /// Notes: - /// - /// * A location may refer to a repeated field itself (i.e. not to any - /// particular index within it). This is used whenever a set of elements are - /// logically enclosed in a single code segment. For example, an entire - /// extend block (possibly containing multiple extension definitions) will - /// have an outer location whose path refers to the "extensions" repeated - /// field without an index. - /// * Multiple locations may have the same path. This happens when a single - /// logical declaration is spread out across multiple places. 
The most - /// obvious example is the "extend" block again -- there may be multiple - /// extend blocks in the same scope, each of which will have the same path. - /// * A location's span is not always a subset of its parent's span. For - /// example, the "extendee" of an extension declaration appears at the - /// beginning of the "extend" block and is shared by all extensions within - /// the block. - /// * Just because a location's span is a subset of some other location's span - /// does not mean that it is a descendant. For example, a "group" defines - /// both a type and a field in a single declaration. Thus, the locations - /// corresponding to the type and field and their components will overlap. - /// * Code which tries to interpret locations should probably be designed to - /// ignore those that it doesn't understand, as more types of locations could - /// be recorded in the future. - #[prost(message, repeated, tag = "1")] - pub location: ::prost::alloc::vec::Vec, -} -/// Nested message and enum types in `SourceCodeInfo`. -pub mod source_code_info { - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct Location { - /// Identifies which part of the FileDescriptorProto was defined at this - /// location. - /// - /// Each element is a field number or an index. They form a path from - /// the root FileDescriptorProto to the place where the definition. For - /// example, this path: - /// \[ 4, 3, 2, 7, 1 \] - /// refers to: - /// file.message_type(3) // 4, 3 - /// .field(7) // 2, 7 - /// .name() // 1 - /// This is because FileDescriptorProto.message_type has field number 4: - /// repeated DescriptorProto message_type = 4; - /// and DescriptorProto.field has field number 2: - /// repeated FieldDescriptorProto field = 2; - /// and FieldDescriptorProto.name has field number 1: - /// optional string name = 1; - /// - /// Thus, the above path gives the location of a field name. 
If we removed - /// the last element: - /// \[ 4, 3, 2, 7 \] - /// this path refers to the whole field declaration (from the beginning - /// of the label to the terminating semicolon). - #[prost(int32, repeated, tag = "1")] - pub path: ::prost::alloc::vec::Vec, - /// Always has exactly three or four elements: start line, start column, - /// end line (optional, otherwise assumed same as start line), end column. - /// These are packed into a single field for efficiency. Note that line - /// and column numbers are zero-based -- typically you will want to add - /// 1 to each before displaying to a user. - #[prost(int32, repeated, tag = "2")] - pub span: ::prost::alloc::vec::Vec, - /// If this SourceCodeInfo represents a complete declaration, these are any - /// comments appearing before and after the declaration which appear to be - /// attached to the declaration. - /// - /// A series of line comments appearing on consecutive lines, with no other - /// tokens appearing on those lines, will be treated as a single comment. - /// - /// leading_detached_comments will keep paragraphs of comments that appear - /// before (but not connected to) the current element. Each paragraph, - /// separated by empty lines, will be one comment element in the repeated - /// field. - /// - /// Only the comment content is provided; comment markers (e.g. //) are - /// stripped out. For block comments, leading whitespace and an asterisk - /// will be stripped from the beginning of each line other than the first. - /// Newlines are included in the output. - /// - /// Examples: - /// - /// optional int32 foo = 1; // Comment attached to foo. - /// // Comment attached to bar. - /// optional int32 bar = 2; - /// - /// optional string baz = 3; - /// // Comment attached to baz. - /// // Another line attached to baz. - /// - /// // Comment attached to qux. - /// // - /// // Another line attached to qux. - /// optional double qux = 4; - /// - /// // Detached comment for corge. 
This is not leading or trailing comments - /// // to qux or corge because there are blank lines separating it from - /// // both. - /// - /// // Detached comment for corge paragraph 2. - /// - /// optional string corge = 5; - /// /\* Block comment attached - /// \* to corge. Leading asterisks - /// \* will be removed. */ - /// /* Block comment attached to - /// \* grault. \*/ - /// optional int32 grault = 6; - /// - /// // ignored detached comments. - #[prost(string, optional, tag = "3")] - pub leading_comments: ::core::option::Option<::prost::alloc::string::String>, - #[prost(string, optional, tag = "4")] - pub trailing_comments: ::core::option::Option<::prost::alloc::string::String>, - #[prost(string, repeated, tag = "6")] - pub leading_detached_comments: ::prost::alloc::vec::Vec< - ::prost::alloc::string::String, - >, - } -} -/// Describes the relationship between generated code and its original source -/// file. A GeneratedCodeInfo message is associated with only one generated -/// source file, but may contain references to different source .proto files. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct GeneratedCodeInfo { - /// An Annotation connects some span of text in generated code to an element - /// of its generating .proto file. - #[prost(message, repeated, tag = "1")] - pub annotation: ::prost::alloc::vec::Vec, -} -/// Nested message and enum types in `GeneratedCodeInfo`. -pub mod generated_code_info { - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct Annotation { - /// Identifies the element in the original source .proto file. This field - /// is formatted the same as SourceCodeInfo.Location.path. - #[prost(int32, repeated, tag = "1")] - pub path: ::prost::alloc::vec::Vec, - /// Identifies the filesystem path to the original source .proto. 
- #[prost(string, optional, tag = "2")] - pub source_file: ::core::option::Option<::prost::alloc::string::String>, - /// Identifies the starting offset in bytes in the generated code - /// that relates to the identified object. - #[prost(int32, optional, tag = "3")] - pub begin: ::core::option::Option, - /// Identifies the ending offset in bytes in the generated code that - /// relates to the identified offset. The end offset should be one past - /// the last relevant byte (so the length of the text = end - begin). - #[prost(int32, optional, tag = "4")] - pub end: ::core::option::Option, - } -} -/// `Any` contains an arbitrary serialized protocol buffer message along with a -/// URL that describes the type of the serialized message. -/// -/// Protobuf library provides support to pack/unpack Any values in the form -/// of utility functions or additional generated methods of the Any type. -/// -/// Example 1: Pack and unpack a message in C++. -/// -/// ```text -/// Foo foo = ...; -/// Any any; -/// any.PackFrom(foo); -/// ... -/// if (any.UnpackTo(&foo)) { -/// ... -/// } -/// ``` -/// -/// Example 2: Pack and unpack a message in Java. -/// -/// ```text -/// Foo foo = ...; -/// Any any = Any.pack(foo); -/// ... -/// if (any.is(Foo.class)) { -/// foo = any.unpack(Foo.class); -/// } -/// ``` -/// -/// Example 3: Pack and unpack a message in Python. -/// -/// ```text -/// foo = Foo(...) -/// any = Any() -/// any.Pack(foo) -/// ... -/// if any.Is(Foo.DESCRIPTOR): -/// any.Unpack(foo) -/// ... -/// ``` -/// -/// Example 4: Pack and unpack a message in Go -/// -/// ```text -/// foo := &pb.Foo{...} -/// any, err := anypb.New(foo) -/// if err != nil { -/// ... -/// } -/// ... -/// foo := &pb.Foo{} -/// if err := any.UnmarshalTo(foo); err != nil { -/// ... 
-/// } -/// ``` -/// -/// The pack methods provided by protobuf library will by default use -/// 'type.googleapis.com/full.type.name' as the type URL and the unpack -/// methods only use the fully qualified type name after the last '/' -/// in the type URL, for example "foo.bar.com/x/y.z" will yield type -/// name "y.z". -/// -/// # JSON -/// -/// The JSON representation of an `Any` value uses the regular -/// representation of the deserialized, embedded message, with an -/// additional field `@type` which contains the type URL. Example: -/// -/// ```text -/// package google.profile; -/// message Person { -/// string first_name = 1; -/// string last_name = 2; -/// } -/// -/// { -/// "@type": "type.googleapis.com/google.profile.Person", -/// "firstName": , -/// "lastName": -/// } -/// ``` -/// -/// If the embedded message type is well-known and has a custom JSON -/// representation, that representation will be embedded adding a field -/// `value` which holds the custom JSON in addition to the `@type` -/// field. Example (for message \[google.protobuf.Duration\]\[\]): -/// -/// ```text -/// { -/// "@type": "type.googleapis.com/google.protobuf.Duration", -/// "value": "1.212s" -/// } -/// ``` -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Any { - /// A URL/resource name that uniquely identifies the type of the serialized - /// protocol buffer message. This string must contain at least - /// one "/" character. The last segment of the URL's path must represent - /// the fully qualified name of the type (as in - /// `path/google.protobuf.Duration`). The name should be in a canonical form - /// (e.g., leading "." is not accepted). - /// - /// In practice, teams usually precompile into the binary all types that they - /// expect it to use in the context of Any. 
However, for URLs which use the - /// scheme `http`, `https`, or no scheme, one can optionally set up a type - /// server that maps type URLs to message definitions as follows: - /// - /// * If no scheme is provided, `https` is assumed. - /// * An HTTP GET on the URL must yield a \[google.protobuf.Type\]\[\] - /// value in binary format, or produce an error. - /// * Applications are allowed to cache lookup results based on the - /// URL, or have them precompiled into a binary to avoid any - /// lookup. Therefore, binary compatibility needs to be preserved - /// on changes to types. (Use versioned type names to manage - /// breaking changes.) - /// - /// Note: this functionality is not currently available in the official - /// protobuf release, and it is not used for type URLs beginning with - /// type.googleapis.com. - /// - /// Schemes other than `http`, `https` (or the empty scheme) might be - /// used with implementation specific semantics. - #[prost(string, tag = "1")] - pub type_url: ::prost::alloc::string::String, - /// Must be a valid serialized protocol buffer of the above specified type. - #[prost(bytes = "vec", tag = "2")] - pub value: ::prost::alloc::vec::Vec, -} -/// `SourceContext` represents information about the source of a -/// protobuf element, like the file in which it is defined. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SourceContext { - /// The path-qualified name of the .proto file that contained the associated - /// protobuf element. For example: `"google/protobuf/source_context.proto"`. - #[prost(string, tag = "1")] - pub file_name: ::prost::alloc::string::String, -} -/// A protocol buffer message type. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Type { - /// The fully qualified message name. - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, - /// The list of fields. 
- #[prost(message, repeated, tag = "2")] - pub fields: ::prost::alloc::vec::Vec, - /// The list of types appearing in `oneof` definitions in this type. - #[prost(string, repeated, tag = "3")] - pub oneofs: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// The protocol buffer options. - #[prost(message, repeated, tag = "4")] - pub options: ::prost::alloc::vec::Vec