summaryrefslogtreecommitdiff
path: root/vendor/base64
diff options
context:
space:
mode:
Diffstat (limited to 'vendor/base64')
-rw-r--r--vendor/base64/.cargo-checksum.json1
-rw-r--r--vendor/base64/Cargo.lock1515
-rw-r--r--vendor/base64/Cargo.toml85
-rw-r--r--vendor/base64/LICENSE-APACHE201
-rw-r--r--vendor/base64/LICENSE-MIT21
-rw-r--r--vendor/base64/README.md154
-rw-r--r--vendor/base64/RELEASE-NOTES.md271
-rw-r--r--vendor/base64/benches/benchmarks.rs238
-rw-r--r--vendor/base64/clippy.toml1
-rw-r--r--vendor/base64/examples/base64.rs81
-rw-r--r--vendor/base64/icon_CLion.svg34
-rw-r--r--vendor/base64/src/alphabet.rs285
-rw-r--r--vendor/base64/src/chunked_encoder.rs172
-rw-r--r--vendor/base64/src/decode.rs386
-rw-r--r--vendor/base64/src/display.rs88
-rw-r--r--vendor/base64/src/encode.rs492
-rw-r--r--vendor/base64/src/engine/general_purpose/decode.rs357
-rw-r--r--vendor/base64/src/engine/general_purpose/decode_suffix.rs162
-rw-r--r--vendor/base64/src/engine/general_purpose/mod.rs352
-rw-r--r--vendor/base64/src/engine/mod.rs478
-rw-r--r--vendor/base64/src/engine/naive.rs195
-rw-r--r--vendor/base64/src/engine/tests.rs1579
-rw-r--r--vendor/base64/src/lib.rs277
-rw-r--r--vendor/base64/src/prelude.rs20
-rw-r--r--vendor/base64/src/read/decoder.rs335
-rw-r--r--vendor/base64/src/read/decoder_tests.rs487
-rw-r--r--vendor/base64/src/read/mod.rs6
-rw-r--r--vendor/base64/src/tests.rs117
-rw-r--r--vendor/base64/src/write/encoder.rs407
-rw-r--r--vendor/base64/src/write/encoder_string_writer.rs207
-rw-r--r--vendor/base64/src/write/encoder_tests.rs554
-rw-r--r--vendor/base64/src/write/mod.rs11
-rw-r--r--vendor/base64/tests/encode.rs77
-rw-r--r--vendor/base64/tests/tests.rs161
34 files changed, 0 insertions, 9807 deletions
diff --git a/vendor/base64/.cargo-checksum.json b/vendor/base64/.cargo-checksum.json
deleted file mode 100644
index 74114189..00000000
--- a/vendor/base64/.cargo-checksum.json
+++ /dev/null
@@ -1 +0,0 @@
-{"files":{"Cargo.lock":"cee37732975a1ffc1f956d3d05b6edf1baec72841cfabc384a21b02b3bfa0275","Cargo.toml":"52bee6a418e14918d37058fd15fccfd0f417a06fe4f9668b6f97866bf7f991e3","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0dd882e53de11566d50f8e8e2d5a651bcf3fabee4987d70f306233cf39094ba7","README.md":"df01f5b4317d601e7de86743f9818aec9196abf9e298f5e47679b7a966ecd945","RELEASE-NOTES.md":"997a5193317a8bff266ecfe4f015ba070b782b6df7d3a1738b9b52584d57f9c6","benches/benchmarks.rs":"cebbcc8649e760e569c6be04f5e727aee2c2568ced7faab580fc0aa0d0426d26","clippy.toml":"b26be4d15ed059985ce6994f11817fd7562046f46e460a0dc64dbb71cfc246d1","examples/base64.rs":"b75ead2199a9b4389c69fe6f1ae988176a263b8fc84e7a4fea1d7e5a41592078","icon_CLion.svg":"cffa044ba75cb998ee3306991dc4a3755ec2f39ab95ddd4b74bc21988389020f","src/alphabet.rs":"5de2beb8fcccb078c61cac2c0477ebbde145122d6c10a0f7ea2e57e8159318e0","src/chunked_encoder.rs":"edfdbb9a4329b80fb2c769ada81e234e00839e0fa85faaa70bacf40ce12e951c","src/decode.rs":"b046a72d62eaac58dc42efcf7848d9d96d022f6594e851cf87074b77ce45c04a","src/display.rs":"31bf3e19274a0b80dd8948a81ea535944f756ef5b88736124c940f5fe1e8c71c","src/encode.rs":"44ddcc162f3fe9817b6e857dda0a3b9197b90a657e5f71c44aacabf5431ccf7d","src/engine/general_purpose/decode.rs":"d865b057e5788e7fefd189cf57ec913df263e6a0742dfa52513f587e14fa1a92","src/engine/general_purpose/decode_suffix.rs":"689688f7bf442b232d3b9f56a1b41c56d9393ace88556a165c224b93dd19b74e","src/engine/general_purpose/mod.rs":"901760a7f5721ec3bafad5fea6251f57de0f767ecb2e1e2fdfe64d661404ec34","src/engine/mod.rs":"5e4a6c0e86417f3b62350264ef383f91e9864390f7c315d786ecd8e9c920ee9f","src/engine/naive.rs":"70de29d909c3fe7918d2965782088b05047b8b6e30d1d2bf11ba073d3f8633ff","src/engine/tests.rs":"2cc8d1431f40f5b9c3ad8970e6fb73bba8be3f2317553dd026539f41908aaa19","src/lib.rs":"c4db7bd31ace78aec2ecd151cef3ad90dfdc76097ba12027bde79d3c82612f7c","src/prelude.rs":"c1587138e5301ac797c5c362cb3638649b33f79c20c
16db6f38ad44330540752","src/read/decoder.rs":"00aaa0553a54fcf12762658c4e56663a9705cc30c07af30976291e6f69d78c3d","src/read/decoder_tests.rs":"66ec39bf6e86f21f4db1afd6c5cd63d4a4931ab896b9c38de25d99b803804bbf","src/read/mod.rs":"e0b714eda02d16b1ffa6f78fd09b2f963e01c881b1f7c17b39db4e904be5e746","src/tests.rs":"90cb9f8a1ccb7c4ddc4f8618208e0031fc97e0df0e5aa466d6a5cf45d25967d8","src/write/encoder.rs":"c889c853249220fe2ddaeb77ee6e2ee2945f7db88cd6658ef89ff71b81255ea8","src/write/encoder_string_writer.rs":"0326c9d120369b9bbc35697b5b9b141bed24283374c93d5af1052eb042e47799","src/write/encoder_tests.rs":"28695a485b17cf5db73656aae5d90127f726e02c6d70efd83e5ab53a4cc17b38","src/write/mod.rs":"73cd98dadc9d712b3fefd9449d97e825e097397441b90588e0051e4d3b0911b9","tests/encode.rs":"5309f4538b1df611436f7bfba7409c725161b6f841b1bbf8d9890ae185de7d88","tests/tests.rs":"78efcf0dc4bb6ae52f7a91fcad89e44e4dce578224c36b4e6c1c306459be8500"},"package":"72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6"} \ No newline at end of file
diff --git a/vendor/base64/Cargo.lock b/vendor/base64/Cargo.lock
deleted file mode 100644
index 84e188d1..00000000
--- a/vendor/base64/Cargo.lock
+++ /dev/null
@@ -1,1515 +0,0 @@
-# This file is automatically @generated by Cargo.
-# It is not intended for manual editing.
-version = 3
-
-[[package]]
-name = "anes"
-version = "0.1.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299"
-
-[[package]]
-name = "async-attributes"
-version = "1.1.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a3203e79f4dd9bdda415ed03cf14dae5a2bf775c683a00f94e9cd1faf0f596e5"
-dependencies = [
- "quote",
- "syn 1.0.109",
-]
-
-[[package]]
-name = "async-channel"
-version = "1.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35"
-dependencies = [
- "concurrent-queue",
- "event-listener 2.5.3",
- "futures-core",
-]
-
-[[package]]
-name = "async-channel"
-version = "2.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f28243a43d821d11341ab73c80bed182dc015c514b951616cf79bd4af39af0c3"
-dependencies = [
- "concurrent-queue",
- "event-listener 5.2.0",
- "event-listener-strategy 0.5.0",
- "futures-core",
- "pin-project-lite",
-]
-
-[[package]]
-name = "async-executor"
-version = "1.8.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "17ae5ebefcc48e7452b4987947920dac9450be1110cadf34d1b8c116bdbaf97c"
-dependencies = [
- "async-lock 3.3.0",
- "async-task",
- "concurrent-queue",
- "fastrand 2.0.1",
- "futures-lite 2.2.0",
- "slab",
-]
-
-[[package]]
-name = "async-global-executor"
-version = "2.4.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c"
-dependencies = [
- "async-channel 2.2.0",
- "async-executor",
- "async-io 2.3.1",
- "async-lock 3.3.0",
- "blocking",
- "futures-lite 2.2.0",
- "once_cell",
-]
-
-[[package]]
-name = "async-io"
-version = "1.13.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af"
-dependencies = [
- "async-lock 2.8.0",
- "autocfg",
- "cfg-if",
- "concurrent-queue",
- "futures-lite 1.13.0",
- "log",
- "parking",
- "polling 2.8.0",
- "rustix 0.37.27",
- "slab",
- "socket2",
- "waker-fn",
-]
-
-[[package]]
-name = "async-io"
-version = "2.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8f97ab0c5b00a7cdbe5a371b9a782ee7be1316095885c8a4ea1daf490eb0ef65"
-dependencies = [
- "async-lock 3.3.0",
- "cfg-if",
- "concurrent-queue",
- "futures-io",
- "futures-lite 2.2.0",
- "parking",
- "polling 3.4.0",
- "rustix 0.38.9",
- "slab",
- "tracing",
- "windows-sys 0.52.0",
-]
-
-[[package]]
-name = "async-lock"
-version = "2.8.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b"
-dependencies = [
- "event-listener 2.5.3",
-]
-
-[[package]]
-name = "async-lock"
-version = "3.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d034b430882f8381900d3fe6f0aaa3ad94f2cb4ac519b429692a1bc2dda4ae7b"
-dependencies = [
- "event-listener 4.0.3",
- "event-listener-strategy 0.4.0",
- "pin-project-lite",
-]
-
-[[package]]
-name = "async-std"
-version = "1.12.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d"
-dependencies = [
- "async-attributes",
- "async-channel 1.9.0",
- "async-global-executor",
- "async-io 1.13.0",
- "async-lock 2.8.0",
- "crossbeam-utils",
- "futures-channel",
- "futures-core",
- "futures-io",
- "futures-lite 1.13.0",
- "gloo-timers",
- "kv-log-macro",
- "log",
- "memchr",
- "once_cell",
- "pin-project-lite",
- "pin-utils",
- "slab",
- "wasm-bindgen-futures",
-]
-
-[[package]]
-name = "async-task"
-version = "4.7.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fbb36e985947064623dbd357f727af08ffd077f93d696782f3c56365fa2e2799"
-
-[[package]]
-name = "atomic-waker"
-version = "1.1.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0"
-
-[[package]]
-name = "atty"
-version = "0.2.14"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
-dependencies = [
- "hermit-abi 0.1.19",
- "libc",
- "winapi",
-]
-
-[[package]]
-name = "autocfg"
-version = "1.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
-
-[[package]]
-name = "base64"
-version = "0.22.1"
-dependencies = [
- "clap",
- "criterion",
- "once_cell",
- "rand",
- "rstest",
- "rstest_reuse",
- "strum",
-]
-
-[[package]]
-name = "bitflags"
-version = "1.3.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
-
-[[package]]
-name = "bitflags"
-version = "2.4.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf"
-
-[[package]]
-name = "blocking"
-version = "1.5.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6a37913e8dc4ddcc604f0c6d3bf2887c995153af3611de9e23c352b44c1b9118"
-dependencies = [
- "async-channel 2.2.0",
- "async-lock 3.3.0",
- "async-task",
- "fastrand 2.0.1",
- "futures-io",
- "futures-lite 2.2.0",
- "piper",
- "tracing",
-]
-
-[[package]]
-name = "bumpalo"
-version = "3.15.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8ea184aa71bb362a1157c896979544cc23974e08fd265f29ea96b59f0b4a555b"
-
-[[package]]
-name = "cast"
-version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
-
-[[package]]
-name = "cfg-if"
-version = "1.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
-
-[[package]]
-name = "ciborium"
-version = "0.2.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e"
-dependencies = [
- "ciborium-io",
- "ciborium-ll",
- "serde",
-]
-
-[[package]]
-name = "ciborium-io"
-version = "0.2.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757"
-
-[[package]]
-name = "ciborium-ll"
-version = "0.2.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9"
-dependencies = [
- "ciborium-io",
- "half",
-]
-
-[[package]]
-name = "clap"
-version = "3.2.25"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4ea181bf566f71cb9a5d17a59e1871af638180a18fb0035c92ae62b705207123"
-dependencies = [
- "atty",
- "bitflags 1.3.2",
- "clap_derive",
- "clap_lex",
- "indexmap",
- "once_cell",
- "strsim",
- "termcolor",
- "textwrap",
-]
-
-[[package]]
-name = "clap_derive"
-version = "3.2.25"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ae6371b8bdc8b7d3959e9cf7b22d4435ef3e79e138688421ec654acf8c81b008"
-dependencies = [
- "heck",
- "proc-macro-error",
- "proc-macro2",
- "quote",
- "syn 1.0.109",
-]
-
-[[package]]
-name = "clap_lex"
-version = "0.2.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5"
-dependencies = [
- "os_str_bytes",
-]
-
-[[package]]
-name = "concurrent-queue"
-version = "2.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363"
-dependencies = [
- "crossbeam-utils",
-]
-
-[[package]]
-name = "criterion"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e7c76e09c1aae2bc52b3d2f29e13c6572553b30c4aa1b8a49fd70de6412654cb"
-dependencies = [
- "anes",
- "atty",
- "cast",
- "ciborium",
- "clap",
- "criterion-plot",
- "itertools",
- "lazy_static",
- "num-traits",
- "oorandom",
- "plotters",
- "rayon",
- "regex",
- "serde",
- "serde_derive",
- "serde_json",
- "tinytemplate",
- "walkdir",
-]
-
-[[package]]
-name = "criterion-plot"
-version = "0.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1"
-dependencies = [
- "cast",
- "itertools",
-]
-
-[[package]]
-name = "crossbeam-deque"
-version = "0.8.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d"
-dependencies = [
- "crossbeam-epoch",
- "crossbeam-utils",
-]
-
-[[package]]
-name = "crossbeam-epoch"
-version = "0.9.18"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e"
-dependencies = [
- "crossbeam-utils",
-]
-
-[[package]]
-name = "crossbeam-utils"
-version = "0.8.19"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345"
-
-[[package]]
-name = "crunchy"
-version = "0.2.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7"
-
-[[package]]
-name = "ctor"
-version = "0.1.26"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096"
-dependencies = [
- "quote",
- "syn 1.0.109",
-]
-
-[[package]]
-name = "either"
-version = "1.10.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a"
-
-[[package]]
-name = "errno"
-version = "0.3.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245"
-dependencies = [
- "libc",
- "windows-sys 0.52.0",
-]
-
-[[package]]
-name = "event-listener"
-version = "2.5.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0"
-
-[[package]]
-name = "event-listener"
-version = "4.0.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "67b215c49b2b248c855fb73579eb1f4f26c38ffdc12973e20e07b91d78d5646e"
-dependencies = [
- "concurrent-queue",
- "parking",
- "pin-project-lite",
-]
-
-[[package]]
-name = "event-listener"
-version = "5.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2b5fb89194fa3cad959b833185b3063ba881dbfc7030680b314250779fb4cc91"
-dependencies = [
- "concurrent-queue",
- "parking",
- "pin-project-lite",
-]
-
-[[package]]
-name = "event-listener-strategy"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3"
-dependencies = [
- "event-listener 4.0.3",
- "pin-project-lite",
-]
-
-[[package]]
-name = "event-listener-strategy"
-version = "0.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "feedafcaa9b749175d5ac357452a9d41ea2911da598fde46ce1fe02c37751291"
-dependencies = [
- "event-listener 5.2.0",
- "pin-project-lite",
-]
-
-[[package]]
-name = "fastrand"
-version = "1.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be"
-dependencies = [
- "instant",
-]
-
-[[package]]
-name = "fastrand"
-version = "2.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5"
-
-[[package]]
-name = "futures"
-version = "0.3.30"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0"
-dependencies = [
- "futures-channel",
- "futures-core",
- "futures-executor",
- "futures-io",
- "futures-sink",
- "futures-task",
- "futures-util",
-]
-
-[[package]]
-name = "futures-channel"
-version = "0.3.30"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78"
-dependencies = [
- "futures-core",
- "futures-sink",
-]
-
-[[package]]
-name = "futures-core"
-version = "0.3.30"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d"
-
-[[package]]
-name = "futures-executor"
-version = "0.3.30"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d"
-dependencies = [
- "futures-core",
- "futures-task",
- "futures-util",
-]
-
-[[package]]
-name = "futures-io"
-version = "0.3.30"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1"
-
-[[package]]
-name = "futures-lite"
-version = "1.13.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce"
-dependencies = [
- "fastrand 1.9.0",
- "futures-core",
- "futures-io",
- "memchr",
- "parking",
- "pin-project-lite",
- "waker-fn",
-]
-
-[[package]]
-name = "futures-lite"
-version = "2.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "445ba825b27408685aaecefd65178908c36c6e96aaf6d8599419d46e624192ba"
-dependencies = [
- "fastrand 2.0.1",
- "futures-core",
- "futures-io",
- "parking",
- "pin-project-lite",
-]
-
-[[package]]
-name = "futures-macro"
-version = "0.3.30"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn 2.0.52",
-]
-
-[[package]]
-name = "futures-sink"
-version = "0.3.30"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5"
-
-[[package]]
-name = "futures-task"
-version = "0.3.30"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004"
-
-[[package]]
-name = "futures-timer"
-version = "3.0.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24"
-
-[[package]]
-name = "futures-util"
-version = "0.3.30"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48"
-dependencies = [
- "futures-channel",
- "futures-core",
- "futures-io",
- "futures-macro",
- "futures-sink",
- "futures-task",
- "memchr",
- "pin-project-lite",
- "pin-utils",
- "slab",
-]
-
-[[package]]
-name = "getrandom"
-version = "0.2.12"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5"
-dependencies = [
- "cfg-if",
- "libc",
- "wasi",
-]
-
-[[package]]
-name = "gloo-timers"
-version = "0.2.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c"
-dependencies = [
- "futures-channel",
- "futures-core",
- "js-sys",
- "wasm-bindgen",
-]
-
-[[package]]
-name = "half"
-version = "2.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b5eceaaeec696539ddaf7b333340f1af35a5aa87ae3e4f3ead0532f72affab2e"
-dependencies = [
- "cfg-if",
- "crunchy",
-]
-
-[[package]]
-name = "hashbrown"
-version = "0.12.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
-
-[[package]]
-name = "heck"
-version = "0.4.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
-
-[[package]]
-name = "hermit-abi"
-version = "0.1.19"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
-dependencies = [
- "libc",
-]
-
-[[package]]
-name = "hermit-abi"
-version = "0.3.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024"
-
-[[package]]
-name = "indexmap"
-version = "1.9.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99"
-dependencies = [
- "autocfg",
- "hashbrown",
-]
-
-[[package]]
-name = "instant"
-version = "0.1.12"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c"
-dependencies = [
- "cfg-if",
-]
-
-[[package]]
-name = "io-lifetimes"
-version = "1.0.11"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2"
-dependencies = [
- "hermit-abi 0.3.9",
- "libc",
- "windows-sys 0.48.0",
-]
-
-[[package]]
-name = "itertools"
-version = "0.10.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
-dependencies = [
- "either",
-]
-
-[[package]]
-name = "itoa"
-version = "1.0.10"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c"
-
-[[package]]
-name = "js-sys"
-version = "0.3.68"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "406cda4b368d531c842222cf9d2600a9a4acce8d29423695379c6868a143a9ee"
-dependencies = [
- "wasm-bindgen",
-]
-
-[[package]]
-name = "kv-log-macro"
-version = "1.0.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f"
-dependencies = [
- "log",
-]
-
-[[package]]
-name = "lazy_static"
-version = "1.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
-
-[[package]]
-name = "libc"
-version = "0.2.153"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd"
-
-[[package]]
-name = "linux-raw-sys"
-version = "0.3.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519"
-
-[[package]]
-name = "linux-raw-sys"
-version = "0.4.13"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c"
-
-[[package]]
-name = "log"
-version = "0.4.17"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
-dependencies = [
- "cfg-if",
- "value-bag",
-]
-
-[[package]]
-name = "memchr"
-version = "2.7.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149"
-
-[[package]]
-name = "num-traits"
-version = "0.2.18"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a"
-dependencies = [
- "autocfg",
-]
-
-[[package]]
-name = "once_cell"
-version = "1.19.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"
-
-[[package]]
-name = "oorandom"
-version = "11.1.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575"
-
-[[package]]
-name = "os_str_bytes"
-version = "6.6.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e2355d85b9a3786f481747ced0e0ff2ba35213a1f9bd406ed906554d7af805a1"
-
-[[package]]
-name = "parking"
-version = "2.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae"
-
-[[package]]
-name = "pin-project-lite"
-version = "0.2.13"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58"
-
-[[package]]
-name = "pin-utils"
-version = "0.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
-
-[[package]]
-name = "piper"
-version = "0.2.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "668d31b1c4eba19242f2088b2bf3316b82ca31082a8335764db4e083db7485d4"
-dependencies = [
- "atomic-waker",
- "fastrand 2.0.1",
- "futures-io",
-]
-
-[[package]]
-name = "plotters"
-version = "0.3.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d2c224ba00d7cadd4d5c660deaf2098e5e80e07846537c51f9cfa4be50c1fd45"
-dependencies = [
- "num-traits",
- "plotters-backend",
- "plotters-svg",
- "wasm-bindgen",
- "web-sys",
-]
-
-[[package]]
-name = "plotters-backend"
-version = "0.3.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9e76628b4d3a7581389a35d5b6e2139607ad7c75b17aed325f210aa91f4a9609"
-
-[[package]]
-name = "plotters-svg"
-version = "0.3.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "38f6d39893cca0701371e3c27294f09797214b86f1fb951b89ade8ec04e2abab"
-dependencies = [
- "plotters-backend",
-]
-
-[[package]]
-name = "polling"
-version = "2.8.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce"
-dependencies = [
- "autocfg",
- "bitflags 1.3.2",
- "cfg-if",
- "concurrent-queue",
- "libc",
- "log",
- "pin-project-lite",
- "windows-sys 0.48.0",
-]
-
-[[package]]
-name = "polling"
-version = "3.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "30054e72317ab98eddd8561db0f6524df3367636884b7b21b703e4b280a84a14"
-dependencies = [
- "cfg-if",
- "concurrent-queue",
- "pin-project-lite",
- "rustix 0.38.9",
- "tracing",
- "windows-sys 0.52.0",
-]
-
-[[package]]
-name = "ppv-lite86"
-version = "0.2.17"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
-
-[[package]]
-name = "proc-macro-error"
-version = "1.0.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c"
-dependencies = [
- "proc-macro-error-attr",
- "proc-macro2",
- "quote",
- "syn 1.0.109",
- "version_check",
-]
-
-[[package]]
-name = "proc-macro-error-attr"
-version = "1.0.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869"
-dependencies = [
- "proc-macro2",
- "quote",
- "version_check",
-]
-
-[[package]]
-name = "proc-macro2"
-version = "1.0.78"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae"
-dependencies = [
- "unicode-ident",
-]
-
-[[package]]
-name = "quote"
-version = "1.0.35"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef"
-dependencies = [
- "proc-macro2",
-]
-
-[[package]]
-name = "rand"
-version = "0.8.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404"
-dependencies = [
- "libc",
- "rand_chacha",
- "rand_core",
-]
-
-[[package]]
-name = "rand_chacha"
-version = "0.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
-dependencies = [
- "ppv-lite86",
- "rand_core",
-]
-
-[[package]]
-name = "rand_core"
-version = "0.6.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c"
-dependencies = [
- "getrandom",
-]
-
-[[package]]
-name = "rayon"
-version = "1.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e4963ed1bc86e4f3ee217022bd855b297cef07fb9eac5dfa1f788b220b49b3bd"
-dependencies = [
- "either",
- "rayon-core",
-]
-
-[[package]]
-name = "rayon-core"
-version = "1.12.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2"
-dependencies = [
- "crossbeam-deque",
- "crossbeam-utils",
-]
-
-[[package]]
-name = "regex"
-version = "1.8.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d0ab3ca65655bb1e41f2a8c8cd662eb4fb035e67c3f78da1d61dffe89d07300f"
-dependencies = [
- "regex-syntax",
-]
-
-[[package]]
-name = "regex-syntax"
-version = "0.7.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da"
-
-[[package]]
-name = "rstest"
-version = "0.13.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b939295f93cb1d12bc1a83cf9ee963199b133fb8a79832dd51b68bb9f59a04dc"
-dependencies = [
- "async-std",
- "futures",
- "futures-timer",
- "rstest_macros",
- "rustc_version",
-]
-
-[[package]]
-name = "rstest_macros"
-version = "0.13.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f78aba848123782ba59340928ec7d876ebe745aa0365d6af8a630f19a5c16116"
-dependencies = [
- "cfg-if",
- "proc-macro2",
- "quote",
- "rustc_version",
- "syn 1.0.109",
-]
-
-[[package]]
-name = "rstest_reuse"
-version = "0.6.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "88530b681abe67924d42cca181d070e3ac20e0740569441a9e35a7cedd2b34a4"
-dependencies = [
- "quote",
- "rand",
- "rustc_version",
- "syn 2.0.52",
-]
-
-[[package]]
-name = "rustc_version"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366"
-dependencies = [
- "semver",
-]
-
-[[package]]
-name = "rustix"
-version = "0.37.27"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fea8ca367a3a01fe35e6943c400addf443c0f57670e6ec51196f71a4b8762dd2"
-dependencies = [
- "bitflags 1.3.2",
- "errno",
- "io-lifetimes",
- "libc",
- "linux-raw-sys 0.3.8",
- "windows-sys 0.48.0",
-]
-
-[[package]]
-name = "rustix"
-version = "0.38.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9bfe0f2582b4931a45d1fa608f8a8722e8b3c7ac54dd6d5f3b3212791fedef49"
-dependencies = [
- "bitflags 2.4.2",
- "errno",
- "libc",
- "linux-raw-sys 0.4.13",
- "windows-sys 0.48.0",
-]
-
-[[package]]
-name = "rustversion"
-version = "1.0.14"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4"
-
-[[package]]
-name = "ryu"
-version = "1.0.17"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1"
-
-[[package]]
-name = "same-file"
-version = "1.0.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
-dependencies = [
- "winapi-util",
-]
-
-[[package]]
-name = "semver"
-version = "1.0.22"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca"
-
-[[package]]
-name = "serde"
-version = "1.0.197"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2"
-dependencies = [
- "serde_derive",
-]
-
-[[package]]
-name = "serde_derive"
-version = "1.0.197"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn 2.0.52",
-]
-
-[[package]]
-name = "serde_json"
-version = "1.0.114"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0"
-dependencies = [
- "itoa",
- "ryu",
- "serde",
-]
-
-[[package]]
-name = "slab"
-version = "0.4.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67"
-dependencies = [
- "autocfg",
-]
-
-[[package]]
-name = "socket2"
-version = "0.4.10"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d"
-dependencies = [
- "libc",
- "winapi",
-]
-
-[[package]]
-name = "strsim"
-version = "0.10.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
-
-[[package]]
-name = "strum"
-version = "0.25.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125"
-dependencies = [
- "strum_macros",
-]
-
-[[package]]
-name = "strum_macros"
-version = "0.25.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0"
-dependencies = [
- "heck",
- "proc-macro2",
- "quote",
- "rustversion",
- "syn 2.0.52",
-]
-
-[[package]]
-name = "syn"
-version = "1.0.109"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
-dependencies = [
- "proc-macro2",
- "quote",
- "unicode-ident",
-]
-
-[[package]]
-name = "syn"
-version = "2.0.52"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b699d15b36d1f02c3e7c69f8ffef53de37aefae075d8488d4ba1a7788d574a07"
-dependencies = [
- "proc-macro2",
- "quote",
- "unicode-ident",
-]
-
-[[package]]
-name = "termcolor"
-version = "1.4.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755"
-dependencies = [
- "winapi-util",
-]
-
-[[package]]
-name = "textwrap"
-version = "0.16.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "23d434d3f8967a09480fb04132ebe0a3e088c173e6d0ee7897abbdf4eab0f8b9"
-
-[[package]]
-name = "tinytemplate"
-version = "1.2.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc"
-dependencies = [
- "serde",
- "serde_json",
-]
-
-[[package]]
-name = "tracing"
-version = "0.1.40"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef"
-dependencies = [
- "pin-project-lite",
- "tracing-core",
-]
-
-[[package]]
-name = "tracing-core"
-version = "0.1.32"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54"
-
-[[package]]
-name = "unicode-ident"
-version = "1.0.12"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
-
-[[package]]
-name = "value-bag"
-version = "1.0.0-alpha.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2209b78d1249f7e6f3293657c9779fe31ced465df091bbd433a1cf88e916ec55"
-dependencies = [
- "ctor",
- "version_check",
-]
-
-[[package]]
-name = "version_check"
-version = "0.9.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
-
-[[package]]
-name = "waker-fn"
-version = "1.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f3c4517f54858c779bbcbf228f4fca63d121bf85fbecb2dc578cdf4a39395690"
-
-[[package]]
-name = "walkdir"
-version = "2.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b"
-dependencies = [
- "same-file",
- "winapi-util",
-]
-
-[[package]]
-name = "wasi"
-version = "0.11.0+wasi-snapshot-preview1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
-
-[[package]]
-name = "wasm-bindgen"
-version = "0.2.91"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c1e124130aee3fb58c5bdd6b639a0509486b0338acaaae0c84a5124b0f588b7f"
-dependencies = [
- "cfg-if",
- "wasm-bindgen-macro",
-]
-
-[[package]]
-name = "wasm-bindgen-backend"
-version = "0.2.91"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c9e7e1900c352b609c8488ad12639a311045f40a35491fb69ba8c12f758af70b"
-dependencies = [
- "bumpalo",
- "log",
- "once_cell",
- "proc-macro2",
- "quote",
- "syn 2.0.52",
- "wasm-bindgen-shared",
-]
-
-[[package]]
-name = "wasm-bindgen-futures"
-version = "0.4.41"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "877b9c3f61ceea0e56331985743b13f3d25c406a7098d45180fb5f09bc19ed97"
-dependencies = [
- "cfg-if",
- "js-sys",
- "wasm-bindgen",
- "web-sys",
-]
-
-[[package]]
-name = "wasm-bindgen-macro"
-version = "0.2.91"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b30af9e2d358182b5c7449424f017eba305ed32a7010509ede96cdc4696c46ed"
-dependencies = [
- "quote",
- "wasm-bindgen-macro-support",
-]
-
-[[package]]
-name = "wasm-bindgen-macro-support"
-version = "0.2.91"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn 2.0.52",
- "wasm-bindgen-backend",
- "wasm-bindgen-shared",
-]
-
-[[package]]
-name = "wasm-bindgen-shared"
-version = "0.2.91"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4f186bd2dcf04330886ce82d6f33dd75a7bfcf69ecf5763b89fcde53b6ac9838"
-
-[[package]]
-name = "web-sys"
-version = "0.3.68"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "96565907687f7aceb35bc5fc03770a8a0471d82e479f25832f54a0e3f4b28446"
-dependencies = [
- "js-sys",
- "wasm-bindgen",
-]
-
-[[package]]
-name = "winapi"
-version = "0.3.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
-dependencies = [
- "winapi-i686-pc-windows-gnu",
- "winapi-x86_64-pc-windows-gnu",
-]
-
-[[package]]
-name = "winapi-i686-pc-windows-gnu"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
-
-[[package]]
-name = "winapi-util"
-version = "0.1.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596"
-dependencies = [
- "winapi",
-]
-
-[[package]]
-name = "winapi-x86_64-pc-windows-gnu"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
-
-[[package]]
-name = "windows-sys"
-version = "0.48.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9"
-dependencies = [
- "windows-targets 0.48.5",
-]
-
-[[package]]
-name = "windows-sys"
-version = "0.52.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
-dependencies = [
- "windows-targets 0.52.4",
-]
-
-[[package]]
-name = "windows-targets"
-version = "0.48.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c"
-dependencies = [
- "windows_aarch64_gnullvm 0.48.5",
- "windows_aarch64_msvc 0.48.5",
- "windows_i686_gnu 0.48.5",
- "windows_i686_msvc 0.48.5",
- "windows_x86_64_gnu 0.48.5",
- "windows_x86_64_gnullvm 0.48.5",
- "windows_x86_64_msvc 0.48.5",
-]
-
-[[package]]
-name = "windows-targets"
-version = "0.52.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7dd37b7e5ab9018759f893a1952c9420d060016fc19a472b4bb20d1bdd694d1b"
-dependencies = [
- "windows_aarch64_gnullvm 0.52.4",
- "windows_aarch64_msvc 0.52.4",
- "windows_i686_gnu 0.52.4",
- "windows_i686_msvc 0.52.4",
- "windows_x86_64_gnu 0.52.4",
- "windows_x86_64_gnullvm 0.52.4",
- "windows_x86_64_msvc 0.52.4",
-]
-
-[[package]]
-name = "windows_aarch64_gnullvm"
-version = "0.48.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8"
-
-[[package]]
-name = "windows_aarch64_gnullvm"
-version = "0.52.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bcf46cf4c365c6f2d1cc93ce535f2c8b244591df96ceee75d8e83deb70a9cac9"
-
-[[package]]
-name = "windows_aarch64_msvc"
-version = "0.48.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc"
-
-[[package]]
-name = "windows_aarch64_msvc"
-version = "0.52.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "da9f259dd3bcf6990b55bffd094c4f7235817ba4ceebde8e6d11cd0c5633b675"
-
-[[package]]
-name = "windows_i686_gnu"
-version = "0.48.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e"
-
-[[package]]
-name = "windows_i686_gnu"
-version = "0.52.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b474d8268f99e0995f25b9f095bc7434632601028cf86590aea5c8a5cb7801d3"
-
-[[package]]
-name = "windows_i686_msvc"
-version = "0.48.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406"
-
-[[package]]
-name = "windows_i686_msvc"
-version = "0.52.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1515e9a29e5bed743cb4415a9ecf5dfca648ce85ee42e15873c3cd8610ff8e02"
-
-[[package]]
-name = "windows_x86_64_gnu"
-version = "0.48.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e"
-
-[[package]]
-name = "windows_x86_64_gnu"
-version = "0.52.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5eee091590e89cc02ad514ffe3ead9eb6b660aedca2183455434b93546371a03"
-
-[[package]]
-name = "windows_x86_64_gnullvm"
-version = "0.48.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc"
-
-[[package]]
-name = "windows_x86_64_gnullvm"
-version = "0.52.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "77ca79f2451b49fa9e2af39f0747fe999fcda4f5e241b2898624dca97a1f2177"
-
-[[package]]
-name = "windows_x86_64_msvc"
-version = "0.48.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538"
-
-[[package]]
-name = "windows_x86_64_msvc"
-version = "0.52.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8"
diff --git a/vendor/base64/Cargo.toml b/vendor/base64/Cargo.toml
deleted file mode 100644
index e1b35fc4..00000000
--- a/vendor/base64/Cargo.toml
+++ /dev/null
@@ -1,85 +0,0 @@
-# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
-#
-# When uploading crates to the registry Cargo will automatically
-# "normalize" Cargo.toml files for maximal compatibility
-# with all versions of Cargo and also rewrite `path` dependencies
-# to registry (e.g., crates.io) dependencies.
-#
-# If you are reading this file be aware that the original Cargo.toml
-# will likely look very different (and much more reasonable).
-# See Cargo.toml.orig for the original contents.
-
-[package]
-edition = "2018"
-rust-version = "1.48.0"
-name = "base64"
-version = "0.22.1"
-authors = ["Marshall Pierce <marshall@mpierce.org>"]
-description = "encodes and decodes base64 as bytes or utf8"
-documentation = "https://docs.rs/base64"
-readme = "README.md"
-keywords = [
- "base64",
- "utf8",
- "encode",
- "decode",
- "no_std",
-]
-categories = ["encoding"]
-license = "MIT OR Apache-2.0"
-repository = "https://github.com/marshallpierce/rust-base64"
-
-[package.metadata.docs.rs]
-rustdoc-args = ["--generate-link-to-definition"]
-
-[profile.bench]
-debug = 2
-
-[profile.test]
-opt-level = 3
-
-[[example]]
-name = "base64"
-required-features = ["std"]
-
-[[test]]
-name = "tests"
-required-features = ["alloc"]
-
-[[test]]
-name = "encode"
-required-features = ["alloc"]
-
-[[bench]]
-name = "benchmarks"
-harness = false
-required-features = ["std"]
-
-[dev-dependencies.clap]
-version = "3.2.25"
-features = ["derive"]
-
-[dev-dependencies.criterion]
-version = "0.4.0"
-
-[dev-dependencies.once_cell]
-version = "1"
-
-[dev-dependencies.rand]
-version = "0.8.5"
-features = ["small_rng"]
-
-[dev-dependencies.rstest]
-version = "0.13.0"
-
-[dev-dependencies.rstest_reuse]
-version = "0.6.0"
-
-[dev-dependencies.strum]
-version = "0.25"
-features = ["derive"]
-
-[features]
-alloc = []
-default = ["std"]
-std = ["alloc"]
diff --git a/vendor/base64/LICENSE-APACHE b/vendor/base64/LICENSE-APACHE
deleted file mode 100644
index 16fe87b0..00000000
--- a/vendor/base64/LICENSE-APACHE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
-2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
-3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
-4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
-5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
-6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
-8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
-APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
-Copyright [yyyy] [name of copyright owner]
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
diff --git a/vendor/base64/LICENSE-MIT b/vendor/base64/LICENSE-MIT
deleted file mode 100644
index 7bc10f80..00000000
--- a/vendor/base64/LICENSE-MIT
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2015 Alice Maz
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/vendor/base64/README.md b/vendor/base64/README.md
deleted file mode 100644
index f566756d..00000000
--- a/vendor/base64/README.md
+++ /dev/null
@@ -1,154 +0,0 @@
-# [base64](https://crates.io/crates/base64)
-
-[![](https://img.shields.io/crates/v/base64.svg)](https://crates.io/crates/base64) [![Docs](https://docs.rs/base64/badge.svg)](https://docs.rs/base64) [![CircleCI](https://circleci.com/gh/marshallpierce/rust-base64/tree/master.svg?style=shield)](https://circleci.com/gh/marshallpierce/rust-base64/tree/master) [![codecov](https://codecov.io/gh/marshallpierce/rust-base64/branch/master/graph/badge.svg)](https://codecov.io/gh/marshallpierce/rust-base64) [![unsafe forbidden](https://img.shields.io/badge/unsafe-forbidden-success.svg)](https://github.com/rust-secure-code/safety-dance/)
-
-<a href="https://www.jetbrains.com/?from=rust-base64"><img src="/icon_CLion.svg" height="40px"/></a>
-
-Made with CLion. Thanks to JetBrains for supporting open source!
-
-It's base64. What more could anyone want?
-
-This library's goals are to be *correct* and *fast*. It's thoroughly tested and widely used. It exposes functionality at
-multiple levels of abstraction so you can choose the level of convenience vs performance that you want,
-e.g. `decode_engine_slice` decodes into an existing `&mut [u8]` and is pretty fast (2.6GiB/s for a 3 KiB input),
-whereas `decode_engine` allocates a new `Vec<u8>` and returns it, which might be more convenient in some cases, but is
-slower (although still fast enough for almost any purpose) at 2.1 GiB/s.
-
-See the [docs](https://docs.rs/base64) for all the details.
-
-## FAQ
-
-### I need to decode base64 with whitespace/null bytes/other random things interspersed in it. What should I do?
-
-Remove non-base64 characters from your input before decoding.
-
-If you have a `Vec` of base64, [retain](https://doc.rust-lang.org/std/vec/struct.Vec.html#method.retain) can be used to
-strip out whatever you need removed.
-
-If you have a `Read` (e.g. reading a file or network socket), there are various approaches.
-
-- Use [iter_read](https://crates.io/crates/iter-read) together with `Read`'s `bytes()` to filter out unwanted bytes.
-- Implement `Read` with a `read()` impl that delegates to your actual `Read`, and then drops any bytes you don't want.
-
-### I need to line-wrap base64, e.g. for MIME/PEM.
-
-[line-wrap](https://crates.io/crates/line-wrap) does just that.
-
-### I want canonical base64 encoding/decoding.
-
-First, don't do this. You should no more expect Base64 to be canonical than you should expect compression algorithms to
-produce canonical output across all usage in the wild (hint: they don't).
-However, [people are drawn to their own destruction like moths to a flame](https://eprint.iacr.org/2022/361), so here we
-are.
-
-There are two opportunities for non-canonical encoding (and thus, detection of the same during decoding): the final bits
-of the last encoded token in two or three token suffixes, and the `=` token used to inflate the suffix to a full four
-tokens.
-
-The trailing bits issue is unavoidable: with 6 bits available in each encoded token, 1 input byte takes 2 tokens,
-with the second one having some bits unused. Same for two input bytes: 16 bits, but 3 tokens have 18 bits. Unless we
-decide to stop shipping whole bytes around, we're stuck with those extra bits that a sneaky or buggy encoder might set
-to 1 instead of 0.
-
-The `=` pad bytes, on the other hand, are entirely a self-own by the Base64 standard. They do not affect decoding other
-than to provide an opportunity to say "that padding is incorrect". Exabytes of storage and transfer have no doubt been
-wasted on pointless `=` bytes. Somehow we all seem to be quite comfortable with, say, hex-encoded data just stopping
-when it's done rather than requiring a confirmation that the author of the encoder could count to four. Anyway, there
-are two ways to make pad bytes predictable: require canonical padding to the next multiple of four bytes as per the RFC,
-or, if you control all producers and consumers, save a few bytes by requiring no padding (especially applicable to the
-url-safe alphabet).
-
-All `Engine` implementations must at a minimum support treating non-canonical padding of both types as an error, and
-optionally may allow other behaviors.
-
-## Rust version compatibility
-
-The minimum supported Rust version is 1.48.0.
-
-# Contributing
-
-Contributions are very welcome. However, because this library is used widely, and in security-sensitive contexts, all
-PRs will be carefully scrutinized. Beyond that, this sort of low level library simply needs to be 100% correct. Nobody
-wants to chase bugs in encoding of any sort.
-
-All this means that it takes me a fair amount of time to review each PR, so it might take quite a while to carve out the
-free time to give each PR the attention it deserves. I will get to everyone eventually!
-
-## Developing
-
-Benchmarks are in `benches/`.
-
-```bash
-cargo bench
-```
-
-## no_std
-
-This crate supports no_std. By default the crate targets std via the `std` feature. You can deactivate
-the `default-features` to target `core` instead. In that case you lose out on all the functionality revolving
-around `std::io`, `std::error::Error`, and heap allocations. There is an additional `alloc` feature that you can activate
-to bring back the support for heap allocations.
-
-## Profiling
-
-On Linux, you can use [perf](https://perf.wiki.kernel.org/index.php/Main_Page) for profiling. Then compile the
-benchmarks with `cargo bench --no-run`.
-
-Run the benchmark binary with `perf` (shown here filtering to one particular benchmark, which will make the results
-easier to read). `perf` is only available to the root user on most systems as it fiddles with event counters in your
-CPU, so use `sudo`. We need to run the actual benchmark binary, hence the path into `target`. You can see the actual
-full path with `cargo bench -v`; it will print out the commands it runs. If you use the exact path
-that `bench` outputs, make sure you get the one that's for the benchmarks, not the tests. You may also want
-to `cargo clean` so you have only one `benchmarks-` binary (they tend to accumulate).
-
-```bash
-sudo perf record target/release/deps/benchmarks-* --bench decode_10mib_reuse
-```
-
-Then analyze the results, again with perf:
-
-```bash
-sudo perf annotate -l
-```
-
-You'll see a bunch of interleaved rust source and assembly like this. The section with `lib.rs:327` is telling us that
-4.02% of samples saw the `movzbl` aka bit shift as the active instruction. However, this percentage is not as exact as
-it seems due to a phenomenon called *skid*. Basically, a consequence of how fancy modern CPUs are is that this sort of
-instruction profiling is inherently inaccurate, especially in branch-heavy code.
-
-```text
- lib.rs:322 0.70 : 10698: mov %rdi,%rax
- 2.82 : 1069b: shr $0x38,%rax
- : if morsel == decode_tables::INVALID_VALUE {
- : bad_byte_index = input_index;
- : break;
- : };
- : accum = (morsel as u64) << 58;
- lib.rs:327 4.02 : 1069f: movzbl (%r9,%rax,1),%r15d
- : // fast loop of 8 bytes at a time
- : while input_index < length_of_full_chunks {
- : let mut accum: u64;
- :
- : let input_chunk = BigEndian::read_u64(&input_bytes[input_index..(input_index + 8)]);
- : morsel = decode_table[(input_chunk >> 56) as usize];
- lib.rs:322 3.68 : 106a4: cmp $0xff,%r15
- : if morsel == decode_tables::INVALID_VALUE {
- 0.00 : 106ab: je 1090e <base64::decode_config_buf::hbf68a45fefa299c1+0x46e>
-```
-
-## Fuzzing
-
-This uses [cargo-fuzz](https://github.com/rust-fuzz/cargo-fuzz). See `fuzz/fuzzers` for the available fuzzing scripts.
-To run, use an invocation like these:
-
-```bash
-cargo +nightly fuzz run roundtrip
-cargo +nightly fuzz run roundtrip_no_pad
-cargo +nightly fuzz run roundtrip_random_config -- -max_len=10240
-cargo +nightly fuzz run decode_random
-```
-
-## License
-
-This project is dual-licensed under MIT and Apache 2.0.
-
diff --git a/vendor/base64/RELEASE-NOTES.md b/vendor/base64/RELEASE-NOTES.md
deleted file mode 100644
index 91b68a67..00000000
--- a/vendor/base64/RELEASE-NOTES.md
+++ /dev/null
@@ -1,271 +0,0 @@
-# 0.22.1
-
-- Correct the symbols used for the predefined `alphabet::BIN_HEX`.
-
-# 0.22.0
-
-- `DecodeSliceError::OutputSliceTooSmall` is now conservative rather than precise. That is, the error will only occur if the decoded output _cannot_ fit, meaning that `Engine::decode_slice` can now be used with exactly-sized output slices. As part of this, `Engine::internal_decode` now returns `DecodeSliceError` instead of `DecodeError`, but that is not expected to affect any external callers.
-- `DecodeError::InvalidLength` now refers specifically to the _number of valid symbols_ being invalid (i.e. `len % 4 == 1`), rather than just the number of input bytes. This avoids confusing scenarios when based on interpretation you could make a case for either `InvalidLength` or `InvalidByte` being appropriate.
-- Decoding is somewhat faster (5-10%)
-
-# 0.21.7
-
-- Support getting an alphabet's contents as a str via `Alphabet::as_str()`
-
-# 0.21.6
-
-- Improved introductory documentation and example
-
-# 0.21.5
-
-- Add `Debug` and `Clone` impls for the general purpose Engine
-
-# 0.21.4
-
-- Make `encoded_len` `const`, allowing the creation of arrays sized to encode compile-time-known data lengths
-
-# 0.21.3
-
-- Implement `source` instead of `cause` on Error types
-- Roll back MSRV to 1.48.0 so Debian can continue to live in a time warp
-- Slightly faster chunked encoding for short inputs
-- Decrease binary size
-
-# 0.21.2
-
-- Rollback MSRV to 1.57.0 -- only dev dependencies need 1.60, not the main code
-
-# 0.21.1
-
-- Remove the possibility of panicking during decoded length calculations
-- `DecoderReader` no longer sometimes erroneously ignores
- padding [#226](https://github.com/marshallpierce/rust-base64/issues/226)
-
-## Breaking changes
-
-- `Engine.internal_decode` return type changed
-- Update MSRV to 1.60.0
-
-# 0.21.0
-
-## Migration
-
-### Functions
-
-| < 0.20 function | 0.21 equivalent |
-|-------------------------|-------------------------------------------------------------------------------------|
-| `encode()` | `engine::general_purpose::STANDARD.encode()` or `prelude::BASE64_STANDARD.encode()` |
-| `encode_config()` | `engine.encode()` |
-| `encode_config_buf()` | `engine.encode_string()` |
-| `encode_config_slice()` | `engine.encode_slice()` |
-| `decode()` | `engine::general_purpose::STANDARD.decode()` or `prelude::BASE64_STANDARD.decode()` |
-| `decode_config()` | `engine.decode()` |
-| `decode_config_buf()` | `engine.decode_vec()` |
-| `decode_config_slice()` | `engine.decode_slice()` |
-
-The short-lived 0.20 functions were the 0.13 functions with `config` replaced with `engine`.
-
-### Padding
-
-If applicable, use the preset engines `engine::STANDARD`, `engine::STANDARD_NO_PAD`, `engine::URL_SAFE`,
-or `engine::URL_SAFE_NO_PAD`.
-The `NO_PAD` ones require that padding is absent when decoding, and the others require that
-canonical padding is present .
-
-If you need the < 0.20 behavior that did not care about padding, or want to recreate < 0.20.0's predefined `Config`s
-precisely, see the following table.
-
-| 0.13.1 Config | 0.20.0+ alphabet | `encode_padding` | `decode_padding_mode` |
-|-----------------|------------------|------------------|-----------------------|
-| STANDARD | STANDARD | true | Indifferent |
-| STANDARD_NO_PAD | STANDARD | false | Indifferent |
-| URL_SAFE | URL_SAFE | true | Indifferent |
-| URL_SAFE_NO_PAD | URL_SAFE | false | Indifferent |
-
-# 0.21.0-rc.1
-
-- Restore the ability to decode into a slice of precisely the correct length with `Engine.decode_slice_unchecked`.
-- Add `Engine` as a `pub use` in `prelude`.
-
-# 0.21.0-beta.2
-
-## Breaking changes
-
-- Re-exports of preconfigured engines in `engine` are removed in favor of `base64::prelude::...` that are better suited
- to those who wish to `use` the entire path to a name.
-
-# 0.21.0-beta.1
-
-## Breaking changes
-
-- `FastPortable` was only meant to be an interim name, and shouldn't have shipped in 0.20. It is now `GeneralPurpose` to
- make its intended usage more clear.
-- `GeneralPurpose` and its config are now `pub use`'d in the `engine` module for convenience.
-- Change a few `from()` functions to be `new()`. `from()` causes confusing compiler errors because of confusion
- with `From::from`, and is a little misleading because some of those invocations are not very cheap as one would
- usually expect from a `from` call.
-- `encode*` and `decode*` top level functions are now methods on `Engine`.
-- `DEFAULT_ENGINE` was replaced by `engine::general_purpose::STANDARD`
-- Predefined engine consts `engine::general_purpose::{STANDARD, STANDARD_NO_PAD, URL_SAFE, URL_SAFE_NO_PAD}`
- - These are `pub use`d into `engine` as well
-- The `*_slice` decode/encode functions now return an error instead of panicking when the output slice is too small
- - As part of this, there isn't now a public way to decode into a slice _exactly_ the size needed for inputs that
- aren't multiples of 4 tokens. If adding up to 2 bytes to always be a multiple of 3 bytes for the decode buffer is
- a problem, file an issue.
-
-## Other changes
-
-- `decoded_len_estimate()` is provided to make it easy to size decode buffers correctly.
-
-# 0.20.0
-
-## Breaking changes
-
-- Update MSRV to 1.57.0
-- Decoding can now either ignore padding, require correct padding, or require no padding. The default is to require
- correct padding.
- - The `NO_PAD` config now requires that padding be absent when decoding.
-
-## 0.20.0-alpha.1
-
-### Breaking changes
-
-- Extended the `Config` concept into the `Engine` abstraction, allowing the user to pick different encoding / decoding
- implementations.
- - What was formerly the only algorithm is now the `FastPortable` engine, so named because it's portable (works on
- any CPU) and relatively fast.
- - This opens the door to a portable constant-time
- implementation ([#153](https://github.com/marshallpierce/rust-base64/pull/153),
- presumably `ConstantTimePortable`?) for security-sensitive applications that need side-channel resistance, and
- CPU-specific SIMD implementations for more speed.
- - Standard base64 per the RFC is available via `DEFAULT_ENGINE`. To use different alphabets or other settings (
- padding, etc), create your own engine instance.
-- `CharacterSet` is now `Alphabet` (per the RFC), and allows creating custom alphabets. The corresponding tables that
- were previously code-generated are now built dynamically.
-- Since there are already multiple breaking changes, various functions are renamed to be more consistent and
- discoverable.
-- MSRV is now 1.47.0 to allow various things to use `const fn`.
-- `DecoderReader` now owns its inner reader, and can expose it via `into_inner()`. For symmetry, `EncoderWriter` can do
- the same with its writer.
-- `encoded_len` is now public so you can size encode buffers precisely.
-
-# 0.13.1
-
-- More precise decode buffer sizing, avoiding unnecessary allocation in `decode_config`.
-
-# 0.13.0
-
-- Config methods are const
-- Added `EncoderStringWriter` to allow encoding directly to a String
-- `EncoderWriter` now owns its delegate writer rather than keeping a reference to it (though refs still work)
- - As a consequence, it is now possible to extract the delegate writer from an `EncoderWriter` via `finish()`, which
- returns `Result<W>` instead of `Result<()>`. If you were calling `finish()` explicitly, you will now need to
- use `let _ = foo.finish()` instead of just `foo.finish()` to avoid a warning about the unused value.
-- When decoding input that has both an invalid length and an invalid symbol as the last byte, `InvalidByte` will be
- emitted instead of `InvalidLength` to make the problem more obvious.
-
-# 0.12.2
-
-- Add `BinHex` alphabet
-
-# 0.12.1
-
-- Add `Bcrypt` alphabet
-
-# 0.12.0
-
-- A `Read` implementation (`DecoderReader`) to let users transparently decoded data from a b64 input source
-- IMAP's modified b64 alphabet
-- Relaxed type restrictions to just `AsRef<[ut8]>` for main `encode*`/`decode*` functions
-- A minor performance improvement in encoding
-
-# 0.11.0
-
-- Minimum rust version 1.34.0
-- `no_std` is now supported via the two new features `alloc` and `std`.
-
-# 0.10.1
-
-- Minimum rust version 1.27.2
-- Fix bug in streaming encoding ([#90](https://github.com/marshallpierce/rust-base64/pull/90)): if the underlying writer
- didn't write all the bytes given to it, the remaining bytes would not be retried later. See the docs
- on `EncoderWriter::write`.
-- Make it configurable whether or not to return an error when decoding detects excess trailing bits.
-
-# 0.10.0
-
-- Remove line wrapping. Line wrapping was never a great conceptual fit in this library, and other features (streaming
- encoding, etc) either couldn't support it or could support only special cases of it with a great increase in
- complexity. Line wrapping has been pulled out into a [line-wrap](https://crates.io/crates/line-wrap) crate, so it's
- still available if you need it.
- - `Base64Display` creation no longer uses a `Result` because it can't fail, which means its helper methods for
- common
- configs that `unwrap()` for you are no longer needed
-- Add a streaming encoder `Write` impl to transparently base64 as you write.
-- Remove the remaining `unsafe` code.
-- Remove whitespace stripping to simplify `no_std` support. No out of the box configs use it, and it's trivial to do
- yourself if needed: `filter(|b| !b" \n\t\r\x0b\x0c".contains(b)`.
-- Detect invalid trailing symbols when decoding and return an error rather than silently ignoring them.
-
-# 0.9.3
-
-- Update safemem
-
-# 0.9.2
-
-- Derive `Clone` for `DecodeError`.
-
-# 0.9.1
-
-- Add support for `crypt(3)`'s base64 variant.
-
-# 0.9.0
-
-- `decode_config_slice` function for no-allocation decoding, analogous to `encode_config_slice`
-- Decode performance optimization
-
-# 0.8.0
-
-- `encode_config_slice` function for no-allocation encoding
-
-# 0.7.0
-
-- `STANDARD_NO_PAD` config
-- `Base64Display` heap-free wrapper for use in format strings, etc
-
-# 0.6.0
-
-- Decode performance improvements
-- Use `unsafe` in fewer places
-- Added fuzzers
-
-# 0.5.2
-
-- Avoid usize overflow when calculating length
-- Better line wrapping performance
-
-# 0.5.1
-
-- Temporarily disable line wrapping
-- Add Apache 2.0 license
-
-# 0.5.0
-
-- MIME support, including configurable line endings and line wrapping
-- Removed `decode_ws`
-- Renamed `Base64Error` to `DecodeError`
-
-# 0.4.1
-
-- Allow decoding a `AsRef<[u8]>` instead of just a `&str`
-
-# 0.4.0
-
-- Configurable padding
-- Encode performance improvements
-
-# 0.3.0
-
-- Added encode/decode functions that do not allocate their own storage
-- Decode performance improvements
-- Extraneous padding bytes are no longer ignored. Now, an error will be returned.
diff --git a/vendor/base64/benches/benchmarks.rs b/vendor/base64/benches/benchmarks.rs
deleted file mode 100644
index 8f041854..00000000
--- a/vendor/base64/benches/benchmarks.rs
+++ /dev/null
@@ -1,238 +0,0 @@
-#[macro_use]
-extern crate criterion;
-
-use base64::{
- display,
- engine::{general_purpose::STANDARD, Engine},
- write,
-};
-use criterion::{black_box, Bencher, BenchmarkId, Criterion, Throughput};
-use rand::{Rng, SeedableRng};
-use std::io::{self, Read, Write};
-
-fn do_decode_bench(b: &mut Bencher, &size: &usize) {
- let mut v: Vec<u8> = Vec::with_capacity(size * 3 / 4);
- fill(&mut v);
- let encoded = STANDARD.encode(&v);
-
- b.iter(|| {
- let orig = STANDARD.decode(&encoded);
- black_box(&orig);
- });
-}
-
-fn do_decode_bench_reuse_buf(b: &mut Bencher, &size: &usize) {
- let mut v: Vec<u8> = Vec::with_capacity(size * 3 / 4);
- fill(&mut v);
- let encoded = STANDARD.encode(&v);
-
- let mut buf = Vec::new();
- b.iter(|| {
- STANDARD.decode_vec(&encoded, &mut buf).unwrap();
- black_box(&buf);
- buf.clear();
- });
-}
-
-fn do_decode_bench_slice(b: &mut Bencher, &size: &usize) {
- let mut v: Vec<u8> = Vec::with_capacity(size * 3 / 4);
- fill(&mut v);
- let encoded = STANDARD.encode(&v);
-
- let mut buf = vec![0; size];
- b.iter(|| {
- STANDARD.decode_slice(&encoded, &mut buf).unwrap();
- black_box(&buf);
- });
-}
-
-fn do_decode_bench_stream(b: &mut Bencher, &size: &usize) {
- let mut v: Vec<u8> = Vec::with_capacity(size * 3 / 4);
- fill(&mut v);
- let encoded = STANDARD.encode(&v);
-
- let mut buf = vec![0; size];
- buf.truncate(0);
-
- b.iter(|| {
- let mut cursor = io::Cursor::new(&encoded[..]);
- let mut decoder = base64::read::DecoderReader::new(&mut cursor, &STANDARD);
- decoder.read_to_end(&mut buf).unwrap();
- buf.clear();
- black_box(&buf);
- });
-}
-
-fn do_encode_bench(b: &mut Bencher, &size: &usize) {
- let mut v: Vec<u8> = Vec::with_capacity(size);
- fill(&mut v);
- b.iter(|| {
- let e = STANDARD.encode(&v);
- black_box(&e);
- });
-}
-
-fn do_encode_bench_display(b: &mut Bencher, &size: &usize) {
- let mut v: Vec<u8> = Vec::with_capacity(size);
- fill(&mut v);
- b.iter(|| {
- let e = format!("{}", display::Base64Display::new(&v, &STANDARD));
- black_box(&e);
- });
-}
-
-fn do_encode_bench_reuse_buf(b: &mut Bencher, &size: &usize) {
- let mut v: Vec<u8> = Vec::with_capacity(size);
- fill(&mut v);
- let mut buf = String::new();
- b.iter(|| {
- STANDARD.encode_string(&v, &mut buf);
- buf.clear();
- });
-}
-
-fn do_encode_bench_slice(b: &mut Bencher, &size: &usize) {
- let mut v: Vec<u8> = Vec::with_capacity(size);
- fill(&mut v);
- // conservative estimate of encoded size
- let mut buf = vec![0; v.len() * 2];
- b.iter(|| STANDARD.encode_slice(&v, &mut buf).unwrap());
-}
-
-fn do_encode_bench_stream(b: &mut Bencher, &size: &usize) {
- let mut v: Vec<u8> = Vec::with_capacity(size);
- fill(&mut v);
- let mut buf = Vec::with_capacity(size * 2);
-
- b.iter(|| {
- buf.clear();
- let mut stream_enc = write::EncoderWriter::new(&mut buf, &STANDARD);
- stream_enc.write_all(&v).unwrap();
- stream_enc.flush().unwrap();
- });
-}
-
-fn do_encode_bench_string_stream(b: &mut Bencher, &size: &usize) {
- let mut v: Vec<u8> = Vec::with_capacity(size);
- fill(&mut v);
-
- b.iter(|| {
- let mut stream_enc = write::EncoderStringWriter::new(&STANDARD);
- stream_enc.write_all(&v).unwrap();
- stream_enc.flush().unwrap();
- let _ = stream_enc.into_inner();
- });
-}
-
-fn do_encode_bench_string_reuse_buf_stream(b: &mut Bencher, &size: &usize) {
- let mut v: Vec<u8> = Vec::with_capacity(size);
- fill(&mut v);
-
- let mut buf = String::new();
- b.iter(|| {
- buf.clear();
- let mut stream_enc = write::EncoderStringWriter::from_consumer(&mut buf, &STANDARD);
- stream_enc.write_all(&v).unwrap();
- stream_enc.flush().unwrap();
- let _ = stream_enc.into_inner();
- });
-}
-
-fn fill(v: &mut Vec<u8>) {
- let cap = v.capacity();
- // weak randomness is plenty; we just want to not be completely friendly to the branch predictor
- let mut r = rand::rngs::SmallRng::from_entropy();
- while v.len() < cap {
- v.push(r.gen::<u8>());
- }
-}
-
-const BYTE_SIZES: [usize; 5] = [3, 50, 100, 500, 3 * 1024];
-
-// Benchmarks over these byte sizes take longer so we will run fewer samples to
-// keep the benchmark runtime reasonable.
-const LARGE_BYTE_SIZES: [usize; 3] = [3 * 1024 * 1024, 10 * 1024 * 1024, 30 * 1024 * 1024];
-
-fn encode_benchmarks(c: &mut Criterion, label: &str, byte_sizes: &[usize]) {
- let mut group = c.benchmark_group(label);
- group
- .warm_up_time(std::time::Duration::from_millis(500))
- .measurement_time(std::time::Duration::from_secs(3));
-
- for size in byte_sizes {
- group
- .throughput(Throughput::Bytes(*size as u64))
- .bench_with_input(BenchmarkId::new("encode", size), size, do_encode_bench)
- .bench_with_input(
- BenchmarkId::new("encode_display", size),
- size,
- do_encode_bench_display,
- )
- .bench_with_input(
- BenchmarkId::new("encode_reuse_buf", size),
- size,
- do_encode_bench_reuse_buf,
- )
- .bench_with_input(
- BenchmarkId::new("encode_slice", size),
- size,
- do_encode_bench_slice,
- )
- .bench_with_input(
- BenchmarkId::new("encode_reuse_buf_stream", size),
- size,
- do_encode_bench_stream,
- )
- .bench_with_input(
- BenchmarkId::new("encode_string_stream", size),
- size,
- do_encode_bench_string_stream,
- )
- .bench_with_input(
- BenchmarkId::new("encode_string_reuse_buf_stream", size),
- size,
- do_encode_bench_string_reuse_buf_stream,
- );
- }
-
- group.finish();
-}
-
-fn decode_benchmarks(c: &mut Criterion, label: &str, byte_sizes: &[usize]) {
- let mut group = c.benchmark_group(label);
-
- for size in byte_sizes {
- group
- .warm_up_time(std::time::Duration::from_millis(500))
- .measurement_time(std::time::Duration::from_secs(3))
- .throughput(Throughput::Bytes(*size as u64))
- .bench_with_input(BenchmarkId::new("decode", size), size, do_decode_bench)
- .bench_with_input(
- BenchmarkId::new("decode_reuse_buf", size),
- size,
- do_decode_bench_reuse_buf,
- )
- .bench_with_input(
- BenchmarkId::new("decode_slice", size),
- size,
- do_decode_bench_slice,
- )
- .bench_with_input(
- BenchmarkId::new("decode_stream", size),
- size,
- do_decode_bench_stream,
- );
- }
-
- group.finish();
-}
-
-fn bench(c: &mut Criterion) {
- encode_benchmarks(c, "encode_small_input", &BYTE_SIZES[..]);
- encode_benchmarks(c, "encode_large_input", &LARGE_BYTE_SIZES[..]);
- decode_benchmarks(c, "decode_small_input", &BYTE_SIZES[..]);
- decode_benchmarks(c, "decode_large_input", &LARGE_BYTE_SIZES[..]);
-}
-
-criterion_group!(benches, bench);
-criterion_main!(benches);
diff --git a/vendor/base64/clippy.toml b/vendor/base64/clippy.toml
deleted file mode 100644
index 11d46a73..00000000
--- a/vendor/base64/clippy.toml
+++ /dev/null
@@ -1 +0,0 @@
-msrv = "1.48.0"
diff --git a/vendor/base64/examples/base64.rs b/vendor/base64/examples/base64.rs
deleted file mode 100644
index 0c8aa3fe..00000000
--- a/vendor/base64/examples/base64.rs
+++ /dev/null
@@ -1,81 +0,0 @@
-use std::fs::File;
-use std::io::{self, Read};
-use std::path::PathBuf;
-use std::process;
-
-use base64::{alphabet, engine, read, write};
-use clap::Parser;
-
-#[derive(Clone, Debug, Parser, strum::EnumString, Default)]
-#[strum(serialize_all = "kebab-case")]
-enum Alphabet {
- #[default]
- Standard,
- UrlSafe,
-}
-
-/// Base64 encode or decode FILE (or standard input), to standard output.
-#[derive(Debug, Parser)]
-struct Opt {
- /// Decode the base64-encoded input (default: encode the input as base64).
- #[structopt(short = 'd', long = "decode")]
- decode: bool,
-
- /// The encoding alphabet: "standard" (default) or "url-safe".
- #[structopt(long = "alphabet")]
- alphabet: Option<Alphabet>,
-
- /// Omit padding characters while encoding, and reject them while decoding.
- #[structopt(short = 'p', long = "no-padding")]
- no_padding: bool,
-
- /// The file to encode or decode.
- #[structopt(name = "FILE", parse(from_os_str))]
- file: Option<PathBuf>,
-}
-
-fn main() {
- let opt = Opt::parse();
- let stdin;
- let mut input: Box<dyn Read> = match opt.file {
- None => {
- stdin = io::stdin();
- Box::new(stdin.lock())
- }
- Some(ref f) if f.as_os_str() == "-" => {
- stdin = io::stdin();
- Box::new(stdin.lock())
- }
- Some(f) => Box::new(File::open(f).unwrap()),
- };
-
- let alphabet = opt.alphabet.unwrap_or_default();
- let engine = engine::GeneralPurpose::new(
- &match alphabet {
- Alphabet::Standard => alphabet::STANDARD,
- Alphabet::UrlSafe => alphabet::URL_SAFE,
- },
- match opt.no_padding {
- true => engine::general_purpose::NO_PAD,
- false => engine::general_purpose::PAD,
- },
- );
-
- let stdout = io::stdout();
- let mut stdout = stdout.lock();
- let r = if opt.decode {
- let mut decoder = read::DecoderReader::new(&mut input, &engine);
- io::copy(&mut decoder, &mut stdout)
- } else {
- let mut encoder = write::EncoderWriter::new(&mut stdout, &engine);
- io::copy(&mut input, &mut encoder)
- };
- if let Err(e) = r {
- eprintln!(
- "Base64 {} failed with {}",
- if opt.decode { "decode" } else { "encode" },
- e
- );
- process::exit(1);
- }
-}
diff --git a/vendor/base64/icon_CLion.svg b/vendor/base64/icon_CLion.svg
deleted file mode 100644
index e9edb044..00000000
--- a/vendor/base64/icon_CLion.svg
+++ /dev/null
@@ -1,34 +0,0 @@
-<svg id="Layer_1" data-name="Layer 1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 128 128">
- <defs>
- <linearGradient id="linear-gradient" x1="40.69" y1="-676.56" x2="83.48" y2="-676.56" gradientTransform="matrix(1, 0, 0, -1, 0, -648.86)" gradientUnits="userSpaceOnUse">
- <stop offset="0" stop-color="#ed358c"/>
- <stop offset="0.16" stop-color="#e9388c"/>
- <stop offset="0.3" stop-color="#de418c"/>
- <stop offset="0.43" stop-color="#cc508c"/>
- <stop offset="0.57" stop-color="#b2658d"/>
- <stop offset="0.7" stop-color="#90808d"/>
- <stop offset="0.83" stop-color="#67a18e"/>
- <stop offset="0.95" stop-color="#37c78f"/>
- <stop offset="1" stop-color="#22d88f"/>
- </linearGradient>
- <linearGradient id="linear-gradient-2" x1="32.58" y1="-665.27" x2="13.76" y2="-791.59" gradientTransform="matrix(1, 0, 0, -1, 0, -648.86)" gradientUnits="userSpaceOnUse">
- <stop offset="0.09" stop-color="#22d88f"/>
- <stop offset="0.9" stop-color="#029de0"/>
- </linearGradient>
- <linearGradient id="linear-gradient-3" x1="116.68" y1="-660.66" x2="-12.09" y2="-796.66" xlink:href="#linear-gradient-2"/>
- <linearGradient id="linear-gradient-4" x1="73.35" y1="-739.1" x2="122.29" y2="-746.06" xlink:href="#linear-gradient-2"/>
- </defs>
- <title>icon_CLion</title>
- <g>
- <polygon points="49.2 51.8 40.6 55.4 48.4 0 77.8 16.2 49.2 51.8" fill="url(#linear-gradient)"/>
- <polygon points="44.6 76.8 48.8 0 11.8 23.2 0 94 44.6 76.8" fill="url(#linear-gradient-2)"/>
- <polygon points="125.4 38.4 109 4.8 77.8 16.2 55 41.4 0 94 41.6 124.4 93.6 77.2 125.4 38.4" fill="url(#linear-gradient-3)"/>
- <polygon points="53.8 54.6 46.6 98.4 75.8 121 107.8 128 128 82.4 53.8 54.6" fill="url(#linear-gradient-4)"/>
- </g>
- <g>
- <rect x="24" y="24" width="80" height="80"/>
- <rect x="31.6" y="89" width="30" height="5" fill="#fff"/>
- <path d="M31,51.2h0A16.83,16.83,0,0,1,48.2,34c6.2,0,10,2,13,5.2l-4.6,5.4c-2.6-2.4-5.2-3.8-8.4-3.8-5.6,0-9.6,4.6-9.6,10.4h0c0,5.6,4,10.4,9.6,10.4,3.8,0,6.2-1.6,8.8-3.8l4.6,4.6c-3.4,3.6-7.2,6-13.6,6A17,17,0,0,1,31,51.2" fill="#fff"/>
- <path d="M66.6,34.4H74v27H88.4v6.2H66.6V34.4Z" fill="#fff"/>
- </g>
-</svg>
diff --git a/vendor/base64/src/alphabet.rs b/vendor/base64/src/alphabet.rs
deleted file mode 100644
index b07bfdfe..00000000
--- a/vendor/base64/src/alphabet.rs
+++ /dev/null
@@ -1,285 +0,0 @@
-//! Provides [Alphabet] and constants for alphabets commonly used in the wild.
-
-use crate::PAD_BYTE;
-use core::{convert, fmt};
-#[cfg(any(feature = "std", test))]
-use std::error;
-
-const ALPHABET_SIZE: usize = 64;
-
-/// An alphabet defines the 64 ASCII characters (symbols) used for base64.
-///
-/// Common alphabets are provided as constants, and custom alphabets
-/// can be made via `from_str` or the `TryFrom<str>` implementation.
-///
-/// # Examples
-///
-/// Building and using a custom Alphabet:
-///
-/// ```
-/// let custom = base64::alphabet::Alphabet::new("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/").unwrap();
-///
-/// let engine = base64::engine::GeneralPurpose::new(
-/// &custom,
-/// base64::engine::general_purpose::PAD);
-/// ```
-///
-/// Building a const:
-///
-/// ```
-/// use base64::alphabet::Alphabet;
-///
-/// static CUSTOM: Alphabet = {
-/// // Result::unwrap() isn't const yet, but panic!() is OK
-/// match Alphabet::new("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/") {
-/// Ok(x) => x,
-/// Err(_) => panic!("creation of alphabet failed"),
-/// }
-/// };
-/// ```
-///
-/// Building lazily:
-///
-/// ```
-/// use base64::{
-/// alphabet::Alphabet,
-/// engine::{general_purpose::GeneralPurpose, GeneralPurposeConfig},
-/// };
-/// use once_cell::sync::Lazy;
-///
-/// static CUSTOM: Lazy<Alphabet> = Lazy::new(||
-/// Alphabet::new("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/").unwrap()
-/// );
-/// ```
-#[derive(Clone, Debug, Eq, PartialEq)]
-pub struct Alphabet {
- pub(crate) symbols: [u8; ALPHABET_SIZE],
-}
-
-impl Alphabet {
- /// Performs no checks so that it can be const.
- /// Used only for known-valid strings.
- const fn from_str_unchecked(alphabet: &str) -> Self {
- let mut symbols = [0_u8; ALPHABET_SIZE];
- let source_bytes = alphabet.as_bytes();
-
- // a way to copy that's allowed in const fn
- let mut index = 0;
- while index < ALPHABET_SIZE {
- symbols[index] = source_bytes[index];
- index += 1;
- }
-
- Self { symbols }
- }
-
- /// Create an `Alphabet` from a string of 64 unique printable ASCII bytes.
- ///
- /// The `=` byte is not allowed as it is used for padding.
- pub const fn new(alphabet: &str) -> Result<Self, ParseAlphabetError> {
- let bytes = alphabet.as_bytes();
- if bytes.len() != ALPHABET_SIZE {
- return Err(ParseAlphabetError::InvalidLength);
- }
-
- {
- let mut index = 0;
- while index < ALPHABET_SIZE {
- let byte = bytes[index];
-
- // must be ascii printable. 127 (DEL) is commonly considered printable
- // for some reason but clearly unsuitable for base64.
- if !(byte >= 32_u8 && byte <= 126_u8) {
- return Err(ParseAlphabetError::UnprintableByte(byte));
- }
- // = is assumed to be padding, so cannot be used as a symbol
- if byte == PAD_BYTE {
- return Err(ParseAlphabetError::ReservedByte(byte));
- }
-
- // Check for duplicates while staying within what const allows.
- // It's n^2, but only over 64 hot bytes, and only once, so it's likely in the single digit
- // microsecond range.
-
- let mut probe_index = 0;
- while probe_index < ALPHABET_SIZE {
- if probe_index == index {
- probe_index += 1;
- continue;
- }
-
- let probe_byte = bytes[probe_index];
-
- if byte == probe_byte {
- return Err(ParseAlphabetError::DuplicatedByte(byte));
- }
-
- probe_index += 1;
- }
-
- index += 1;
- }
- }
-
- Ok(Self::from_str_unchecked(alphabet))
- }
-
- /// Create a `&str` from the symbols in the `Alphabet`
- pub fn as_str(&self) -> &str {
- core::str::from_utf8(&self.symbols).unwrap()
- }
-}
-
-impl convert::TryFrom<&str> for Alphabet {
- type Error = ParseAlphabetError;
-
- fn try_from(value: &str) -> Result<Self, Self::Error> {
- Self::new(value)
- }
-}
-
-/// Possible errors when constructing an [Alphabet] from a `str`.
-#[derive(Debug, Eq, PartialEq)]
-pub enum ParseAlphabetError {
- /// Alphabets must be 64 ASCII bytes
- InvalidLength,
- /// All bytes must be unique
- DuplicatedByte(u8),
- /// All bytes must be printable (in the range `[32, 126]`).
- UnprintableByte(u8),
- /// `=` cannot be used
- ReservedByte(u8),
-}
-
-impl fmt::Display for ParseAlphabetError {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- match self {
- Self::InvalidLength => write!(f, "Invalid length - must be 64 bytes"),
- Self::DuplicatedByte(b) => write!(f, "Duplicated byte: {:#04x}", b),
- Self::UnprintableByte(b) => write!(f, "Unprintable byte: {:#04x}", b),
- Self::ReservedByte(b) => write!(f, "Reserved byte: {:#04x}", b),
- }
- }
-}
-
-#[cfg(any(feature = "std", test))]
-impl error::Error for ParseAlphabetError {}
-
-/// The standard alphabet (with `+` and `/`) specified in [RFC 4648][].
-///
-/// [RFC 4648]: https://datatracker.ietf.org/doc/html/rfc4648#section-4
-pub const STANDARD: Alphabet = Alphabet::from_str_unchecked(
- "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/",
-);
-
-/// The URL-safe alphabet (with `-` and `_`) specified in [RFC 4648][].
-///
-/// [RFC 4648]: https://datatracker.ietf.org/doc/html/rfc4648#section-5
-pub const URL_SAFE: Alphabet = Alphabet::from_str_unchecked(
- "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_",
-);
-
-/// The `crypt(3)` alphabet (with `.` and `/` as the _first_ two characters).
-///
-/// Not standardized, but folk wisdom on the net asserts that this alphabet is what crypt uses.
-pub const CRYPT: Alphabet = Alphabet::from_str_unchecked(
- "./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz",
-);
-
-/// The bcrypt alphabet.
-pub const BCRYPT: Alphabet = Alphabet::from_str_unchecked(
- "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789",
-);
-
-/// The alphabet used in IMAP-modified UTF-7 (with `+` and `,`).
-///
-/// See [RFC 3501](https://tools.ietf.org/html/rfc3501#section-5.1.3)
-pub const IMAP_MUTF7: Alphabet = Alphabet::from_str_unchecked(
- "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,",
-);
-
-/// The alphabet used in BinHex 4.0 files.
-///
-/// See [BinHex 4.0 Definition](http://files.stairways.com/other/binhex-40-specs-info.txt)
-pub const BIN_HEX: Alphabet = Alphabet::from_str_unchecked(
- "!\"#$%&'()*+,-012345689@ABCDEFGHIJKLMNPQRSTUVXYZ[`abcdefhijklmpqr",
-);
-
-#[cfg(test)]
-mod tests {
- use crate::alphabet::*;
- use core::convert::TryFrom as _;
-
- #[test]
- fn detects_duplicate_start() {
- assert_eq!(
- ParseAlphabetError::DuplicatedByte(b'A'),
- Alphabet::new("AACDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/")
- .unwrap_err()
- );
- }
-
- #[test]
- fn detects_duplicate_end() {
- assert_eq!(
- ParseAlphabetError::DuplicatedByte(b'/'),
- Alphabet::new("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789//")
- .unwrap_err()
- );
- }
-
- #[test]
- fn detects_duplicate_middle() {
- assert_eq!(
- ParseAlphabetError::DuplicatedByte(b'Z'),
- Alphabet::new("ABCDEFGHIJKLMNOPQRSTUVWXYZZbcdefghijklmnopqrstuvwxyz0123456789+/")
- .unwrap_err()
- );
- }
-
- #[test]
- fn detects_length() {
- assert_eq!(
- ParseAlphabetError::InvalidLength,
- Alphabet::new(
- "xxxxxxxxxABCDEFGHIJKLMNOPQRSTUVWXYZZbcdefghijklmnopqrstuvwxyz0123456789+/",
- )
- .unwrap_err()
- );
- }
-
- #[test]
- fn detects_padding() {
- assert_eq!(
- ParseAlphabetError::ReservedByte(b'='),
- Alphabet::new("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+=")
- .unwrap_err()
- );
- }
-
- #[test]
- fn detects_unprintable() {
- // form feed
- assert_eq!(
- ParseAlphabetError::UnprintableByte(0xc),
- Alphabet::new("\x0cBCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/")
- .unwrap_err()
- );
- }
-
- #[test]
- fn same_as_unchecked() {
- assert_eq!(
- STANDARD,
- Alphabet::try_from("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/")
- .unwrap()
- );
- }
-
- #[test]
- fn str_same_as_input() {
- let alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
- let a = Alphabet::try_from(alphabet).unwrap();
- assert_eq!(alphabet, a.as_str())
- }
-}
diff --git a/vendor/base64/src/chunked_encoder.rs b/vendor/base64/src/chunked_encoder.rs
deleted file mode 100644
index 817b339f..00000000
--- a/vendor/base64/src/chunked_encoder.rs
+++ /dev/null
@@ -1,172 +0,0 @@
-use crate::{
- encode::add_padding,
- engine::{Config, Engine},
-};
-#[cfg(any(feature = "alloc", test))]
-use alloc::string::String;
-#[cfg(any(feature = "alloc", test))]
-use core::str;
-
-/// The output mechanism for ChunkedEncoder's encoded bytes.
-pub trait Sink {
- type Error;
-
- /// Handle a chunk of encoded base64 data (as UTF-8 bytes)
- fn write_encoded_bytes(&mut self, encoded: &[u8]) -> Result<(), Self::Error>;
-}
-
-/// A base64 encoder that emits encoded bytes in chunks without heap allocation.
-pub struct ChunkedEncoder<'e, E: Engine + ?Sized> {
- engine: &'e E,
-}
-
-impl<'e, E: Engine + ?Sized> ChunkedEncoder<'e, E> {
- pub fn new(engine: &'e E) -> ChunkedEncoder<'e, E> {
- ChunkedEncoder { engine }
- }
-
- pub fn encode<S: Sink>(&self, bytes: &[u8], sink: &mut S) -> Result<(), S::Error> {
- const BUF_SIZE: usize = 1024;
- const CHUNK_SIZE: usize = BUF_SIZE / 4 * 3;
-
- let mut buf = [0; BUF_SIZE];
- for chunk in bytes.chunks(CHUNK_SIZE) {
- let mut len = self.engine.internal_encode(chunk, &mut buf);
- if chunk.len() != CHUNK_SIZE && self.engine.config().encode_padding() {
- // Final, potentially partial, chunk.
- // Only need to consider if padding is needed on a partial chunk since full chunk
- // is a multiple of 3, which therefore won't be padded.
- // Pad output to multiple of four bytes if required by config.
- len += add_padding(len, &mut buf[len..]);
- }
- sink.write_encoded_bytes(&buf[..len])?;
- }
-
- Ok(())
- }
-}
-
-// A really simple sink that just appends to a string
-#[cfg(any(feature = "alloc", test))]
-pub(crate) struct StringSink<'a> {
- string: &'a mut String,
-}
-
-#[cfg(any(feature = "alloc", test))]
-impl<'a> StringSink<'a> {
- pub(crate) fn new(s: &mut String) -> StringSink {
- StringSink { string: s }
- }
-}
-
-#[cfg(any(feature = "alloc", test))]
-impl<'a> Sink for StringSink<'a> {
- type Error = ();
-
- fn write_encoded_bytes(&mut self, s: &[u8]) -> Result<(), Self::Error> {
- self.string.push_str(str::from_utf8(s).unwrap());
-
- Ok(())
- }
-}
-
-#[cfg(test)]
-pub mod tests {
- use rand::{
- distributions::{Distribution, Uniform},
- Rng, SeedableRng,
- };
-
- use crate::{
- alphabet::STANDARD,
- engine::general_purpose::{GeneralPurpose, GeneralPurposeConfig, PAD},
- tests::random_engine,
- };
-
- use super::*;
-
- #[test]
- fn chunked_encode_empty() {
- assert_eq!("", chunked_encode_str(&[], PAD));
- }
-
- #[test]
- fn chunked_encode_intermediate_fast_loop() {
- // > 8 bytes input, will enter the pretty fast loop
- assert_eq!("Zm9vYmFyYmF6cXV4", chunked_encode_str(b"foobarbazqux", PAD));
- }
-
- #[test]
- fn chunked_encode_fast_loop() {
- // > 32 bytes input, will enter the uber fast loop
- assert_eq!(
- "Zm9vYmFyYmF6cXV4cXV1eGNvcmdlZ3JhdWx0Z2FycGx5eg==",
- chunked_encode_str(b"foobarbazquxquuxcorgegraultgarplyz", PAD)
- );
- }
-
- #[test]
- fn chunked_encode_slow_loop_only() {
- // < 8 bytes input, slow loop only
- assert_eq!("Zm9vYmFy", chunked_encode_str(b"foobar", PAD));
- }
-
- #[test]
- fn chunked_encode_matches_normal_encode_random_string_sink() {
- let helper = StringSinkTestHelper;
- chunked_encode_matches_normal_encode_random(&helper);
- }
-
- pub fn chunked_encode_matches_normal_encode_random<S: SinkTestHelper>(sink_test_helper: &S) {
- let mut input_buf: Vec<u8> = Vec::new();
- let mut output_buf = String::new();
- let mut rng = rand::rngs::SmallRng::from_entropy();
- let input_len_range = Uniform::new(1, 10_000);
-
- for _ in 0..20_000 {
- input_buf.clear();
- output_buf.clear();
-
- let buf_len = input_len_range.sample(&mut rng);
- for _ in 0..buf_len {
- input_buf.push(rng.gen());
- }
-
- let engine = random_engine(&mut rng);
-
- let chunk_encoded_string = sink_test_helper.encode_to_string(&engine, &input_buf);
- engine.encode_string(&input_buf, &mut output_buf);
-
- assert_eq!(output_buf, chunk_encoded_string, "input len={}", buf_len);
- }
- }
-
- fn chunked_encode_str(bytes: &[u8], config: GeneralPurposeConfig) -> String {
- let mut s = String::new();
-
- let mut sink = StringSink::new(&mut s);
- let engine = GeneralPurpose::new(&STANDARD, config);
- let encoder = ChunkedEncoder::new(&engine);
- encoder.encode(bytes, &mut sink).unwrap();
-
- s
- }
-
- // An abstraction around sinks so that we can have tests that easily to any sink implementation
- pub trait SinkTestHelper {
- fn encode_to_string<E: Engine>(&self, engine: &E, bytes: &[u8]) -> String;
- }
-
- struct StringSinkTestHelper;
-
- impl SinkTestHelper for StringSinkTestHelper {
- fn encode_to_string<E: Engine>(&self, engine: &E, bytes: &[u8]) -> String {
- let encoder = ChunkedEncoder::new(engine);
- let mut s = String::new();
- let mut sink = StringSink::new(&mut s);
- encoder.encode(bytes, &mut sink).unwrap();
-
- s
- }
- }
-}
diff --git a/vendor/base64/src/decode.rs b/vendor/base64/src/decode.rs
deleted file mode 100644
index 6df8abad..00000000
--- a/vendor/base64/src/decode.rs
+++ /dev/null
@@ -1,386 +0,0 @@
-use crate::engine::{general_purpose::STANDARD, DecodeEstimate, Engine};
-#[cfg(any(feature = "alloc", test))]
-use alloc::vec::Vec;
-use core::fmt;
-#[cfg(any(feature = "std", test))]
-use std::error;
-
-/// Errors that can occur while decoding.
-#[derive(Clone, Debug, PartialEq, Eq)]
-pub enum DecodeError {
- /// An invalid byte was found in the input. The offset and offending byte are provided.
- ///
- /// Padding characters (`=`) interspersed in the encoded form are invalid, as they may only
- /// be present as the last 0-2 bytes of input.
- ///
- /// This error may also indicate that extraneous trailing input bytes are present, causing
- /// otherwise valid padding to no longer be the last bytes of input.
- InvalidByte(usize, u8),
- /// The length of the input, as measured in valid base64 symbols, is invalid.
- /// There must be 2-4 symbols in the last input quad.
- InvalidLength(usize),
- /// The last non-padding input symbol's encoded 6 bits have nonzero bits that will be discarded.
- /// This is indicative of corrupted or truncated Base64.
- /// Unlike [DecodeError::InvalidByte], which reports symbols that aren't in the alphabet,
- /// this error is for symbols that are in the alphabet but represent nonsensical encodings.
- InvalidLastSymbol(usize, u8),
- /// The nature of the padding was not as configured: absent or incorrect when it must be
- /// canonical, or present when it must be absent, etc.
- InvalidPadding,
-}
-
-impl fmt::Display for DecodeError {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- match *self {
- Self::InvalidByte(index, byte) => {
- write!(f, "Invalid symbol {}, offset {}.", byte, index)
- }
- Self::InvalidLength(len) => write!(f, "Invalid input length: {}", len),
- Self::InvalidLastSymbol(index, byte) => {
- write!(f, "Invalid last symbol {}, offset {}.", byte, index)
- }
- Self::InvalidPadding => write!(f, "Invalid padding"),
- }
- }
-}
-
-#[cfg(any(feature = "std", test))]
-impl error::Error for DecodeError {}
-
-/// Errors that can occur while decoding into a slice.
-#[derive(Clone, Debug, PartialEq, Eq)]
-pub enum DecodeSliceError {
- /// A [DecodeError] occurred
- DecodeError(DecodeError),
- /// The provided slice is too small.
- OutputSliceTooSmall,
-}
-
-impl fmt::Display for DecodeSliceError {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- match self {
- Self::DecodeError(e) => write!(f, "DecodeError: {}", e),
- Self::OutputSliceTooSmall => write!(f, "Output slice too small"),
- }
- }
-}
-
-#[cfg(any(feature = "std", test))]
-impl error::Error for DecodeSliceError {
- fn source(&self) -> Option<&(dyn error::Error + 'static)> {
- match self {
- DecodeSliceError::DecodeError(e) => Some(e),
- DecodeSliceError::OutputSliceTooSmall => None,
- }
- }
-}
-
-impl From<DecodeError> for DecodeSliceError {
- fn from(e: DecodeError) -> Self {
- DecodeSliceError::DecodeError(e)
- }
-}
-
-/// Decode base64 using the [`STANDARD` engine](STANDARD).
-///
-/// See [Engine::decode].
-#[deprecated(since = "0.21.0", note = "Use Engine::decode")]
-#[cfg(any(feature = "alloc", test))]
-pub fn decode<T: AsRef<[u8]>>(input: T) -> Result<Vec<u8>, DecodeError> {
- STANDARD.decode(input)
-}
-
-/// Decode from string reference as octets using the specified [Engine].
-///
-/// See [Engine::decode].
-///Returns a `Result` containing a `Vec<u8>`.
-#[deprecated(since = "0.21.0", note = "Use Engine::decode")]
-#[cfg(any(feature = "alloc", test))]
-pub fn decode_engine<E: Engine, T: AsRef<[u8]>>(
- input: T,
- engine: &E,
-) -> Result<Vec<u8>, DecodeError> {
- engine.decode(input)
-}
-
-/// Decode from string reference as octets.
-///
-/// See [Engine::decode_vec].
-#[cfg(any(feature = "alloc", test))]
-#[deprecated(since = "0.21.0", note = "Use Engine::decode_vec")]
-pub fn decode_engine_vec<E: Engine, T: AsRef<[u8]>>(
- input: T,
- buffer: &mut Vec<u8>,
- engine: &E,
-) -> Result<(), DecodeError> {
- engine.decode_vec(input, buffer)
-}
-
-/// Decode the input into the provided output slice.
-///
-/// See [Engine::decode_slice].
-#[deprecated(since = "0.21.0", note = "Use Engine::decode_slice")]
-pub fn decode_engine_slice<E: Engine, T: AsRef<[u8]>>(
- input: T,
- output: &mut [u8],
- engine: &E,
-) -> Result<usize, DecodeSliceError> {
- engine.decode_slice(input, output)
-}
-
-/// Returns a conservative estimate of the decoded size of `encoded_len` base64 symbols (rounded up
-/// to the next group of 3 decoded bytes).
-///
-/// The resulting length will be a safe choice for the size of a decode buffer, but may have up to
-/// 2 trailing bytes that won't end up being needed.
-///
-/// # Examples
-///
-/// ```
-/// use base64::decoded_len_estimate;
-///
-/// assert_eq!(3, decoded_len_estimate(1));
-/// assert_eq!(3, decoded_len_estimate(2));
-/// assert_eq!(3, decoded_len_estimate(3));
-/// assert_eq!(3, decoded_len_estimate(4));
-/// // start of the next quad of encoded symbols
-/// assert_eq!(6, decoded_len_estimate(5));
-/// ```
-pub fn decoded_len_estimate(encoded_len: usize) -> usize {
- STANDARD
- .internal_decoded_len_estimate(encoded_len)
- .decoded_len_estimate()
-}
-
-#[cfg(test)]
-mod tests {
- use super::*;
- use crate::{
- alphabet,
- engine::{general_purpose, Config, GeneralPurpose},
- tests::{assert_encode_sanity, random_engine},
- };
- use rand::{
- distributions::{Distribution, Uniform},
- Rng, SeedableRng,
- };
-
- #[test]
- fn decode_into_nonempty_vec_doesnt_clobber_existing_prefix() {
- let mut orig_data = Vec::new();
- let mut encoded_data = String::new();
- let mut decoded_with_prefix = Vec::new();
- let mut decoded_without_prefix = Vec::new();
- let mut prefix = Vec::new();
-
- let prefix_len_range = Uniform::new(0, 1000);
- let input_len_range = Uniform::new(0, 1000);
-
- let mut rng = rand::rngs::SmallRng::from_entropy();
-
- for _ in 0..10_000 {
- orig_data.clear();
- encoded_data.clear();
- decoded_with_prefix.clear();
- decoded_without_prefix.clear();
- prefix.clear();
-
- let input_len = input_len_range.sample(&mut rng);
-
- for _ in 0..input_len {
- orig_data.push(rng.gen());
- }
-
- let engine = random_engine(&mut rng);
- engine.encode_string(&orig_data, &mut encoded_data);
- assert_encode_sanity(&encoded_data, engine.config().encode_padding(), input_len);
-
- let prefix_len = prefix_len_range.sample(&mut rng);
-
- // fill the buf with a prefix
- for _ in 0..prefix_len {
- prefix.push(rng.gen());
- }
-
- decoded_with_prefix.resize(prefix_len, 0);
- decoded_with_prefix.copy_from_slice(&prefix);
-
- // decode into the non-empty buf
- engine
- .decode_vec(&encoded_data, &mut decoded_with_prefix)
- .unwrap();
- // also decode into the empty buf
- engine
- .decode_vec(&encoded_data, &mut decoded_without_prefix)
- .unwrap();
-
- assert_eq!(
- prefix_len + decoded_without_prefix.len(),
- decoded_with_prefix.len()
- );
- assert_eq!(orig_data, decoded_without_prefix);
-
- // append plain decode onto prefix
- prefix.append(&mut decoded_without_prefix);
-
- assert_eq!(prefix, decoded_with_prefix);
- }
- }
-
- #[test]
- fn decode_slice_doesnt_clobber_existing_prefix_or_suffix() {
- do_decode_slice_doesnt_clobber_existing_prefix_or_suffix(|e, input, output| {
- e.decode_slice(input, output).unwrap()
- })
- }
-
- #[test]
- fn decode_slice_unchecked_doesnt_clobber_existing_prefix_or_suffix() {
- do_decode_slice_doesnt_clobber_existing_prefix_or_suffix(|e, input, output| {
- e.decode_slice_unchecked(input, output).unwrap()
- })
- }
-
- #[test]
- fn decode_engine_estimation_works_for_various_lengths() {
- let engine = GeneralPurpose::new(&alphabet::STANDARD, general_purpose::NO_PAD);
- for num_prefix_quads in 0..100 {
- for suffix in &["AA", "AAA", "AAAA"] {
- let mut prefix = "AAAA".repeat(num_prefix_quads);
- prefix.push_str(suffix);
- // make sure no overflow (and thus a panic) occurs
- let res = engine.decode(prefix);
- assert!(res.is_ok());
- }
- }
- }
-
- #[test]
- fn decode_slice_output_length_errors() {
- for num_quads in 1..100 {
- let input = "AAAA".repeat(num_quads);
- let mut vec = vec![0; (num_quads - 1) * 3];
- assert_eq!(
- DecodeSliceError::OutputSliceTooSmall,
- STANDARD.decode_slice(&input, &mut vec).unwrap_err()
- );
- vec.push(0);
- assert_eq!(
- DecodeSliceError::OutputSliceTooSmall,
- STANDARD.decode_slice(&input, &mut vec).unwrap_err()
- );
- vec.push(0);
- assert_eq!(
- DecodeSliceError::OutputSliceTooSmall,
- STANDARD.decode_slice(&input, &mut vec).unwrap_err()
- );
- vec.push(0);
- // now it works
- assert_eq!(
- num_quads * 3,
- STANDARD.decode_slice(&input, &mut vec).unwrap()
- );
- }
- }
-
- fn do_decode_slice_doesnt_clobber_existing_prefix_or_suffix<
- F: Fn(&GeneralPurpose, &[u8], &mut [u8]) -> usize,
- >(
- call_decode: F,
- ) {
- let mut orig_data = Vec::new();
- let mut encoded_data = String::new();
- let mut decode_buf = Vec::new();
- let mut decode_buf_copy: Vec<u8> = Vec::new();
-
- let input_len_range = Uniform::new(0, 1000);
-
- let mut rng = rand::rngs::SmallRng::from_entropy();
-
- for _ in 0..10_000 {
- orig_data.clear();
- encoded_data.clear();
- decode_buf.clear();
- decode_buf_copy.clear();
-
- let input_len = input_len_range.sample(&mut rng);
-
- for _ in 0..input_len {
- orig_data.push(rng.gen());
- }
-
- let engine = random_engine(&mut rng);
- engine.encode_string(&orig_data, &mut encoded_data);
- assert_encode_sanity(&encoded_data, engine.config().encode_padding(), input_len);
-
- // fill the buffer with random garbage, long enough to have some room before and after
- for _ in 0..5000 {
- decode_buf.push(rng.gen());
- }
-
- // keep a copy for later comparison
- decode_buf_copy.extend(decode_buf.iter());
-
- let offset = 1000;
-
- // decode into the non-empty buf
- let decode_bytes_written =
- call_decode(&engine, encoded_data.as_bytes(), &mut decode_buf[offset..]);
-
- assert_eq!(orig_data.len(), decode_bytes_written);
- assert_eq!(
- orig_data,
- &decode_buf[offset..(offset + decode_bytes_written)]
- );
- assert_eq!(&decode_buf_copy[0..offset], &decode_buf[0..offset]);
- assert_eq!(
- &decode_buf_copy[offset + decode_bytes_written..],
- &decode_buf[offset + decode_bytes_written..]
- );
- }
- }
-}
-
-#[allow(deprecated)]
-#[cfg(test)]
-mod coverage_gaming {
- use super::*;
- use std::error::Error;
-
- #[test]
- fn decode_error() {
- let _ = format!("{:?}", DecodeError::InvalidPadding.clone());
- let _ = format!(
- "{} {} {} {}",
- DecodeError::InvalidByte(0, 0),
- DecodeError::InvalidLength(0),
- DecodeError::InvalidLastSymbol(0, 0),
- DecodeError::InvalidPadding,
- );
- }
-
- #[test]
- fn decode_slice_error() {
- let _ = format!("{:?}", DecodeSliceError::OutputSliceTooSmall.clone());
- let _ = format!(
- "{} {}",
- DecodeSliceError::OutputSliceTooSmall,
- DecodeSliceError::DecodeError(DecodeError::InvalidPadding)
- );
- let _ = DecodeSliceError::OutputSliceTooSmall.source();
- let _ = DecodeSliceError::DecodeError(DecodeError::InvalidPadding).source();
- }
-
- #[test]
- fn deprecated_fns() {
- let _ = decode("");
- let _ = decode_engine("", &crate::prelude::BASE64_STANDARD);
- let _ = decode_engine_vec("", &mut Vec::new(), &crate::prelude::BASE64_STANDARD);
- let _ = decode_engine_slice("", &mut [], &crate::prelude::BASE64_STANDARD);
- }
-
- #[test]
- fn decoded_len_est() {
- assert_eq!(3, decoded_len_estimate(4));
- }
-}
diff --git a/vendor/base64/src/display.rs b/vendor/base64/src/display.rs
deleted file mode 100644
index fc292f1b..00000000
--- a/vendor/base64/src/display.rs
+++ /dev/null
@@ -1,88 +0,0 @@
-//! Enables base64'd output anywhere you might use a `Display` implementation, like a format string.
-//!
-//! ```
-//! use base64::{display::Base64Display, engine::general_purpose::STANDARD};
-//!
-//! let data = vec![0x0, 0x1, 0x2, 0x3];
-//! let wrapper = Base64Display::new(&data, &STANDARD);
-//!
-//! assert_eq!("base64: AAECAw==", format!("base64: {}", wrapper));
-//! ```
-
-use super::chunked_encoder::ChunkedEncoder;
-use crate::engine::Engine;
-use core::fmt::{Display, Formatter};
-use core::{fmt, str};
-
-/// A convenience wrapper for base64'ing bytes into a format string without heap allocation.
-pub struct Base64Display<'a, 'e, E: Engine> {
- bytes: &'a [u8],
- chunked_encoder: ChunkedEncoder<'e, E>,
-}
-
-impl<'a, 'e, E: Engine> Base64Display<'a, 'e, E> {
- /// Create a `Base64Display` with the provided engine.
- pub fn new(bytes: &'a [u8], engine: &'e E) -> Base64Display<'a, 'e, E> {
- Base64Display {
- bytes,
- chunked_encoder: ChunkedEncoder::new(engine),
- }
- }
-}
-
-impl<'a, 'e, E: Engine> Display for Base64Display<'a, 'e, E> {
- fn fmt(&self, formatter: &mut Formatter) -> Result<(), fmt::Error> {
- let mut sink = FormatterSink { f: formatter };
- self.chunked_encoder.encode(self.bytes, &mut sink)
- }
-}
-
-struct FormatterSink<'a, 'b: 'a> {
- f: &'a mut Formatter<'b>,
-}
-
-impl<'a, 'b: 'a> super::chunked_encoder::Sink for FormatterSink<'a, 'b> {
- type Error = fmt::Error;
-
- fn write_encoded_bytes(&mut self, encoded: &[u8]) -> Result<(), Self::Error> {
- // Avoid unsafe. If max performance is needed, write your own display wrapper that uses
- // unsafe here to gain about 10-15%.
- self.f
- .write_str(str::from_utf8(encoded).expect("base64 data was not utf8"))
- }
-}
-
-#[cfg(test)]
-mod tests {
- use super::super::chunked_encoder::tests::{
- chunked_encode_matches_normal_encode_random, SinkTestHelper,
- };
- use super::*;
- use crate::engine::general_purpose::STANDARD;
-
- #[test]
- fn basic_display() {
- assert_eq!(
- "~$Zm9vYmFy#*",
- format!("~${}#*", Base64Display::new(b"foobar", &STANDARD))
- );
- assert_eq!(
- "~$Zm9vYmFyZg==#*",
- format!("~${}#*", Base64Display::new(b"foobarf", &STANDARD))
- );
- }
-
- #[test]
- fn display_encode_matches_normal_encode() {
- let helper = DisplaySinkTestHelper;
- chunked_encode_matches_normal_encode_random(&helper);
- }
-
- struct DisplaySinkTestHelper;
-
- impl SinkTestHelper for DisplaySinkTestHelper {
- fn encode_to_string<E: Engine>(&self, engine: &E, bytes: &[u8]) -> String {
- format!("{}", Base64Display::new(bytes, engine))
- }
- }
-}
diff --git a/vendor/base64/src/encode.rs b/vendor/base64/src/encode.rs
deleted file mode 100644
index ae6d7907..00000000
--- a/vendor/base64/src/encode.rs
+++ /dev/null
@@ -1,492 +0,0 @@
-#[cfg(any(feature = "alloc", test))]
-use alloc::string::String;
-use core::fmt;
-#[cfg(any(feature = "std", test))]
-use std::error;
-
-#[cfg(any(feature = "alloc", test))]
-use crate::engine::general_purpose::STANDARD;
-use crate::engine::{Config, Engine};
-use crate::PAD_BYTE;
-
-/// Encode arbitrary octets as base64 using the [`STANDARD` engine](STANDARD).
-///
-/// See [Engine::encode].
-#[allow(unused)]
-#[deprecated(since = "0.21.0", note = "Use Engine::encode")]
-#[cfg(any(feature = "alloc", test))]
-pub fn encode<T: AsRef<[u8]>>(input: T) -> String {
- STANDARD.encode(input)
-}
-
-///Encode arbitrary octets as base64 using the provided `Engine` into a new `String`.
-///
-/// See [Engine::encode].
-#[allow(unused)]
-#[deprecated(since = "0.21.0", note = "Use Engine::encode")]
-#[cfg(any(feature = "alloc", test))]
-pub fn encode_engine<E: Engine, T: AsRef<[u8]>>(input: T, engine: &E) -> String {
- engine.encode(input)
-}
-
-///Encode arbitrary octets as base64 into a supplied `String`.
-///
-/// See [Engine::encode_string].
-#[allow(unused)]
-#[deprecated(since = "0.21.0", note = "Use Engine::encode_string")]
-#[cfg(any(feature = "alloc", test))]
-pub fn encode_engine_string<E: Engine, T: AsRef<[u8]>>(
- input: T,
- output_buf: &mut String,
- engine: &E,
-) {
- engine.encode_string(input, output_buf)
-}
-
-/// Encode arbitrary octets as base64 into a supplied slice.
-///
-/// See [Engine::encode_slice].
-#[allow(unused)]
-#[deprecated(since = "0.21.0", note = "Use Engine::encode_slice")]
-pub fn encode_engine_slice<E: Engine, T: AsRef<[u8]>>(
- input: T,
- output_buf: &mut [u8],
- engine: &E,
-) -> Result<usize, EncodeSliceError> {
- engine.encode_slice(input, output_buf)
-}
-
-/// B64-encode and pad (if configured).
-///
-/// This helper exists to avoid recalculating encoded_size, which is relatively expensive on short
-/// inputs.
-///
-/// `encoded_size` is the encoded size calculated for `input`.
-///
-/// `output` must be of size `encoded_size`.
-///
-/// All bytes in `output` will be written to since it is exactly the size of the output.
-pub(crate) fn encode_with_padding<E: Engine + ?Sized>(
- input: &[u8],
- output: &mut [u8],
- engine: &E,
- expected_encoded_size: usize,
-) {
- debug_assert_eq!(expected_encoded_size, output.len());
-
- let b64_bytes_written = engine.internal_encode(input, output);
-
- let padding_bytes = if engine.config().encode_padding() {
- add_padding(b64_bytes_written, &mut output[b64_bytes_written..])
- } else {
- 0
- };
-
- let encoded_bytes = b64_bytes_written
- .checked_add(padding_bytes)
- .expect("usize overflow when calculating b64 length");
-
- debug_assert_eq!(expected_encoded_size, encoded_bytes);
-}
-
-/// Calculate the base64 encoded length for a given input length, optionally including any
-/// appropriate padding bytes.
-///
-/// Returns `None` if the encoded length can't be represented in `usize`. This will happen for
-/// input lengths in approximately the top quarter of the range of `usize`.
-pub const fn encoded_len(bytes_len: usize, padding: bool) -> Option<usize> {
- let rem = bytes_len % 3;
-
- let complete_input_chunks = bytes_len / 3;
- // `?` is disallowed in const, and `let Some(_) = _ else` requires 1.65.0, whereas this
- // messier syntax works on 1.48
- let complete_chunk_output =
- if let Some(complete_chunk_output) = complete_input_chunks.checked_mul(4) {
- complete_chunk_output
- } else {
- return None;
- };
-
- if rem > 0 {
- if padding {
- complete_chunk_output.checked_add(4)
- } else {
- let encoded_rem = match rem {
- 1 => 2,
- // only other possible remainder is 2
- // can't use a separate _ => unreachable!() in const fns in ancient rust versions
- _ => 3,
- };
- complete_chunk_output.checked_add(encoded_rem)
- }
- } else {
- Some(complete_chunk_output)
- }
-}
-
-/// Write padding characters.
-/// `unpadded_output_len` is the size of the unpadded but base64 encoded data.
-/// `output` is the slice where padding should be written, of length at least 2.
-///
-/// Returns the number of padding bytes written.
-pub(crate) fn add_padding(unpadded_output_len: usize, output: &mut [u8]) -> usize {
- let pad_bytes = (4 - (unpadded_output_len % 4)) % 4;
- // for just a couple bytes, this has better performance than using
- // .fill(), or iterating over mutable refs, which call memset()
- #[allow(clippy::needless_range_loop)]
- for i in 0..pad_bytes {
- output[i] = PAD_BYTE;
- }
-
- pad_bytes
-}
-
-/// Errors that can occur while encoding into a slice.
-#[derive(Clone, Debug, PartialEq, Eq)]
-pub enum EncodeSliceError {
- /// The provided slice is too small.
- OutputSliceTooSmall,
-}
-
-impl fmt::Display for EncodeSliceError {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- match self {
- Self::OutputSliceTooSmall => write!(f, "Output slice too small"),
- }
- }
-}
-
-#[cfg(any(feature = "std", test))]
-impl error::Error for EncodeSliceError {}
-
-#[cfg(test)]
-mod tests {
- use super::*;
-
- use crate::{
- alphabet,
- engine::general_purpose::{GeneralPurpose, NO_PAD, STANDARD},
- tests::{assert_encode_sanity, random_config, random_engine},
- };
- use rand::{
- distributions::{Distribution, Uniform},
- Rng, SeedableRng,
- };
- use std::str;
-
- const URL_SAFE_NO_PAD_ENGINE: GeneralPurpose = GeneralPurpose::new(&alphabet::URL_SAFE, NO_PAD);
-
- #[test]
- fn encoded_size_correct_standard() {
- assert_encoded_length(0, 0, &STANDARD, true);
-
- assert_encoded_length(1, 4, &STANDARD, true);
- assert_encoded_length(2, 4, &STANDARD, true);
- assert_encoded_length(3, 4, &STANDARD, true);
-
- assert_encoded_length(4, 8, &STANDARD, true);
- assert_encoded_length(5, 8, &STANDARD, true);
- assert_encoded_length(6, 8, &STANDARD, true);
-
- assert_encoded_length(7, 12, &STANDARD, true);
- assert_encoded_length(8, 12, &STANDARD, true);
- assert_encoded_length(9, 12, &STANDARD, true);
-
- assert_encoded_length(54, 72, &STANDARD, true);
-
- assert_encoded_length(55, 76, &STANDARD, true);
- assert_encoded_length(56, 76, &STANDARD, true);
- assert_encoded_length(57, 76, &STANDARD, true);
-
- assert_encoded_length(58, 80, &STANDARD, true);
- }
-
- #[test]
- fn encoded_size_correct_no_pad() {
- assert_encoded_length(0, 0, &URL_SAFE_NO_PAD_ENGINE, false);
-
- assert_encoded_length(1, 2, &URL_SAFE_NO_PAD_ENGINE, false);
- assert_encoded_length(2, 3, &URL_SAFE_NO_PAD_ENGINE, false);
- assert_encoded_length(3, 4, &URL_SAFE_NO_PAD_ENGINE, false);
-
- assert_encoded_length(4, 6, &URL_SAFE_NO_PAD_ENGINE, false);
- assert_encoded_length(5, 7, &URL_SAFE_NO_PAD_ENGINE, false);
- assert_encoded_length(6, 8, &URL_SAFE_NO_PAD_ENGINE, false);
-
- assert_encoded_length(7, 10, &URL_SAFE_NO_PAD_ENGINE, false);
- assert_encoded_length(8, 11, &URL_SAFE_NO_PAD_ENGINE, false);
- assert_encoded_length(9, 12, &URL_SAFE_NO_PAD_ENGINE, false);
-
- assert_encoded_length(54, 72, &URL_SAFE_NO_PAD_ENGINE, false);
-
- assert_encoded_length(55, 74, &URL_SAFE_NO_PAD_ENGINE, false);
- assert_encoded_length(56, 75, &URL_SAFE_NO_PAD_ENGINE, false);
- assert_encoded_length(57, 76, &URL_SAFE_NO_PAD_ENGINE, false);
-
- assert_encoded_length(58, 78, &URL_SAFE_NO_PAD_ENGINE, false);
- }
-
- #[test]
- fn encoded_size_overflow() {
- assert_eq!(None, encoded_len(usize::MAX, true));
- }
-
- #[test]
- fn encode_engine_string_into_nonempty_buffer_doesnt_clobber_prefix() {
- let mut orig_data = Vec::new();
- let mut prefix = String::new();
- let mut encoded_data_no_prefix = String::new();
- let mut encoded_data_with_prefix = String::new();
- let mut decoded = Vec::new();
-
- let prefix_len_range = Uniform::new(0, 1000);
- let input_len_range = Uniform::new(0, 1000);
-
- let mut rng = rand::rngs::SmallRng::from_entropy();
-
- for _ in 0..10_000 {
- orig_data.clear();
- prefix.clear();
- encoded_data_no_prefix.clear();
- encoded_data_with_prefix.clear();
- decoded.clear();
-
- let input_len = input_len_range.sample(&mut rng);
-
- for _ in 0..input_len {
- orig_data.push(rng.gen());
- }
-
- let prefix_len = prefix_len_range.sample(&mut rng);
- for _ in 0..prefix_len {
- // getting convenient random single-byte printable chars that aren't base64 is
- // annoying
- prefix.push('#');
- }
- encoded_data_with_prefix.push_str(&prefix);
-
- let engine = random_engine(&mut rng);
- engine.encode_string(&orig_data, &mut encoded_data_no_prefix);
- engine.encode_string(&orig_data, &mut encoded_data_with_prefix);
-
- assert_eq!(
- encoded_data_no_prefix.len() + prefix_len,
- encoded_data_with_prefix.len()
- );
- assert_encode_sanity(
- &encoded_data_no_prefix,
- engine.config().encode_padding(),
- input_len,
- );
- assert_encode_sanity(
- &encoded_data_with_prefix[prefix_len..],
- engine.config().encode_padding(),
- input_len,
- );
-
- // append plain encode onto prefix
- prefix.push_str(&encoded_data_no_prefix);
-
- assert_eq!(prefix, encoded_data_with_prefix);
-
- engine
- .decode_vec(&encoded_data_no_prefix, &mut decoded)
- .unwrap();
- assert_eq!(orig_data, decoded);
- }
- }
-
- #[test]
- fn encode_engine_slice_into_nonempty_buffer_doesnt_clobber_suffix() {
- let mut orig_data = Vec::new();
- let mut encoded_data = Vec::new();
- let mut encoded_data_original_state = Vec::new();
- let mut decoded = Vec::new();
-
- let input_len_range = Uniform::new(0, 1000);
-
- let mut rng = rand::rngs::SmallRng::from_entropy();
-
- for _ in 0..10_000 {
- orig_data.clear();
- encoded_data.clear();
- encoded_data_original_state.clear();
- decoded.clear();
-
- let input_len = input_len_range.sample(&mut rng);
-
- for _ in 0..input_len {
- orig_data.push(rng.gen());
- }
-
- // plenty of existing garbage in the encoded buffer
- for _ in 0..10 * input_len {
- encoded_data.push(rng.gen());
- }
-
- encoded_data_original_state.extend_from_slice(&encoded_data);
-
- let engine = random_engine(&mut rng);
-
- let encoded_size = encoded_len(input_len, engine.config().encode_padding()).unwrap();
-
- assert_eq!(
- encoded_size,
- engine.encode_slice(&orig_data, &mut encoded_data).unwrap()
- );
-
- assert_encode_sanity(
- str::from_utf8(&encoded_data[0..encoded_size]).unwrap(),
- engine.config().encode_padding(),
- input_len,
- );
-
- assert_eq!(
- &encoded_data[encoded_size..],
- &encoded_data_original_state[encoded_size..]
- );
-
- engine
- .decode_vec(&encoded_data[0..encoded_size], &mut decoded)
- .unwrap();
- assert_eq!(orig_data, decoded);
- }
- }
-
- #[test]
- fn encode_to_slice_random_valid_utf8() {
- let mut input = Vec::new();
- let mut output = Vec::new();
-
- let input_len_range = Uniform::new(0, 1000);
-
- let mut rng = rand::rngs::SmallRng::from_entropy();
-
- for _ in 0..10_000 {
- input.clear();
- output.clear();
-
- let input_len = input_len_range.sample(&mut rng);
-
- for _ in 0..input_len {
- input.push(rng.gen());
- }
-
- let config = random_config(&mut rng);
- let engine = random_engine(&mut rng);
-
- // fill up the output buffer with garbage
- let encoded_size = encoded_len(input_len, config.encode_padding()).unwrap();
- for _ in 0..encoded_size {
- output.push(rng.gen());
- }
-
- let orig_output_buf = output.clone();
-
- let bytes_written = engine.internal_encode(&input, &mut output);
-
- // make sure the part beyond bytes_written is the same garbage it was before
- assert_eq!(orig_output_buf[bytes_written..], output[bytes_written..]);
-
- // make sure the encoded bytes are UTF-8
- let _ = str::from_utf8(&output[0..bytes_written]).unwrap();
- }
- }
-
- #[test]
- fn encode_with_padding_random_valid_utf8() {
- let mut input = Vec::new();
- let mut output = Vec::new();
-
- let input_len_range = Uniform::new(0, 1000);
-
- let mut rng = rand::rngs::SmallRng::from_entropy();
-
- for _ in 0..10_000 {
- input.clear();
- output.clear();
-
- let input_len = input_len_range.sample(&mut rng);
-
- for _ in 0..input_len {
- input.push(rng.gen());
- }
-
- let engine = random_engine(&mut rng);
-
- // fill up the output buffer with garbage
- let encoded_size = encoded_len(input_len, engine.config().encode_padding()).unwrap();
- for _ in 0..encoded_size + 1000 {
- output.push(rng.gen());
- }
-
- let orig_output_buf = output.clone();
-
- encode_with_padding(&input, &mut output[0..encoded_size], &engine, encoded_size);
-
- // make sure the part beyond b64 is the same garbage it was before
- assert_eq!(orig_output_buf[encoded_size..], output[encoded_size..]);
-
- // make sure the encoded bytes are UTF-8
- let _ = str::from_utf8(&output[0..encoded_size]).unwrap();
- }
- }
-
- #[test]
- fn add_padding_random_valid_utf8() {
- let mut output = Vec::new();
-
- let mut rng = rand::rngs::SmallRng::from_entropy();
-
- // cover our bases for length % 4
- for unpadded_output_len in 0..20 {
- output.clear();
-
- // fill output with random
- for _ in 0..100 {
- output.push(rng.gen());
- }
-
- let orig_output_buf = output.clone();
-
- let bytes_written = add_padding(unpadded_output_len, &mut output);
-
- // make sure the part beyond bytes_written is the same garbage it was before
- assert_eq!(orig_output_buf[bytes_written..], output[bytes_written..]);
-
- // make sure the encoded bytes are UTF-8
- let _ = str::from_utf8(&output[0..bytes_written]).unwrap();
- }
- }
-
- fn assert_encoded_length<E: Engine>(
- input_len: usize,
- enc_len: usize,
- engine: &E,
- padded: bool,
- ) {
- assert_eq!(enc_len, encoded_len(input_len, padded).unwrap());
-
- let mut bytes: Vec<u8> = Vec::new();
- let mut rng = rand::rngs::SmallRng::from_entropy();
-
- for _ in 0..input_len {
- bytes.push(rng.gen());
- }
-
- let encoded = engine.encode(&bytes);
- assert_encode_sanity(&encoded, padded, input_len);
-
- assert_eq!(enc_len, encoded.len());
- }
-
- #[test]
- fn encode_imap() {
- assert_eq!(
- &GeneralPurpose::new(&alphabet::IMAP_MUTF7, NO_PAD).encode(b"\xFB\xFF"),
- &GeneralPurpose::new(&alphabet::STANDARD, NO_PAD)
- .encode(b"\xFB\xFF")
- .replace('/', ",")
- );
- }
-}
diff --git a/vendor/base64/src/engine/general_purpose/decode.rs b/vendor/base64/src/engine/general_purpose/decode.rs
deleted file mode 100644
index b55d3fc5..00000000
--- a/vendor/base64/src/engine/general_purpose/decode.rs
+++ /dev/null
@@ -1,357 +0,0 @@
-use crate::{
- engine::{general_purpose::INVALID_VALUE, DecodeEstimate, DecodeMetadata, DecodePaddingMode},
- DecodeError, DecodeSliceError, PAD_BYTE,
-};
-
-#[doc(hidden)]
-pub struct GeneralPurposeEstimate {
- /// input len % 4
- rem: usize,
- conservative_decoded_len: usize,
-}
-
-impl GeneralPurposeEstimate {
- pub(crate) fn new(encoded_len: usize) -> Self {
- let rem = encoded_len % 4;
- Self {
- rem,
- conservative_decoded_len: (encoded_len / 4 + (rem > 0) as usize) * 3,
- }
- }
-}
-
-impl DecodeEstimate for GeneralPurposeEstimate {
- fn decoded_len_estimate(&self) -> usize {
- self.conservative_decoded_len
- }
-}
-
-/// Helper to avoid duplicating num_chunks calculation, which is costly on short inputs.
-/// Returns the decode metadata, or an error.
-// We're on the fragile edge of compiler heuristics here. If this is not inlined, slow. If this is
-// inlined(always), a different slow. plain ol' inline makes the benchmarks happiest at the moment,
-// but this is fragile and the best setting changes with only minor code modifications.
-#[inline]
-pub(crate) fn decode_helper(
- input: &[u8],
- estimate: GeneralPurposeEstimate,
- output: &mut [u8],
- decode_table: &[u8; 256],
- decode_allow_trailing_bits: bool,
- padding_mode: DecodePaddingMode,
-) -> Result<DecodeMetadata, DecodeSliceError> {
- let input_complete_nonterminal_quads_len =
- complete_quads_len(input, estimate.rem, output.len(), decode_table)?;
-
- const UNROLLED_INPUT_CHUNK_SIZE: usize = 32;
- const UNROLLED_OUTPUT_CHUNK_SIZE: usize = UNROLLED_INPUT_CHUNK_SIZE / 4 * 3;
-
- let input_complete_quads_after_unrolled_chunks_len =
- input_complete_nonterminal_quads_len % UNROLLED_INPUT_CHUNK_SIZE;
-
- let input_unrolled_loop_len =
- input_complete_nonterminal_quads_len - input_complete_quads_after_unrolled_chunks_len;
-
- // chunks of 32 bytes
- for (chunk_index, chunk) in input[..input_unrolled_loop_len]
- .chunks_exact(UNROLLED_INPUT_CHUNK_SIZE)
- .enumerate()
- {
- let input_index = chunk_index * UNROLLED_INPUT_CHUNK_SIZE;
- let chunk_output = &mut output[chunk_index * UNROLLED_OUTPUT_CHUNK_SIZE
- ..(chunk_index + 1) * UNROLLED_OUTPUT_CHUNK_SIZE];
-
- decode_chunk_8(
- &chunk[0..8],
- input_index,
- decode_table,
- &mut chunk_output[0..6],
- )?;
- decode_chunk_8(
- &chunk[8..16],
- input_index + 8,
- decode_table,
- &mut chunk_output[6..12],
- )?;
- decode_chunk_8(
- &chunk[16..24],
- input_index + 16,
- decode_table,
- &mut chunk_output[12..18],
- )?;
- decode_chunk_8(
- &chunk[24..32],
- input_index + 24,
- decode_table,
- &mut chunk_output[18..24],
- )?;
- }
-
- // remaining quads, except for the last possibly partial one, as it may have padding
- let output_unrolled_loop_len = input_unrolled_loop_len / 4 * 3;
- let output_complete_quad_len = input_complete_nonterminal_quads_len / 4 * 3;
- {
- let output_after_unroll = &mut output[output_unrolled_loop_len..output_complete_quad_len];
-
- for (chunk_index, chunk) in input
- [input_unrolled_loop_len..input_complete_nonterminal_quads_len]
- .chunks_exact(4)
- .enumerate()
- {
- let chunk_output = &mut output_after_unroll[chunk_index * 3..chunk_index * 3 + 3];
-
- decode_chunk_4(
- chunk,
- input_unrolled_loop_len + chunk_index * 4,
- decode_table,
- chunk_output,
- )?;
- }
- }
-
- super::decode_suffix::decode_suffix(
- input,
- input_complete_nonterminal_quads_len,
- output,
- output_complete_quad_len,
- decode_table,
- decode_allow_trailing_bits,
- padding_mode,
- )
-}
-
-/// Returns the length of complete quads, except for the last one, even if it is complete.
-///
-/// Returns an error if the output len is not big enough for decoding those complete quads, or if
-/// the input % 4 == 1, and that last byte is an invalid value other than a pad byte.
-///
-/// - `input` is the base64 input
-/// - `input_len_rem` is input len % 4
-/// - `output_len` is the length of the output slice
-pub(crate) fn complete_quads_len(
- input: &[u8],
- input_len_rem: usize,
- output_len: usize,
- decode_table: &[u8; 256],
-) -> Result<usize, DecodeSliceError> {
- debug_assert!(input.len() % 4 == input_len_rem);
-
- // detect a trailing invalid byte, like a newline, as a user convenience
- if input_len_rem == 1 {
- let last_byte = input[input.len() - 1];
- // exclude pad bytes; might be part of padding that extends from earlier in the input
- if last_byte != PAD_BYTE && decode_table[usize::from(last_byte)] == INVALID_VALUE {
- return Err(DecodeError::InvalidByte(input.len() - 1, last_byte).into());
- }
- };
-
- // skip last quad, even if it's complete, as it may have padding
- let input_complete_nonterminal_quads_len = input
- .len()
- .saturating_sub(input_len_rem)
- // if rem was 0, subtract 4 to avoid padding
- .saturating_sub((input_len_rem == 0) as usize * 4);
- debug_assert!(
- input.is_empty() || (1..=4).contains(&(input.len() - input_complete_nonterminal_quads_len))
- );
-
- // check that everything except the last quad handled by decode_suffix will fit
- if output_len < input_complete_nonterminal_quads_len / 4 * 3 {
- return Err(DecodeSliceError::OutputSliceTooSmall);
- };
- Ok(input_complete_nonterminal_quads_len)
-}
-
-/// Decode 8 bytes of input into 6 bytes of output.
-///
-/// `input` is the 8 bytes to decode.
-/// `index_at_start_of_input` is the offset in the overall input (used for reporting errors
-/// accurately)
-/// `decode_table` is the lookup table for the particular base64 alphabet.
-/// `output` will have its first 6 bytes overwritten
-// yes, really inline (worth 30-50% speedup)
-#[inline(always)]
-fn decode_chunk_8(
- input: &[u8],
- index_at_start_of_input: usize,
- decode_table: &[u8; 256],
- output: &mut [u8],
-) -> Result<(), DecodeError> {
- let morsel = decode_table[usize::from(input[0])];
- if morsel == INVALID_VALUE {
- return Err(DecodeError::InvalidByte(index_at_start_of_input, input[0]));
- }
- let mut accum = u64::from(morsel) << 58;
-
- let morsel = decode_table[usize::from(input[1])];
- if morsel == INVALID_VALUE {
- return Err(DecodeError::InvalidByte(
- index_at_start_of_input + 1,
- input[1],
- ));
- }
- accum |= u64::from(morsel) << 52;
-
- let morsel = decode_table[usize::from(input[2])];
- if morsel == INVALID_VALUE {
- return Err(DecodeError::InvalidByte(
- index_at_start_of_input + 2,
- input[2],
- ));
- }
- accum |= u64::from(morsel) << 46;
-
- let morsel = decode_table[usize::from(input[3])];
- if morsel == INVALID_VALUE {
- return Err(DecodeError::InvalidByte(
- index_at_start_of_input + 3,
- input[3],
- ));
- }
- accum |= u64::from(morsel) << 40;
-
- let morsel = decode_table[usize::from(input[4])];
- if morsel == INVALID_VALUE {
- return Err(DecodeError::InvalidByte(
- index_at_start_of_input + 4,
- input[4],
- ));
- }
- accum |= u64::from(morsel) << 34;
-
- let morsel = decode_table[usize::from(input[5])];
- if morsel == INVALID_VALUE {
- return Err(DecodeError::InvalidByte(
- index_at_start_of_input + 5,
- input[5],
- ));
- }
- accum |= u64::from(morsel) << 28;
-
- let morsel = decode_table[usize::from(input[6])];
- if morsel == INVALID_VALUE {
- return Err(DecodeError::InvalidByte(
- index_at_start_of_input + 6,
- input[6],
- ));
- }
- accum |= u64::from(morsel) << 22;
-
- let morsel = decode_table[usize::from(input[7])];
- if morsel == INVALID_VALUE {
- return Err(DecodeError::InvalidByte(
- index_at_start_of_input + 7,
- input[7],
- ));
- }
- accum |= u64::from(morsel) << 16;
-
- output[..6].copy_from_slice(&accum.to_be_bytes()[..6]);
-
- Ok(())
-}
-
-/// Like [decode_chunk_8] but for 4 bytes of input and 3 bytes of output.
-#[inline(always)]
-fn decode_chunk_4(
- input: &[u8],
- index_at_start_of_input: usize,
- decode_table: &[u8; 256],
- output: &mut [u8],
-) -> Result<(), DecodeError> {
- let morsel = decode_table[usize::from(input[0])];
- if morsel == INVALID_VALUE {
- return Err(DecodeError::InvalidByte(index_at_start_of_input, input[0]));
- }
- let mut accum = u32::from(morsel) << 26;
-
- let morsel = decode_table[usize::from(input[1])];
- if morsel == INVALID_VALUE {
- return Err(DecodeError::InvalidByte(
- index_at_start_of_input + 1,
- input[1],
- ));
- }
- accum |= u32::from(morsel) << 20;
-
- let morsel = decode_table[usize::from(input[2])];
- if morsel == INVALID_VALUE {
- return Err(DecodeError::InvalidByte(
- index_at_start_of_input + 2,
- input[2],
- ));
- }
- accum |= u32::from(morsel) << 14;
-
- let morsel = decode_table[usize::from(input[3])];
- if morsel == INVALID_VALUE {
- return Err(DecodeError::InvalidByte(
- index_at_start_of_input + 3,
- input[3],
- ));
- }
- accum |= u32::from(morsel) << 8;
-
- output[..3].copy_from_slice(&accum.to_be_bytes()[..3]);
-
- Ok(())
-}
-
-#[cfg(test)]
-mod tests {
- use super::*;
-
- use crate::engine::general_purpose::STANDARD;
-
- #[test]
- fn decode_chunk_8_writes_only_6_bytes() {
- let input = b"Zm9vYmFy"; // "foobar"
- let mut output = [0_u8, 1, 2, 3, 4, 5, 6, 7];
-
- decode_chunk_8(&input[..], 0, &STANDARD.decode_table, &mut output).unwrap();
- assert_eq!(&vec![b'f', b'o', b'o', b'b', b'a', b'r', 6, 7], &output);
- }
-
- #[test]
- fn decode_chunk_4_writes_only_3_bytes() {
- let input = b"Zm9v"; // "foobar"
- let mut output = [0_u8, 1, 2, 3];
-
- decode_chunk_4(&input[..], 0, &STANDARD.decode_table, &mut output).unwrap();
- assert_eq!(&vec![b'f', b'o', b'o', 3], &output);
- }
-
- #[test]
- fn estimate_short_lengths() {
- for (range, decoded_len_estimate) in [
- (0..=0, 0),
- (1..=4, 3),
- (5..=8, 6),
- (9..=12, 9),
- (13..=16, 12),
- (17..=20, 15),
- ] {
- for encoded_len in range {
- let estimate = GeneralPurposeEstimate::new(encoded_len);
- assert_eq!(decoded_len_estimate, estimate.decoded_len_estimate());
- }
- }
- }
-
- #[test]
- fn estimate_via_u128_inflation() {
- // cover both ends of usize
- (0..1000)
- .chain(usize::MAX - 1000..=usize::MAX)
- .for_each(|encoded_len| {
- // inflate to 128 bit type to be able to safely use the easy formulas
- let len_128 = encoded_len as u128;
-
- let estimate = GeneralPurposeEstimate::new(encoded_len);
- assert_eq!(
- (len_128 + 3) / 4 * 3,
- estimate.conservative_decoded_len as u128
- );
- })
- }
-}
diff --git a/vendor/base64/src/engine/general_purpose/decode_suffix.rs b/vendor/base64/src/engine/general_purpose/decode_suffix.rs
deleted file mode 100644
index 02aaf514..00000000
--- a/vendor/base64/src/engine/general_purpose/decode_suffix.rs
+++ /dev/null
@@ -1,162 +0,0 @@
-use crate::{
- engine::{general_purpose::INVALID_VALUE, DecodeMetadata, DecodePaddingMode},
- DecodeError, DecodeSliceError, PAD_BYTE,
-};
-
-/// Decode the last 0-4 bytes, checking for trailing set bits and padding per the provided
-/// parameters.
-///
-/// Returns the decode metadata representing the total number of bytes decoded, including the ones
-/// indicated as already written by `output_index`.
-pub(crate) fn decode_suffix(
- input: &[u8],
- input_index: usize,
- output: &mut [u8],
- mut output_index: usize,
- decode_table: &[u8; 256],
- decode_allow_trailing_bits: bool,
- padding_mode: DecodePaddingMode,
-) -> Result<DecodeMetadata, DecodeSliceError> {
- debug_assert!((input.len() - input_index) <= 4);
-
- // Decode any leftovers that might not be a complete input chunk of 4 bytes.
- // Use a u32 as a stack-resident 4 byte buffer.
- let mut morsels_in_leftover = 0;
- let mut padding_bytes_count = 0;
- // offset from input_index
- let mut first_padding_offset: usize = 0;
- let mut last_symbol = 0_u8;
- let mut morsels = [0_u8; 4];
-
- for (leftover_index, &b) in input[input_index..].iter().enumerate() {
- // '=' padding
- if b == PAD_BYTE {
- // There can be bad padding bytes in a few ways:
- // 1 - Padding with non-padding characters after it
- // 2 - Padding after zero or one characters in the current quad (should only
- // be after 2 or 3 chars)
- // 3 - More than two characters of padding. If 3 or 4 padding chars
- // are in the same quad, that implies it will be caught by #2.
- // If it spreads from one quad to another, it will be an invalid byte
- // in the first quad.
- // 4 - Non-canonical padding -- 1 byte when it should be 2, etc.
- // Per config, non-canonical but still functional non- or partially-padded base64
- // may be treated as an error condition.
-
- if leftover_index < 2 {
- // Check for error #2.
- // Either the previous byte was padding, in which case we would have already hit
- // this case, or it wasn't, in which case this is the first such error.
- debug_assert!(
- leftover_index == 0 || (leftover_index == 1 && padding_bytes_count == 0)
- );
- let bad_padding_index = input_index + leftover_index;
- return Err(DecodeError::InvalidByte(bad_padding_index, b).into());
- }
-
- if padding_bytes_count == 0 {
- first_padding_offset = leftover_index;
- }
-
- padding_bytes_count += 1;
- continue;
- }
-
- // Check for case #1.
- // To make '=' handling consistent with the main loop, don't allow
- // non-suffix '=' in trailing chunk either. Report error as first
- // erroneous padding.
- if padding_bytes_count > 0 {
- return Err(
- DecodeError::InvalidByte(input_index + first_padding_offset, PAD_BYTE).into(),
- );
- }
-
- last_symbol = b;
-
- // can use up to 8 * 6 = 48 bits of the u64, if last chunk has no padding.
- // Pack the leftovers from left to right.
- let morsel = decode_table[b as usize];
- if morsel == INVALID_VALUE {
- return Err(DecodeError::InvalidByte(input_index + leftover_index, b).into());
- }
-
- morsels[morsels_in_leftover] = morsel;
- morsels_in_leftover += 1;
- }
-
- // If there was 1 trailing byte, and it was valid, and we got to this point without hitting
- // an invalid byte, now we can report invalid length
- if !input.is_empty() && morsels_in_leftover < 2 {
- return Err(DecodeError::InvalidLength(input_index + morsels_in_leftover).into());
- }
-
- match padding_mode {
- DecodePaddingMode::Indifferent => { /* everything we care about was already checked */ }
- DecodePaddingMode::RequireCanonical => {
- // allow empty input
- if (padding_bytes_count + morsels_in_leftover) % 4 != 0 {
- return Err(DecodeError::InvalidPadding.into());
- }
- }
- DecodePaddingMode::RequireNone => {
- if padding_bytes_count > 0 {
- // check at the end to make sure we let the cases of padding that should be InvalidByte
- // get hit
- return Err(DecodeError::InvalidPadding.into());
- }
- }
- }
-
- // When encoding 1 trailing byte (e.g. 0xFF), 2 base64 bytes ("/w") are needed.
- // / is the symbol for 63 (0x3F, bottom 6 bits all set) and w is 48 (0x30, top 2 bits
- // of bottom 6 bits set).
- // When decoding two symbols back to one trailing byte, any final symbol higher than
- // w would still decode to the original byte because we only care about the top two
- // bits in the bottom 6, but would be a non-canonical encoding. So, we calculate a
- // mask based on how many bits are used for just the canonical encoding, and optionally
- // error if any other bits are set. In the example of one encoded byte -> 2 symbols,
- // 2 symbols can technically encode 12 bits, but the last 4 are non-canonical, and
- // useless since there are no more symbols to provide the necessary 4 additional bits
- // to finish the second original byte.
-
- let leftover_bytes_to_append = morsels_in_leftover * 6 / 8;
- // Put the up to 6 complete bytes as the high bytes.
- // Gain a couple percent speedup from nudging these ORs to use more ILP with a two-way split.
- let mut leftover_num = (u32::from(morsels[0]) << 26)
- | (u32::from(morsels[1]) << 20)
- | (u32::from(morsels[2]) << 14)
- | (u32::from(morsels[3]) << 8);
-
- // if there are bits set outside the bits we care about, last symbol encodes trailing bits that
- // will not be included in the output
- let mask = !0_u32 >> (leftover_bytes_to_append * 8);
- if !decode_allow_trailing_bits && (leftover_num & mask) != 0 {
- // last morsel is at `morsels_in_leftover` - 1
- return Err(DecodeError::InvalidLastSymbol(
- input_index + morsels_in_leftover - 1,
- last_symbol,
- )
- .into());
- }
-
- // Strangely, this approach benchmarks better than writing bytes one at a time,
- // or copy_from_slice into output.
- for _ in 0..leftover_bytes_to_append {
- let hi_byte = (leftover_num >> 24) as u8;
- leftover_num <<= 8;
- *output
- .get_mut(output_index)
- .ok_or(DecodeSliceError::OutputSliceTooSmall)? = hi_byte;
- output_index += 1;
- }
-
- Ok(DecodeMetadata::new(
- output_index,
- if padding_bytes_count > 0 {
- Some(input_index + first_padding_offset)
- } else {
- None
- },
- ))
-}
diff --git a/vendor/base64/src/engine/general_purpose/mod.rs b/vendor/base64/src/engine/general_purpose/mod.rs
deleted file mode 100644
index 6fe95809..00000000
--- a/vendor/base64/src/engine/general_purpose/mod.rs
+++ /dev/null
@@ -1,352 +0,0 @@
-//! Provides the [GeneralPurpose] engine and associated config types.
-use crate::{
- alphabet,
- alphabet::Alphabet,
- engine::{Config, DecodeMetadata, DecodePaddingMode},
- DecodeSliceError,
-};
-use core::convert::TryInto;
-
-pub(crate) mod decode;
-pub(crate) mod decode_suffix;
-
-pub use decode::GeneralPurposeEstimate;
-
-pub(crate) const INVALID_VALUE: u8 = 255;
-
-/// A general-purpose base64 engine.
-///
-/// - It uses no vector CPU instructions, so it will work on any system.
-/// - It is reasonably fast (~2-3GiB/s).
-/// - It is not constant-time, though, so it is vulnerable to timing side-channel attacks. For loading cryptographic keys, etc, it is suggested to use the forthcoming constant-time implementation.
-
-#[derive(Debug, Clone)]
-pub struct GeneralPurpose {
- encode_table: [u8; 64],
- decode_table: [u8; 256],
- config: GeneralPurposeConfig,
-}
-
-impl GeneralPurpose {
- /// Create a `GeneralPurpose` engine from an [Alphabet].
- ///
- /// While not very expensive to initialize, ideally these should be cached
- /// if the engine will be used repeatedly.
- pub const fn new(alphabet: &Alphabet, config: GeneralPurposeConfig) -> Self {
- Self {
- encode_table: encode_table(alphabet),
- decode_table: decode_table(alphabet),
- config,
- }
- }
-}
-
-impl super::Engine for GeneralPurpose {
- type Config = GeneralPurposeConfig;
- type DecodeEstimate = GeneralPurposeEstimate;
-
- fn internal_encode(&self, input: &[u8], output: &mut [u8]) -> usize {
- let mut input_index: usize = 0;
-
- const BLOCKS_PER_FAST_LOOP: usize = 4;
- const LOW_SIX_BITS: u64 = 0x3F;
-
- // we read 8 bytes at a time (u64) but only actually consume 6 of those bytes. Thus, we need
- // 2 trailing bytes to be available to read.
- let last_fast_index = input.len().saturating_sub(BLOCKS_PER_FAST_LOOP * 6 + 2);
- let mut output_index = 0;
-
- if last_fast_index > 0 {
- while input_index <= last_fast_index {
- // Major performance wins from letting the optimizer do the bounds check once, mostly
- // on the output side
- let input_chunk =
- &input[input_index..(input_index + (BLOCKS_PER_FAST_LOOP * 6 + 2))];
- let output_chunk =
- &mut output[output_index..(output_index + BLOCKS_PER_FAST_LOOP * 8)];
-
- // Hand-unrolling for 32 vs 16 or 8 bytes yields performance about equivalent
- // to unsafe pointer code on a Xeon E5-1650v3. 64 byte unrolling was slightly better for
- // large inputs but significantly worse for 50-byte input, unsurprisingly. I suspect
- // that it's a not uncommon use case to encode smallish chunks of data (e.g. a 64-byte
- // SHA-512 digest), so it would be nice if that fit in the unrolled loop at least once.
- // Plus, single-digit percentage performance differences might well be quite different
- // on different hardware.
-
- let input_u64 = read_u64(&input_chunk[0..]);
-
- output_chunk[0] = self.encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize];
- output_chunk[1] = self.encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize];
- output_chunk[2] = self.encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize];
- output_chunk[3] = self.encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize];
- output_chunk[4] = self.encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize];
- output_chunk[5] = self.encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize];
- output_chunk[6] = self.encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize];
- output_chunk[7] = self.encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize];
-
- let input_u64 = read_u64(&input_chunk[6..]);
-
- output_chunk[8] = self.encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize];
- output_chunk[9] = self.encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize];
- output_chunk[10] = self.encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize];
- output_chunk[11] = self.encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize];
- output_chunk[12] = self.encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize];
- output_chunk[13] = self.encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize];
- output_chunk[14] = self.encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize];
- output_chunk[15] = self.encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize];
-
- let input_u64 = read_u64(&input_chunk[12..]);
-
- output_chunk[16] = self.encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize];
- output_chunk[17] = self.encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize];
- output_chunk[18] = self.encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize];
- output_chunk[19] = self.encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize];
- output_chunk[20] = self.encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize];
- output_chunk[21] = self.encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize];
- output_chunk[22] = self.encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize];
- output_chunk[23] = self.encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize];
-
- let input_u64 = read_u64(&input_chunk[18..]);
-
- output_chunk[24] = self.encode_table[((input_u64 >> 58) & LOW_SIX_BITS) as usize];
- output_chunk[25] = self.encode_table[((input_u64 >> 52) & LOW_SIX_BITS) as usize];
- output_chunk[26] = self.encode_table[((input_u64 >> 46) & LOW_SIX_BITS) as usize];
- output_chunk[27] = self.encode_table[((input_u64 >> 40) & LOW_SIX_BITS) as usize];
- output_chunk[28] = self.encode_table[((input_u64 >> 34) & LOW_SIX_BITS) as usize];
- output_chunk[29] = self.encode_table[((input_u64 >> 28) & LOW_SIX_BITS) as usize];
- output_chunk[30] = self.encode_table[((input_u64 >> 22) & LOW_SIX_BITS) as usize];
- output_chunk[31] = self.encode_table[((input_u64 >> 16) & LOW_SIX_BITS) as usize];
-
- output_index += BLOCKS_PER_FAST_LOOP * 8;
- input_index += BLOCKS_PER_FAST_LOOP * 6;
- }
- }
-
- // Encode what's left after the fast loop.
-
- const LOW_SIX_BITS_U8: u8 = 0x3F;
-
- let rem = input.len() % 3;
- let start_of_rem = input.len() - rem;
-
- // start at the first index not handled by fast loop, which may be 0.
-
- while input_index < start_of_rem {
- let input_chunk = &input[input_index..(input_index + 3)];
- let output_chunk = &mut output[output_index..(output_index + 4)];
-
- output_chunk[0] = self.encode_table[(input_chunk[0] >> 2) as usize];
- output_chunk[1] = self.encode_table
- [((input_chunk[0] << 4 | input_chunk[1] >> 4) & LOW_SIX_BITS_U8) as usize];
- output_chunk[2] = self.encode_table
- [((input_chunk[1] << 2 | input_chunk[2] >> 6) & LOW_SIX_BITS_U8) as usize];
- output_chunk[3] = self.encode_table[(input_chunk[2] & LOW_SIX_BITS_U8) as usize];
-
- input_index += 3;
- output_index += 4;
- }
-
- if rem == 2 {
- output[output_index] = self.encode_table[(input[start_of_rem] >> 2) as usize];
- output[output_index + 1] =
- self.encode_table[((input[start_of_rem] << 4 | input[start_of_rem + 1] >> 4)
- & LOW_SIX_BITS_U8) as usize];
- output[output_index + 2] =
- self.encode_table[((input[start_of_rem + 1] << 2) & LOW_SIX_BITS_U8) as usize];
- output_index += 3;
- } else if rem == 1 {
- output[output_index] = self.encode_table[(input[start_of_rem] >> 2) as usize];
- output[output_index + 1] =
- self.encode_table[((input[start_of_rem] << 4) & LOW_SIX_BITS_U8) as usize];
- output_index += 2;
- }
-
- output_index
- }
-
- fn internal_decoded_len_estimate(&self, input_len: usize) -> Self::DecodeEstimate {
- GeneralPurposeEstimate::new(input_len)
- }
-
- fn internal_decode(
- &self,
- input: &[u8],
- output: &mut [u8],
- estimate: Self::DecodeEstimate,
- ) -> Result<DecodeMetadata, DecodeSliceError> {
- decode::decode_helper(
- input,
- estimate,
- output,
- &self.decode_table,
- self.config.decode_allow_trailing_bits,
- self.config.decode_padding_mode,
- )
- }
-
- fn config(&self) -> &Self::Config {
- &self.config
- }
-}
-
-/// Returns a table mapping a 6-bit index to the ASCII byte encoding of the index
-pub(crate) const fn encode_table(alphabet: &Alphabet) -> [u8; 64] {
- // the encode table is just the alphabet:
- // 6-bit index lookup -> printable byte
- let mut encode_table = [0_u8; 64];
- {
- let mut index = 0;
- while index < 64 {
- encode_table[index] = alphabet.symbols[index];
- index += 1;
- }
- }
-
- encode_table
-}
-
-/// Returns a table mapping base64 bytes as the lookup index to either:
-/// - [INVALID_VALUE] for bytes that aren't members of the alphabet
-/// - a byte whose lower 6 bits are the value that was encoded into the index byte
-pub(crate) const fn decode_table(alphabet: &Alphabet) -> [u8; 256] {
- let mut decode_table = [INVALID_VALUE; 256];
-
- // Since the table is full of `INVALID_VALUE` already, we only need to overwrite
- // the parts that are valid.
- let mut index = 0;
- while index < 64 {
- // The index in the alphabet is the 6-bit value we care about.
- // Since the index is in 0-63, it is safe to cast to u8.
- decode_table[alphabet.symbols[index] as usize] = index as u8;
- index += 1;
- }
-
- decode_table
-}
-
-#[inline]
-fn read_u64(s: &[u8]) -> u64 {
- u64::from_be_bytes(s[..8].try_into().unwrap())
-}
-
-/// Contains configuration parameters for base64 encoding and decoding.
-///
-/// ```
-/// # use base64::engine::GeneralPurposeConfig;
-/// let config = GeneralPurposeConfig::new()
-/// .with_encode_padding(false);
-/// // further customize using `.with_*` methods as needed
-/// ```
-///
-/// The constants [PAD] and [NO_PAD] cover most use cases.
-///
-/// To specify the characters used, see [Alphabet].
-#[derive(Clone, Copy, Debug)]
-pub struct GeneralPurposeConfig {
- encode_padding: bool,
- decode_allow_trailing_bits: bool,
- decode_padding_mode: DecodePaddingMode,
-}
-
-impl GeneralPurposeConfig {
- /// Create a new config with `padding` = `true`, `decode_allow_trailing_bits` = `false`, and
- /// `decode_padding_mode = DecodePaddingMode::RequireCanonicalPadding`.
- ///
- /// This probably matches most people's expectations, but consider disabling padding to save
- /// a few bytes unless you specifically need it for compatibility with some legacy system.
- pub const fn new() -> Self {
- Self {
- // RFC states that padding must be applied by default
- encode_padding: true,
- decode_allow_trailing_bits: false,
- decode_padding_mode: DecodePaddingMode::RequireCanonical,
- }
- }
-
- /// Create a new config based on `self` with an updated `padding` setting.
- ///
- /// If `padding` is `true`, encoding will append either 1 or 2 `=` padding characters as needed
- /// to produce an output whose length is a multiple of 4.
- ///
- /// Padding is not needed for correct decoding and only serves to waste bytes, but it's in the
- /// [spec](https://datatracker.ietf.org/doc/html/rfc4648#section-3.2).
- ///
- /// For new applications, consider not using padding if the decoders you're using don't require
- /// padding to be present.
- pub const fn with_encode_padding(self, padding: bool) -> Self {
- Self {
- encode_padding: padding,
- ..self
- }
- }
-
- /// Create a new config based on `self` with an updated `decode_allow_trailing_bits` setting.
- ///
- /// Most users will not need to configure this. It's useful if you need to decode base64
- /// produced by a buggy encoder that has bits set in the unused space on the last base64
- /// character as per [forgiving-base64 decode](https://infra.spec.whatwg.org/#forgiving-base64-decode).
- /// If invalid trailing bits are present and this is `true`, those bits will
- /// be silently ignored, else `DecodeError::InvalidLastSymbol` will be emitted.
- pub const fn with_decode_allow_trailing_bits(self, allow: bool) -> Self {
- Self {
- decode_allow_trailing_bits: allow,
- ..self
- }
- }
-
- /// Create a new config based on `self` with an updated `decode_padding_mode` setting.
- ///
- /// Padding is not useful in terms of representing encoded data -- it makes no difference to
- /// the decoder if padding is present or not, so if you have some un-padded input to decode, it
- /// is perfectly fine to use `DecodePaddingMode::Indifferent` to prevent errors from being
- /// emitted.
- ///
- /// However, since in practice
- /// [people who learned nothing from BER vs DER seem to expect base64 to have one canonical encoding](https://eprint.iacr.org/2022/361),
- /// the default setting is the stricter `DecodePaddingMode::RequireCanonicalPadding`.
- ///
- /// Or, if "canonical" in your circumstance means _no_ padding rather than padding to the
- /// next multiple of four, there's `DecodePaddingMode::RequireNoPadding`.
- pub const fn with_decode_padding_mode(self, mode: DecodePaddingMode) -> Self {
- Self {
- decode_padding_mode: mode,
- ..self
- }
- }
-}
-
-impl Default for GeneralPurposeConfig {
- /// Delegates to [GeneralPurposeConfig::new].
- fn default() -> Self {
- Self::new()
- }
-}
-
-impl Config for GeneralPurposeConfig {
- fn encode_padding(&self) -> bool {
- self.encode_padding
- }
-}
-
-/// A [GeneralPurpose] engine using the [alphabet::STANDARD] base64 alphabet and [PAD] config.
-pub const STANDARD: GeneralPurpose = GeneralPurpose::new(&alphabet::STANDARD, PAD);
-
-/// A [GeneralPurpose] engine using the [alphabet::STANDARD] base64 alphabet and [NO_PAD] config.
-pub const STANDARD_NO_PAD: GeneralPurpose = GeneralPurpose::new(&alphabet::STANDARD, NO_PAD);
-
-/// A [GeneralPurpose] engine using the [alphabet::URL_SAFE] base64 alphabet and [PAD] config.
-pub const URL_SAFE: GeneralPurpose = GeneralPurpose::new(&alphabet::URL_SAFE, PAD);
-
-/// A [GeneralPurpose] engine using the [alphabet::URL_SAFE] base64 alphabet and [NO_PAD] config.
-pub const URL_SAFE_NO_PAD: GeneralPurpose = GeneralPurpose::new(&alphabet::URL_SAFE, NO_PAD);
-
-/// Include padding bytes when encoding, and require that they be present when decoding.
-///
-/// This is the standard per the base64 RFC, but consider using [NO_PAD] instead as padding serves
-/// little purpose in practice.
-pub const PAD: GeneralPurposeConfig = GeneralPurposeConfig::new();
-
-/// Don't add padding when encoding, and require no padding when decoding.
-pub const NO_PAD: GeneralPurposeConfig = GeneralPurposeConfig::new()
- .with_encode_padding(false)
- .with_decode_padding_mode(DecodePaddingMode::RequireNone);
diff --git a/vendor/base64/src/engine/mod.rs b/vendor/base64/src/engine/mod.rs
deleted file mode 100644
index f2cc33f6..00000000
--- a/vendor/base64/src/engine/mod.rs
+++ /dev/null
@@ -1,478 +0,0 @@
-//! Provides the [Engine] abstraction and out of the box implementations.
-#[cfg(any(feature = "alloc", test))]
-use crate::chunked_encoder;
-use crate::{
- encode::{encode_with_padding, EncodeSliceError},
- encoded_len, DecodeError, DecodeSliceError,
-};
-#[cfg(any(feature = "alloc", test))]
-use alloc::vec::Vec;
-
-#[cfg(any(feature = "alloc", test))]
-use alloc::{string::String, vec};
-
-pub mod general_purpose;
-
-#[cfg(test)]
-mod naive;
-
-#[cfg(test)]
-mod tests;
-
-pub use general_purpose::{GeneralPurpose, GeneralPurposeConfig};
-
-/// An `Engine` provides low-level encoding and decoding operations that all other higher-level parts of the API use. Users of the library will generally not need to implement this.
-///
-/// Different implementations offer different characteristics. The library currently ships with
-/// [GeneralPurpose] that offers good speed and works on any CPU, with more choices
-/// coming later, like a constant-time one when side channel resistance is called for, and vendor-specific vectorized ones for more speed.
-///
-/// See [general_purpose::STANDARD_NO_PAD] if you just want standard base64. Otherwise, when possible, it's
-/// recommended to store the engine in a `const` so that references to it won't pose any lifetime
-/// issues, and to avoid repeating the cost of engine setup.
-///
-/// Since almost nobody will need to implement `Engine`, docs for internal methods are hidden.
-// When adding an implementation of Engine, include them in the engine test suite:
-// - add an implementation of [engine::tests::EngineWrapper]
-// - add the implementation to the `all_engines` macro
-// All tests run on all engines listed in the macro.
-pub trait Engine: Send + Sync {
- /// The config type used by this engine
- type Config: Config;
- /// The decode estimate used by this engine
- type DecodeEstimate: DecodeEstimate;
-
- /// This is not meant to be called directly; it is only for `Engine` implementors.
- /// See the other `encode*` functions on this trait.
- ///
- /// Encode the `input` bytes into the `output` buffer based on the mapping in `encode_table`.
- ///
- /// `output` will be long enough to hold the encoded data.
- ///
- /// Returns the number of bytes written.
- ///
- /// No padding should be written; that is handled separately.
- ///
- /// Must not write any bytes into the output slice other than the encoded data.
- #[doc(hidden)]
- fn internal_encode(&self, input: &[u8], output: &mut [u8]) -> usize;
-
- /// This is not meant to be called directly; it is only for `Engine` implementors.
- ///
- /// As an optimization to prevent the decoded length from being calculated twice, it is
- /// sometimes helpful to have a conservative estimate of the decoded size before doing the
- /// decoding, so this calculation is done separately and passed to [Engine::decode()] as needed.
- #[doc(hidden)]
- fn internal_decoded_len_estimate(&self, input_len: usize) -> Self::DecodeEstimate;
-
- /// This is not meant to be called directly; it is only for `Engine` implementors.
- /// See the other `decode*` functions on this trait.
- ///
- /// Decode `input` base64 bytes into the `output` buffer.
- ///
- /// `decode_estimate` is the result of [Engine::internal_decoded_len_estimate()], which is passed in to avoid
- /// calculating it again (expensive on short inputs).
- ///
- /// Each complete 4-byte chunk of encoded data decodes to 3 bytes of decoded data, but this
- /// function must also handle the final possibly partial chunk.
- /// If the input length is not a multiple of 4, or uses padding bytes to reach a multiple of 4,
- /// the trailing 2 or 3 bytes must decode to 1 or 2 bytes, respectively, as per the
- /// [RFC](https://tools.ietf.org/html/rfc4648#section-3.5).
- ///
- /// Decoding must not write any bytes into the output slice other than the decoded data.
- ///
- /// Non-canonical trailing bits in the final tokens or non-canonical padding must be reported as
- /// errors unless the engine is configured otherwise.
- #[doc(hidden)]
- fn internal_decode(
- &self,
- input: &[u8],
- output: &mut [u8],
- decode_estimate: Self::DecodeEstimate,
- ) -> Result<DecodeMetadata, DecodeSliceError>;
-
- /// Returns the config for this engine.
- fn config(&self) -> &Self::Config;
-
- /// Encode arbitrary octets as base64 using the provided `Engine`.
- /// Returns a `String`.
- ///
- /// # Example
- ///
- /// ```rust
- /// use base64::{Engine as _, engine::{self, general_purpose}, alphabet};
- ///
- /// let b64 = general_purpose::STANDARD.encode(b"hello world~");
- /// println!("{}", b64);
- ///
- /// const CUSTOM_ENGINE: engine::GeneralPurpose =
- /// engine::GeneralPurpose::new(&alphabet::URL_SAFE, general_purpose::NO_PAD);
- ///
- /// let b64_url = CUSTOM_ENGINE.encode(b"hello internet~");
- /// ```
- #[cfg(any(feature = "alloc", test))]
- #[inline]
- fn encode<T: AsRef<[u8]>>(&self, input: T) -> String {
- fn inner<E>(engine: &E, input_bytes: &[u8]) -> String
- where
- E: Engine + ?Sized,
- {
- let encoded_size = encoded_len(input_bytes.len(), engine.config().encode_padding())
- .expect("integer overflow when calculating buffer size");
-
- let mut buf = vec![0; encoded_size];
-
- encode_with_padding(input_bytes, &mut buf[..], engine, encoded_size);
-
- String::from_utf8(buf).expect("Invalid UTF8")
- }
-
- inner(self, input.as_ref())
- }
-
- /// Encode arbitrary octets as base64 into a supplied `String`.
- /// Writes into the supplied `String`, which may allocate if its internal buffer isn't big enough.
- ///
- /// # Example
- ///
- /// ```rust
- /// use base64::{Engine as _, engine::{self, general_purpose}, alphabet};
- /// const CUSTOM_ENGINE: engine::GeneralPurpose =
- /// engine::GeneralPurpose::new(&alphabet::URL_SAFE, general_purpose::NO_PAD);
- ///
- /// fn main() {
- /// let mut buf = String::new();
- /// general_purpose::STANDARD.encode_string(b"hello world~", &mut buf);
- /// println!("{}", buf);
- ///
- /// buf.clear();
- /// CUSTOM_ENGINE.encode_string(b"hello internet~", &mut buf);
- /// println!("{}", buf);
- /// }
- /// ```
- #[cfg(any(feature = "alloc", test))]
- #[inline]
- fn encode_string<T: AsRef<[u8]>>(&self, input: T, output_buf: &mut String) {
- fn inner<E>(engine: &E, input_bytes: &[u8], output_buf: &mut String)
- where
- E: Engine + ?Sized,
- {
- let mut sink = chunked_encoder::StringSink::new(output_buf);
-
- chunked_encoder::ChunkedEncoder::new(engine)
- .encode(input_bytes, &mut sink)
- .expect("Writing to a String shouldn't fail");
- }
-
- inner(self, input.as_ref(), output_buf)
- }
-
- /// Encode arbitrary octets as base64 into a supplied slice.
- /// Writes into the supplied output buffer.
- ///
- /// This is useful if you wish to avoid allocation entirely (e.g. encoding into a stack-resident
- /// or statically-allocated buffer).
- ///
- /// # Example
- ///
- #[cfg_attr(feature = "alloc", doc = "```")]
- #[cfg_attr(not(feature = "alloc"), doc = "```ignore")]
- /// use base64::{Engine as _, engine::general_purpose};
- /// let s = b"hello internet!";
- /// let mut buf = Vec::new();
- /// // make sure we'll have a slice big enough for base64 + padding
- /// buf.resize(s.len() * 4 / 3 + 4, 0);
- ///
- /// let bytes_written = general_purpose::STANDARD.encode_slice(s, &mut buf).unwrap();
- ///
- /// // shorten our vec down to just what was written
- /// buf.truncate(bytes_written);
- ///
- /// assert_eq!(s, general_purpose::STANDARD.decode(&buf).unwrap().as_slice());
- /// ```
- #[inline]
- fn encode_slice<T: AsRef<[u8]>>(
- &self,
- input: T,
- output_buf: &mut [u8],
- ) -> Result<usize, EncodeSliceError> {
- fn inner<E>(
- engine: &E,
- input_bytes: &[u8],
- output_buf: &mut [u8],
- ) -> Result<usize, EncodeSliceError>
- where
- E: Engine + ?Sized,
- {
- let encoded_size = encoded_len(input_bytes.len(), engine.config().encode_padding())
- .expect("usize overflow when calculating buffer size");
-
- if output_buf.len() < encoded_size {
- return Err(EncodeSliceError::OutputSliceTooSmall);
- }
-
- let b64_output = &mut output_buf[0..encoded_size];
-
- encode_with_padding(input_bytes, b64_output, engine, encoded_size);
-
- Ok(encoded_size)
- }
-
- inner(self, input.as_ref(), output_buf)
- }
-
- /// Decode the input into a new `Vec`.
- ///
- /// # Example
- ///
- /// ```rust
- /// use base64::{Engine as _, alphabet, engine::{self, general_purpose}};
- ///
- /// let bytes = general_purpose::STANDARD
- /// .decode("aGVsbG8gd29ybGR+Cg==").unwrap();
- /// println!("{:?}", bytes);
- ///
- /// // custom engine setup
- /// let bytes_url = engine::GeneralPurpose::new(
- /// &alphabet::URL_SAFE,
- /// general_purpose::NO_PAD)
- /// .decode("aGVsbG8gaW50ZXJuZXR-Cg").unwrap();
- /// println!("{:?}", bytes_url);
- /// ```
- #[cfg(any(feature = "alloc", test))]
- #[inline]
- fn decode<T: AsRef<[u8]>>(&self, input: T) -> Result<Vec<u8>, DecodeError> {
- fn inner<E>(engine: &E, input_bytes: &[u8]) -> Result<Vec<u8>, DecodeError>
- where
- E: Engine + ?Sized,
- {
- let estimate = engine.internal_decoded_len_estimate(input_bytes.len());
- let mut buffer = vec![0; estimate.decoded_len_estimate()];
-
- let bytes_written = engine
- .internal_decode(input_bytes, &mut buffer, estimate)
- .map_err(|e| match e {
- DecodeSliceError::DecodeError(e) => e,
- DecodeSliceError::OutputSliceTooSmall => {
- unreachable!("Vec is sized conservatively")
- }
- })?
- .decoded_len;
-
- buffer.truncate(bytes_written);
-
- Ok(buffer)
- }
-
- inner(self, input.as_ref())
- }
-
- /// Decode the `input` into the supplied `buffer`.
- ///
- /// Writes into the supplied `Vec`, which may allocate if its internal buffer isn't big enough.
- /// Returns a `Result` containing an empty tuple, aka `()`.
- ///
- /// # Example
- ///
- /// ```rust
- /// use base64::{Engine as _, alphabet, engine::{self, general_purpose}};
- /// const CUSTOM_ENGINE: engine::GeneralPurpose =
- /// engine::GeneralPurpose::new(&alphabet::URL_SAFE, general_purpose::PAD);
- ///
- /// fn main() {
- /// use base64::Engine;
- /// let mut buffer = Vec::<u8>::new();
- /// // with the default engine
- /// general_purpose::STANDARD
- /// .decode_vec("aGVsbG8gd29ybGR+Cg==", &mut buffer,).unwrap();
- /// println!("{:?}", buffer);
- ///
- /// buffer.clear();
- ///
- /// // with a custom engine
- /// CUSTOM_ENGINE.decode_vec(
- /// "aGVsbG8gaW50ZXJuZXR-Cg==",
- /// &mut buffer,
- /// ).unwrap();
- /// println!("{:?}", buffer);
- /// }
- /// ```
- #[cfg(any(feature = "alloc", test))]
- #[inline]
- fn decode_vec<T: AsRef<[u8]>>(
- &self,
- input: T,
- buffer: &mut Vec<u8>,
- ) -> Result<(), DecodeError> {
- fn inner<E>(engine: &E, input_bytes: &[u8], buffer: &mut Vec<u8>) -> Result<(), DecodeError>
- where
- E: Engine + ?Sized,
- {
- let starting_output_len = buffer.len();
- let estimate = engine.internal_decoded_len_estimate(input_bytes.len());
-
- let total_len_estimate = estimate
- .decoded_len_estimate()
- .checked_add(starting_output_len)
- .expect("Overflow when calculating output buffer length");
-
- buffer.resize(total_len_estimate, 0);
-
- let buffer_slice = &mut buffer.as_mut_slice()[starting_output_len..];
-
- let bytes_written = engine
- .internal_decode(input_bytes, buffer_slice, estimate)
- .map_err(|e| match e {
- DecodeSliceError::DecodeError(e) => e,
- DecodeSliceError::OutputSliceTooSmall => {
- unreachable!("Vec is sized conservatively")
- }
- })?
- .decoded_len;
-
- buffer.truncate(starting_output_len + bytes_written);
-
- Ok(())
- }
-
- inner(self, input.as_ref(), buffer)
- }
-
- /// Decode the input into the provided output slice.
- ///
- /// Returns the number of bytes written to the slice, or an error if `output` is smaller than
- /// the estimated decoded length.
- ///
- /// This will not write any bytes past exactly what is decoded (no stray garbage bytes at the end).
- ///
- /// See [crate::decoded_len_estimate] for calculating buffer sizes.
- ///
- /// See [Engine::decode_slice_unchecked] for a version that panics instead of returning an error
- /// if the output buffer is too small.
- #[inline]
- fn decode_slice<T: AsRef<[u8]>>(
- &self,
- input: T,
- output: &mut [u8],
- ) -> Result<usize, DecodeSliceError> {
- fn inner<E>(
- engine: &E,
- input_bytes: &[u8],
- output: &mut [u8],
- ) -> Result<usize, DecodeSliceError>
- where
- E: Engine + ?Sized,
- {
- engine
- .internal_decode(
- input_bytes,
- output,
- engine.internal_decoded_len_estimate(input_bytes.len()),
- )
- .map(|dm| dm.decoded_len)
- }
-
- inner(self, input.as_ref(), output)
- }
-
- /// Decode the input into the provided output slice.
- ///
- /// Returns the number of bytes written to the slice.
- ///
- /// This will not write any bytes past exactly what is decoded (no stray garbage bytes at the end).
- ///
- /// See [crate::decoded_len_estimate] for calculating buffer sizes.
- ///
- /// See [Engine::decode_slice] for a version that returns an error instead of panicking if the output
- /// buffer is too small.
- ///
- /// # Panics
- ///
- /// Panics if the provided output buffer is too small for the decoded data.
- #[inline]
- fn decode_slice_unchecked<T: AsRef<[u8]>>(
- &self,
- input: T,
- output: &mut [u8],
- ) -> Result<usize, DecodeError> {
- fn inner<E>(engine: &E, input_bytes: &[u8], output: &mut [u8]) -> Result<usize, DecodeError>
- where
- E: Engine + ?Sized,
- {
- engine
- .internal_decode(
- input_bytes,
- output,
- engine.internal_decoded_len_estimate(input_bytes.len()),
- )
- .map(|dm| dm.decoded_len)
- .map_err(|e| match e {
- DecodeSliceError::DecodeError(e) => e,
- DecodeSliceError::OutputSliceTooSmall => {
- panic!("Output slice is too small")
- }
- })
- }
-
- inner(self, input.as_ref(), output)
- }
-}
-
-/// The minimal level of configuration that engines must support.
-pub trait Config {
- /// Returns `true` if padding should be added after the encoded output.
- ///
- /// Padding is added outside the engine's encode() since the engine may be used
- /// to encode only a chunk of the overall output, so it can't always know when
- /// the output is "done" and would therefore need padding (if configured).
- // It could be provided as a separate parameter when encoding, but that feels like
- // leaking an implementation detail to the user, and it's hopefully more convenient
- // to have to only pass one thing (the engine) to any part of the API.
- fn encode_padding(&self) -> bool;
-}
-
-/// The decode estimate used by an engine implementation. Users do not need to interact with this;
-/// it is only for engine implementors.
-///
-/// Implementors may store relevant data here when constructing this to avoid having to calculate
-/// them again during actual decoding.
-pub trait DecodeEstimate {
- /// Returns a conservative (err on the side of too big) estimate of the decoded length to use
- /// for pre-allocating buffers, etc.
- ///
- /// The estimate must be no larger than the next largest complete triple of decoded bytes.
- /// That is, the final quad of tokens to decode may be assumed to be complete with no padding.
- fn decoded_len_estimate(&self) -> usize;
-}
-
-/// Controls how pad bytes are handled when decoding.
-///
-/// Each [Engine] must support at least the behavior indicated by
-/// [DecodePaddingMode::RequireCanonical], and may support other modes.
-#[derive(Clone, Copy, Debug, PartialEq, Eq)]
-pub enum DecodePaddingMode {
- /// Canonical padding is allowed, but any fewer padding bytes than that is also allowed.
- Indifferent,
- /// Padding must be canonical (0, 1, or 2 `=` as needed to produce a 4 byte suffix).
- RequireCanonical,
- /// Padding must be absent -- for when you want predictable padding, without any wasted bytes.
- RequireNone,
-}
-
-/// Metadata about the result of a decode operation
-#[derive(PartialEq, Eq, Debug)]
-pub struct DecodeMetadata {
- /// Number of decoded bytes output
- pub(crate) decoded_len: usize,
- /// Offset of the first padding byte in the input, if any
- pub(crate) padding_offset: Option<usize>,
-}
-
-impl DecodeMetadata {
- pub(crate) fn new(decoded_bytes: usize, padding_index: Option<usize>) -> Self {
- Self {
- decoded_len: decoded_bytes,
- padding_offset: padding_index,
- }
- }
-}
diff --git a/vendor/base64/src/engine/naive.rs b/vendor/base64/src/engine/naive.rs
deleted file mode 100644
index af509bfa..00000000
--- a/vendor/base64/src/engine/naive.rs
+++ /dev/null
@@ -1,195 +0,0 @@
-use crate::{
- alphabet::Alphabet,
- engine::{
- general_purpose::{self, decode_table, encode_table},
- Config, DecodeEstimate, DecodeMetadata, DecodePaddingMode, Engine,
- },
- DecodeError, DecodeSliceError,
-};
-use std::ops::{BitAnd, BitOr, Shl, Shr};
-
-/// Comparatively simple implementation that can be used as something to compare against in tests
-pub struct Naive {
- encode_table: [u8; 64],
- decode_table: [u8; 256],
- config: NaiveConfig,
-}
-
-impl Naive {
- const ENCODE_INPUT_CHUNK_SIZE: usize = 3;
- const DECODE_INPUT_CHUNK_SIZE: usize = 4;
-
- pub const fn new(alphabet: &Alphabet, config: NaiveConfig) -> Self {
- Self {
- encode_table: encode_table(alphabet),
- decode_table: decode_table(alphabet),
- config,
- }
- }
-
- fn decode_byte_into_u32(&self, offset: usize, byte: u8) -> Result<u32, DecodeError> {
- let decoded = self.decode_table[byte as usize];
-
- if decoded == general_purpose::INVALID_VALUE {
- return Err(DecodeError::InvalidByte(offset, byte));
- }
-
- Ok(decoded as u32)
- }
-}
-
-impl Engine for Naive {
- type Config = NaiveConfig;
- type DecodeEstimate = NaiveEstimate;
-
- fn internal_encode(&self, input: &[u8], output: &mut [u8]) -> usize {
- // complete chunks first
-
- const LOW_SIX_BITS: u32 = 0x3F;
-
- let rem = input.len() % Self::ENCODE_INPUT_CHUNK_SIZE;
- // will never underflow
- let complete_chunk_len = input.len() - rem;
-
- let mut input_index = 0_usize;
- let mut output_index = 0_usize;
- if let Some(last_complete_chunk_index) =
- complete_chunk_len.checked_sub(Self::ENCODE_INPUT_CHUNK_SIZE)
- {
- while input_index <= last_complete_chunk_index {
- let chunk = &input[input_index..input_index + Self::ENCODE_INPUT_CHUNK_SIZE];
-
- // populate low 24 bits from 3 bytes
- let chunk_int: u32 =
- (chunk[0] as u32).shl(16) | (chunk[1] as u32).shl(8) | (chunk[2] as u32);
- // encode 4x 6-bit output bytes
- output[output_index] = self.encode_table[chunk_int.shr(18) as usize];
- output[output_index + 1] =
- self.encode_table[chunk_int.shr(12_u8).bitand(LOW_SIX_BITS) as usize];
- output[output_index + 2] =
- self.encode_table[chunk_int.shr(6_u8).bitand(LOW_SIX_BITS) as usize];
- output[output_index + 3] =
- self.encode_table[chunk_int.bitand(LOW_SIX_BITS) as usize];
-
- input_index += Self::ENCODE_INPUT_CHUNK_SIZE;
- output_index += 4;
- }
- }
-
- // then leftovers
- if rem == 2 {
- let chunk = &input[input_index..input_index + 2];
-
- // high six bits of chunk[0]
- output[output_index] = self.encode_table[chunk[0].shr(2) as usize];
- // bottom 2 bits of [0], high 4 bits of [1]
- output[output_index + 1] =
- self.encode_table[(chunk[0].shl(4_u8).bitor(chunk[1].shr(4_u8)) as u32)
- .bitand(LOW_SIX_BITS) as usize];
- // bottom 4 bits of [1], with the 2 bottom bits as zero
- output[output_index + 2] =
- self.encode_table[(chunk[1].shl(2_u8) as u32).bitand(LOW_SIX_BITS) as usize];
-
- output_index += 3;
- } else if rem == 1 {
- let byte = input[input_index];
- output[output_index] = self.encode_table[byte.shr(2) as usize];
- output[output_index + 1] =
- self.encode_table[(byte.shl(4_u8) as u32).bitand(LOW_SIX_BITS) as usize];
- output_index += 2;
- }
-
- output_index
- }
-
- fn internal_decoded_len_estimate(&self, input_len: usize) -> Self::DecodeEstimate {
- NaiveEstimate::new(input_len)
- }
-
- fn internal_decode(
- &self,
- input: &[u8],
- output: &mut [u8],
- estimate: Self::DecodeEstimate,
- ) -> Result<DecodeMetadata, DecodeSliceError> {
- let complete_nonterminal_quads_len = general_purpose::decode::complete_quads_len(
- input,
- estimate.rem,
- output.len(),
- &self.decode_table,
- )?;
-
- const BOTTOM_BYTE: u32 = 0xFF;
-
- for (chunk_index, chunk) in input[..complete_nonterminal_quads_len]
- .chunks_exact(4)
- .enumerate()
- {
- let input_index = chunk_index * 4;
- let output_index = chunk_index * 3;
-
- let decoded_int: u32 = self.decode_byte_into_u32(input_index, chunk[0])?.shl(18)
- | self
- .decode_byte_into_u32(input_index + 1, chunk[1])?
- .shl(12)
- | self.decode_byte_into_u32(input_index + 2, chunk[2])?.shl(6)
- | self.decode_byte_into_u32(input_index + 3, chunk[3])?;
-
- output[output_index] = decoded_int.shr(16_u8).bitand(BOTTOM_BYTE) as u8;
- output[output_index + 1] = decoded_int.shr(8_u8).bitand(BOTTOM_BYTE) as u8;
- output[output_index + 2] = decoded_int.bitand(BOTTOM_BYTE) as u8;
- }
-
- general_purpose::decode_suffix::decode_suffix(
- input,
- complete_nonterminal_quads_len,
- output,
- complete_nonterminal_quads_len / 4 * 3,
- &self.decode_table,
- self.config.decode_allow_trailing_bits,
- self.config.decode_padding_mode,
- )
- }
-
- fn config(&self) -> &Self::Config {
- &self.config
- }
-}
-
-pub struct NaiveEstimate {
- /// remainder from dividing input by `Naive::DECODE_CHUNK_SIZE`
- rem: usize,
- /// Length of input that is in complete `Naive::DECODE_CHUNK_SIZE`-length chunks
- complete_chunk_len: usize,
-}
-
-impl NaiveEstimate {
- fn new(input_len: usize) -> Self {
- let rem = input_len % Naive::DECODE_INPUT_CHUNK_SIZE;
- let complete_chunk_len = input_len - rem;
-
- Self {
- rem,
- complete_chunk_len,
- }
- }
-}
-
-impl DecodeEstimate for NaiveEstimate {
- fn decoded_len_estimate(&self) -> usize {
- ((self.complete_chunk_len / 4) + ((self.rem > 0) as usize)) * 3
- }
-}
-
-#[derive(Clone, Copy, Debug)]
-pub struct NaiveConfig {
- pub encode_padding: bool,
- pub decode_allow_trailing_bits: bool,
- pub decode_padding_mode: DecodePaddingMode,
-}
-
-impl Config for NaiveConfig {
- fn encode_padding(&self) -> bool {
- self.encode_padding
- }
-}
diff --git a/vendor/base64/src/engine/tests.rs b/vendor/base64/src/engine/tests.rs
deleted file mode 100644
index 72bbf4bb..00000000
--- a/vendor/base64/src/engine/tests.rs
+++ /dev/null
@@ -1,1579 +0,0 @@
-// rstest_reuse template functions have unused variables
-#![allow(unused_variables)]
-
-use rand::{
- self,
- distributions::{self, Distribution as _},
- rngs, Rng as _, SeedableRng as _,
-};
-use rstest::rstest;
-use rstest_reuse::{apply, template};
-use std::{collections, fmt, io::Read as _};
-
-use crate::{
- alphabet::{Alphabet, STANDARD},
- encode::add_padding,
- encoded_len,
- engine::{
- general_purpose, naive, Config, DecodeEstimate, DecodeMetadata, DecodePaddingMode, Engine,
- },
- read::DecoderReader,
- tests::{assert_encode_sanity, random_alphabet, random_config},
- DecodeError, DecodeSliceError, PAD_BYTE,
-};
-
-// the case::foo syntax includes the "foo" in the generated test method names
-#[template]
-#[rstest(engine_wrapper,
-case::general_purpose(GeneralPurposeWrapper {}),
-case::naive(NaiveWrapper {}),
-case::decoder_reader(DecoderReaderEngineWrapper {}),
-)]
-fn all_engines<E: EngineWrapper>(engine_wrapper: E) {}
-
-/// Some decode tests don't make sense for use with `DecoderReader` as they are difficult to
-/// reason about or otherwise inapplicable given how DecoderReader slice up its input along
-/// chunk boundaries.
-#[template]
-#[rstest(engine_wrapper,
-case::general_purpose(GeneralPurposeWrapper {}),
-case::naive(NaiveWrapper {}),
-)]
-fn all_engines_except_decoder_reader<E: EngineWrapper>(engine_wrapper: E) {}
-
-#[apply(all_engines)]
-fn rfc_test_vectors_std_alphabet<E: EngineWrapper>(engine_wrapper: E) {
- let data = vec![
- ("", ""),
- ("f", "Zg=="),
- ("fo", "Zm8="),
- ("foo", "Zm9v"),
- ("foob", "Zm9vYg=="),
- ("fooba", "Zm9vYmE="),
- ("foobar", "Zm9vYmFy"),
- ];
-
- let engine = E::standard();
- let engine_no_padding = E::standard_unpadded();
-
- for (orig, encoded) in &data {
- let encoded_without_padding = encoded.trim_end_matches('=');
-
- // unpadded
- {
- let mut encode_buf = [0_u8; 8];
- let mut decode_buf = [0_u8; 6];
-
- let encode_len =
- engine_no_padding.internal_encode(orig.as_bytes(), &mut encode_buf[..]);
- assert_eq!(
- &encoded_without_padding,
- &std::str::from_utf8(&encode_buf[0..encode_len]).unwrap()
- );
- let decode_len = engine_no_padding
- .decode_slice_unchecked(encoded_without_padding.as_bytes(), &mut decode_buf[..])
- .unwrap();
- assert_eq!(orig.len(), decode_len);
-
- assert_eq!(
- orig,
- &std::str::from_utf8(&decode_buf[0..decode_len]).unwrap()
- );
-
- // if there was any padding originally, the no padding engine won't decode it
- if encoded.as_bytes().contains(&PAD_BYTE) {
- assert_eq!(
- Err(DecodeError::InvalidPadding),
- engine_no_padding.decode(encoded)
- )
- }
- }
-
- // padded
- {
- let mut encode_buf = [0_u8; 8];
- let mut decode_buf = [0_u8; 6];
-
- let encode_len = engine.internal_encode(orig.as_bytes(), &mut encode_buf[..]);
- assert_eq!(
- // doesn't have padding added yet
- &encoded_without_padding,
- &std::str::from_utf8(&encode_buf[0..encode_len]).unwrap()
- );
- let pad_len = add_padding(encode_len, &mut encode_buf[encode_len..]);
- assert_eq!(encoded.as_bytes(), &encode_buf[..encode_len + pad_len]);
-
- let decode_len = engine
- .decode_slice_unchecked(encoded.as_bytes(), &mut decode_buf[..])
- .unwrap();
- assert_eq!(orig.len(), decode_len);
-
- assert_eq!(
- orig,
- &std::str::from_utf8(&decode_buf[0..decode_len]).unwrap()
- );
-
- // if there was (canonical) padding, and we remove it, the standard engine won't decode
- if encoded.as_bytes().contains(&PAD_BYTE) {
- assert_eq!(
- Err(DecodeError::InvalidPadding),
- engine.decode(encoded_without_padding)
- )
- }
- }
- }
-}
-
-#[apply(all_engines)]
-fn roundtrip_random<E: EngineWrapper>(engine_wrapper: E) {
- let mut rng = seeded_rng();
-
- let mut orig_data = Vec::<u8>::new();
- let mut encode_buf = Vec::<u8>::new();
- let mut decode_buf = Vec::<u8>::new();
-
- let len_range = distributions::Uniform::new(1, 1_000);
-
- for _ in 0..10_000 {
- let engine = E::random(&mut rng);
-
- orig_data.clear();
- encode_buf.clear();
- decode_buf.clear();
-
- let (orig_len, _, encoded_len) = generate_random_encoded_data(
- &engine,
- &mut orig_data,
- &mut encode_buf,
- &mut rng,
- &len_range,
- );
-
- // exactly the right size
- decode_buf.resize(orig_len, 0);
-
- let dec_len = engine
- .decode_slice_unchecked(&encode_buf[0..encoded_len], &mut decode_buf[..])
- .unwrap();
-
- assert_eq!(orig_len, dec_len);
- assert_eq!(&orig_data[..], &decode_buf[..dec_len]);
- }
-}
-
-#[apply(all_engines)]
-fn encode_doesnt_write_extra_bytes<E: EngineWrapper>(engine_wrapper: E) {
- let mut rng = seeded_rng();
-
- let mut orig_data = Vec::<u8>::new();
- let mut encode_buf = Vec::<u8>::new();
- let mut encode_buf_backup = Vec::<u8>::new();
-
- let input_len_range = distributions::Uniform::new(0, 1000);
-
- for _ in 0..10_000 {
- let engine = E::random(&mut rng);
- let padded = engine.config().encode_padding();
-
- orig_data.clear();
- encode_buf.clear();
- encode_buf_backup.clear();
-
- let orig_len = fill_rand(&mut orig_data, &mut rng, &input_len_range);
-
- let prefix_len = 1024;
- // plenty of prefix and suffix
- fill_rand_len(&mut encode_buf, &mut rng, prefix_len * 2 + orig_len * 2);
- encode_buf_backup.extend_from_slice(&encode_buf[..]);
-
- let expected_encode_len_no_pad = encoded_len(orig_len, false).unwrap();
-
- let encoded_len_no_pad =
- engine.internal_encode(&orig_data[..], &mut encode_buf[prefix_len..]);
- assert_eq!(expected_encode_len_no_pad, encoded_len_no_pad);
-
- // no writes past what it claimed to write
- assert_eq!(&encode_buf_backup[..prefix_len], &encode_buf[..prefix_len]);
- assert_eq!(
- &encode_buf_backup[(prefix_len + encoded_len_no_pad)..],
- &encode_buf[(prefix_len + encoded_len_no_pad)..]
- );
-
- let encoded_data = &encode_buf[prefix_len..(prefix_len + encoded_len_no_pad)];
- assert_encode_sanity(
- std::str::from_utf8(encoded_data).unwrap(),
- // engines don't pad
- false,
- orig_len,
- );
-
- // pad so we can decode it in case our random engine requires padding
- let pad_len = if padded {
- add_padding(
- encoded_len_no_pad,
- &mut encode_buf[prefix_len + encoded_len_no_pad..],
- )
- } else {
- 0
- };
-
- assert_eq!(
- orig_data,
- engine
- .decode(&encode_buf[prefix_len..(prefix_len + encoded_len_no_pad + pad_len)],)
- .unwrap()
- );
- }
-}
-
-#[apply(all_engines)]
-fn encode_engine_slice_fits_into_precisely_sized_slice<E: EngineWrapper>(engine_wrapper: E) {
- let mut orig_data = Vec::new();
- let mut encoded_data = Vec::new();
- let mut decoded = Vec::new();
-
- let input_len_range = distributions::Uniform::new(0, 1000);
-
- let mut rng = rngs::SmallRng::from_entropy();
-
- for _ in 0..10_000 {
- orig_data.clear();
- encoded_data.clear();
- decoded.clear();
-
- let input_len = input_len_range.sample(&mut rng);
-
- for _ in 0..input_len {
- orig_data.push(rng.gen());
- }
-
- let engine = E::random(&mut rng);
-
- let encoded_size = encoded_len(input_len, engine.config().encode_padding()).unwrap();
-
- encoded_data.resize(encoded_size, 0);
-
- assert_eq!(
- encoded_size,
- engine.encode_slice(&orig_data, &mut encoded_data).unwrap()
- );
-
- assert_encode_sanity(
- std::str::from_utf8(&encoded_data[0..encoded_size]).unwrap(),
- engine.config().encode_padding(),
- input_len,
- );
-
- engine
- .decode_vec(&encoded_data[0..encoded_size], &mut decoded)
- .unwrap();
- assert_eq!(orig_data, decoded);
- }
-}
-
-#[apply(all_engines)]
-fn decode_doesnt_write_extra_bytes<E>(engine_wrapper: E)
-where
- E: EngineWrapper,
- <<E as EngineWrapper>::Engine as Engine>::Config: fmt::Debug,
-{
- let mut rng = seeded_rng();
-
- let mut orig_data = Vec::<u8>::new();
- let mut encode_buf = Vec::<u8>::new();
- let mut decode_buf = Vec::<u8>::new();
- let mut decode_buf_backup = Vec::<u8>::new();
-
- let len_range = distributions::Uniform::new(1, 1_000);
-
- for _ in 0..10_000 {
- let engine = E::random(&mut rng);
-
- orig_data.clear();
- encode_buf.clear();
- decode_buf.clear();
- decode_buf_backup.clear();
-
- let orig_len = fill_rand(&mut orig_data, &mut rng, &len_range);
- encode_buf.resize(orig_len * 2 + 100, 0);
-
- let encoded_len = engine
- .encode_slice(&orig_data[..], &mut encode_buf[..])
- .unwrap();
- encode_buf.truncate(encoded_len);
-
- // oversize decode buffer so we can easily tell if it writes anything more than
- // just the decoded data
- let prefix_len = 1024;
- // plenty of prefix and suffix
- fill_rand_len(&mut decode_buf, &mut rng, prefix_len * 2 + orig_len * 2);
- decode_buf_backup.extend_from_slice(&decode_buf[..]);
-
- let dec_len = engine
- .decode_slice_unchecked(&encode_buf, &mut decode_buf[prefix_len..])
- .unwrap();
-
- assert_eq!(orig_len, dec_len);
- assert_eq!(
- &orig_data[..],
- &decode_buf[prefix_len..prefix_len + dec_len]
- );
- assert_eq!(&decode_buf_backup[..prefix_len], &decode_buf[..prefix_len]);
- assert_eq!(
- &decode_buf_backup[prefix_len + dec_len..],
- &decode_buf[prefix_len + dec_len..]
- );
- }
-}
-
-#[apply(all_engines)]
-fn decode_detect_invalid_last_symbol<E: EngineWrapper>(engine_wrapper: E) {
- // 0xFF -> "/w==", so all letters > w, 0-9, and '+', '/' should get InvalidLastSymbol
- let engine = E::standard();
-
- assert_eq!(Ok(vec![0x89, 0x85]), engine.decode("iYU="));
- assert_eq!(Ok(vec![0xFF]), engine.decode("/w=="));
-
- for (suffix, offset) in vec![
- // suffix, offset of bad byte from start of suffix
- ("/x==", 1_usize),
- ("/z==", 1_usize),
- ("/0==", 1_usize),
- ("/9==", 1_usize),
- ("/+==", 1_usize),
- ("//==", 1_usize),
- // trailing 01
- ("iYV=", 2_usize),
- // trailing 10
- ("iYW=", 2_usize),
- // trailing 11
- ("iYX=", 2_usize),
- ] {
- for prefix_quads in 0..256 {
- let mut encoded = "AAAA".repeat(prefix_quads);
- encoded.push_str(suffix);
-
- assert_eq!(
- Err(DecodeError::InvalidLastSymbol(
- encoded.len() - 4 + offset,
- suffix.as_bytes()[offset],
- )),
- engine.decode(encoded.as_str())
- );
- }
- }
-}
-
-#[apply(all_engines)]
-fn decode_detect_1_valid_symbol_in_last_quad_invalid_length<E: EngineWrapper>(engine_wrapper: E) {
- for len in (0_usize..256).map(|len| len * 4 + 1) {
- for mode in all_pad_modes() {
- let mut input = vec![b'A'; len];
-
- let engine = E::standard_with_pad_mode(true, mode);
-
- assert_eq!(Err(DecodeError::InvalidLength(len)), engine.decode(&input));
- // if we add padding, then the first pad byte in the quad is invalid because it should
- // be the second symbol
- for _ in 0..3 {
- input.push(PAD_BYTE);
- assert_eq!(
- Err(DecodeError::InvalidByte(len, PAD_BYTE)),
- engine.decode(&input)
- );
- }
- }
- }
-}
-
-#[apply(all_engines)]
-fn decode_detect_1_invalid_byte_in_last_quad_invalid_byte<E: EngineWrapper>(engine_wrapper: E) {
- for prefix_len in (0_usize..256).map(|len| len * 4) {
- for mode in all_pad_modes() {
- let mut input = vec![b'A'; prefix_len];
- input.push(b'*');
-
- let engine = E::standard_with_pad_mode(true, mode);
-
- assert_eq!(
- Err(DecodeError::InvalidByte(prefix_len, b'*')),
- engine.decode(&input)
- );
- // adding padding doesn't matter
- for _ in 0..3 {
- input.push(PAD_BYTE);
- assert_eq!(
- Err(DecodeError::InvalidByte(prefix_len, b'*')),
- engine.decode(&input)
- );
- }
- }
- }
-}
-
-#[apply(all_engines)]
-fn decode_detect_invalid_last_symbol_every_possible_two_symbols<E: EngineWrapper>(
- engine_wrapper: E,
-) {
- let engine = E::standard();
-
- let mut base64_to_bytes = collections::HashMap::new();
-
- for b in 0_u8..=255 {
- let mut b64 = vec![0_u8; 4];
- assert_eq!(2, engine.internal_encode(&[b], &mut b64[..]));
- let _ = add_padding(2, &mut b64[2..]);
-
- assert!(base64_to_bytes.insert(b64, vec![b]).is_none());
- }
-
- // every possible combination of trailing symbols must either decode to 1 byte or get InvalidLastSymbol, with or without any leading chunks
-
- let mut prefix = Vec::new();
- for _ in 0..256 {
- let mut clone = prefix.clone();
-
- let mut symbols = [0_u8; 4];
- for &s1 in STANDARD.symbols.iter() {
- symbols[0] = s1;
- for &s2 in STANDARD.symbols.iter() {
- symbols[1] = s2;
- symbols[2] = PAD_BYTE;
- symbols[3] = PAD_BYTE;
-
- // chop off previous symbols
- clone.truncate(prefix.len());
- clone.extend_from_slice(&symbols[..]);
- let decoded_prefix_len = prefix.len() / 4 * 3;
-
- match base64_to_bytes.get(&symbols[..]) {
- Some(bytes) => {
- let res = engine
- .decode(&clone)
- // remove prefix
- .map(|decoded| decoded[decoded_prefix_len..].to_vec());
-
- assert_eq!(Ok(bytes.clone()), res);
- }
- None => assert_eq!(
- Err(DecodeError::InvalidLastSymbol(1, s2)),
- engine.decode(&symbols[..])
- ),
- }
- }
- }
-
- prefix.extend_from_slice(b"AAAA");
- }
-}
-
-#[apply(all_engines)]
-fn decode_detect_invalid_last_symbol_every_possible_three_symbols<E: EngineWrapper>(
- engine_wrapper: E,
-) {
- let engine = E::standard();
-
- let mut base64_to_bytes = collections::HashMap::new();
-
- let mut bytes = [0_u8; 2];
- for b1 in 0_u8..=255 {
- bytes[0] = b1;
- for b2 in 0_u8..=255 {
- bytes[1] = b2;
- let mut b64 = vec![0_u8; 4];
- assert_eq!(3, engine.internal_encode(&bytes, &mut b64[..]));
- let _ = add_padding(3, &mut b64[3..]);
-
- let mut v = Vec::with_capacity(2);
- v.extend_from_slice(&bytes[..]);
-
- assert!(base64_to_bytes.insert(b64, v).is_none());
- }
- }
-
- // every possible combination of symbols must either decode to 2 bytes or get InvalidLastSymbol, with or without any leading chunks
-
- let mut prefix = Vec::new();
- let mut input = Vec::new();
- for _ in 0..256 {
- input.clear();
- input.extend_from_slice(&prefix);
-
- let mut symbols = [0_u8; 4];
- for &s1 in STANDARD.symbols.iter() {
- symbols[0] = s1;
- for &s2 in STANDARD.symbols.iter() {
- symbols[1] = s2;
- for &s3 in STANDARD.symbols.iter() {
- symbols[2] = s3;
- symbols[3] = PAD_BYTE;
-
- // chop off previous symbols
- input.truncate(prefix.len());
- input.extend_from_slice(&symbols[..]);
- let decoded_prefix_len = prefix.len() / 4 * 3;
-
- match base64_to_bytes.get(&symbols[..]) {
- Some(bytes) => {
- let res = engine
- .decode(&input)
- // remove prefix
- .map(|decoded| decoded[decoded_prefix_len..].to_vec());
-
- assert_eq!(Ok(bytes.clone()), res);
- }
- None => assert_eq!(
- Err(DecodeError::InvalidLastSymbol(2, s3)),
- engine.decode(&symbols[..])
- ),
- }
- }
- }
- }
- prefix.extend_from_slice(b"AAAA");
- }
-}
-
-#[apply(all_engines)]
-fn decode_invalid_trailing_bits_ignored_when_configured<E: EngineWrapper>(engine_wrapper: E) {
- let strict = E::standard();
- let forgiving = E::standard_allow_trailing_bits();
-
- fn assert_tolerant_decode<E: Engine>(
- engine: &E,
- input: &mut String,
- b64_prefix_len: usize,
- expected_decode_bytes: Vec<u8>,
- data: &str,
- ) {
- let prefixed = prefixed_data(input, b64_prefix_len, data);
- let decoded = engine.decode(prefixed);
- // prefix is always complete chunks
- let decoded_prefix_len = b64_prefix_len / 4 * 3;
- assert_eq!(
- Ok(expected_decode_bytes),
- decoded.map(|v| v[decoded_prefix_len..].to_vec())
- );
- }
-
- let mut prefix = String::new();
- for _ in 0..256 {
- let mut input = prefix.clone();
-
- // example from https://github.com/marshallpierce/rust-base64/issues/75
- assert!(strict
- .decode(prefixed_data(&mut input, prefix.len(), "/w=="))
- .is_ok());
- assert!(strict
- .decode(prefixed_data(&mut input, prefix.len(), "iYU="))
- .is_ok());
- // trailing 01
- assert_tolerant_decode(&forgiving, &mut input, prefix.len(), vec![255], "/x==");
- assert_tolerant_decode(&forgiving, &mut input, prefix.len(), vec![137, 133], "iYV=");
- // trailing 10
- assert_tolerant_decode(&forgiving, &mut input, prefix.len(), vec![255], "/y==");
- assert_tolerant_decode(&forgiving, &mut input, prefix.len(), vec![137, 133], "iYW=");
- // trailing 11
- assert_tolerant_decode(&forgiving, &mut input, prefix.len(), vec![255], "/z==");
- assert_tolerant_decode(&forgiving, &mut input, prefix.len(), vec![137, 133], "iYX=");
-
- prefix.push_str("AAAA");
- }
-}
-
-#[apply(all_engines)]
-fn decode_invalid_byte_error<E: EngineWrapper>(engine_wrapper: E) {
- let mut rng = seeded_rng();
-
- let mut orig_data = Vec::<u8>::new();
- let mut encode_buf = Vec::<u8>::new();
- let mut decode_buf = Vec::<u8>::new();
-
- let len_range = distributions::Uniform::new(1, 1_000);
-
- for _ in 0..100_000 {
- let alphabet = random_alphabet(&mut rng);
- let engine = E::random_alphabet(&mut rng, alphabet);
-
- orig_data.clear();
- encode_buf.clear();
- decode_buf.clear();
-
- let (orig_len, encoded_len_just_data, encoded_len_with_padding) =
- generate_random_encoded_data(
- &engine,
- &mut orig_data,
- &mut encode_buf,
- &mut rng,
- &len_range,
- );
-
- // exactly the right size
- decode_buf.resize(orig_len, 0);
-
- // replace one encoded byte with an invalid byte
- let invalid_byte: u8 = loop {
- let byte: u8 = rng.gen();
-
- if alphabet.symbols.contains(&byte) || byte == PAD_BYTE {
- continue;
- } else {
- break byte;
- }
- };
-
- let invalid_range = distributions::Uniform::new(0, orig_len);
- let invalid_index = invalid_range.sample(&mut rng);
- encode_buf[invalid_index] = invalid_byte;
-
- assert_eq!(
- Err(DecodeError::InvalidByte(invalid_index, invalid_byte)),
- engine.decode_slice_unchecked(
- &encode_buf[0..encoded_len_with_padding],
- &mut decode_buf[..],
- )
- );
- }
-}
-
-/// Any amount of padding anywhere before the final non padding character = invalid byte at first
-/// pad byte.
-/// From this and [decode_padding_before_final_non_padding_char_error_invalid_byte_at_first_pad_non_canonical_padding_suffix_all_modes],
-/// we know padding must extend contiguously to the end of the input.
-#[apply(all_engines)]
-fn decode_padding_before_final_non_padding_char_error_invalid_byte_at_first_pad_all_modes<
- E: EngineWrapper,
->(
- engine_wrapper: E,
-) {
- // Different amounts of padding, w/ offset from end for the last non-padding char.
- // Only canonical padding, so Canonical mode will work.
- let suffixes = &[("AA==", 2), ("AAA=", 1), ("AAAA", 0)];
-
- for mode in pad_modes_allowing_padding() {
- // We don't encode, so we don't care about encode padding.
- let engine = E::standard_with_pad_mode(true, mode);
-
- decode_padding_before_final_non_padding_char_error_invalid_byte_at_first_pad(
- engine,
- suffixes.as_slice(),
- );
- }
-}
-
-/// See [decode_padding_before_final_non_padding_char_error_invalid_byte_at_first_pad_all_modes]
-#[apply(all_engines)]
-fn decode_padding_before_final_non_padding_char_error_invalid_byte_at_first_pad_non_canonical_padding_suffix<
- E: EngineWrapper,
->(
- engine_wrapper: E,
-) {
- // Different amounts of padding, w/ offset from end for the last non-padding char, and
- // non-canonical padding.
- let suffixes = [
- ("AA==", 2),
- ("AA=", 1),
- ("AA", 0),
- ("AAA=", 1),
- ("AAA", 0),
- ("AAAA", 0),
- ];
-
- // We don't encode, so we don't care about encode padding.
- // Decoding is indifferent so that we don't get caught by missing padding on the last quad
- let engine = E::standard_with_pad_mode(true, DecodePaddingMode::Indifferent);
-
- decode_padding_before_final_non_padding_char_error_invalid_byte_at_first_pad(
- engine,
- suffixes.as_slice(),
- )
-}
-
-fn decode_padding_before_final_non_padding_char_error_invalid_byte_at_first_pad(
- engine: impl Engine,
- suffixes: &[(&str, usize)],
-) {
- let mut rng = seeded_rng();
-
- let prefix_quads_range = distributions::Uniform::from(0..=256);
-
- for _ in 0..100_000 {
- for (suffix, suffix_offset) in suffixes.iter() {
- let mut s = "AAAA".repeat(prefix_quads_range.sample(&mut rng));
- s.push_str(suffix);
- let mut encoded = s.into_bytes();
-
- // calculate a range to write padding into that leaves at least one non padding char
- let last_non_padding_offset = encoded.len() - 1 - suffix_offset;
-
- // don't include last non padding char as it must stay not padding
- let padding_end = rng.gen_range(0..last_non_padding_offset);
-
- // don't use more than 100 bytes of padding, but also use shorter lengths when
- // padding_end is near the start of the encoded data to avoid biasing to padding
- // the entire prefix on short lengths
- let padding_len = rng.gen_range(1..=usize::min(100, padding_end + 1));
- let padding_start = padding_end.saturating_sub(padding_len);
-
- encoded[padding_start..=padding_end].fill(PAD_BYTE);
-
- // should still have non-padding before any final padding
- assert_ne!(PAD_BYTE, encoded[last_non_padding_offset]);
- assert_eq!(
- Err(DecodeError::InvalidByte(padding_start, PAD_BYTE)),
- engine.decode(&encoded),
- "len: {}, input: {}",
- encoded.len(),
- String::from_utf8(encoded).unwrap()
- );
- }
- }
-}
-
-/// Any amount of padding before final chunk that crosses over into final chunk with 1-4 bytes =
-/// invalid byte at first pad byte.
-/// From this we know the padding must start in the final chunk.
-#[apply(all_engines)]
-fn decode_padding_starts_before_final_chunk_error_invalid_byte_at_first_pad<E: EngineWrapper>(
- engine_wrapper: E,
-) {
- let mut rng = seeded_rng();
-
- // must have at least one prefix quad
- let prefix_quads_range = distributions::Uniform::from(1..256);
- let suffix_pad_len_range = distributions::Uniform::from(1..=4);
- // don't use no-padding mode, as the reader decode might decode a block that ends with
- // valid padding, which should then be referenced when encountering the later invalid byte
- for mode in pad_modes_allowing_padding() {
- // we don't encode so we don't care about encode padding
- let engine = E::standard_with_pad_mode(true, mode);
- for _ in 0..100_000 {
- let suffix_len = suffix_pad_len_range.sample(&mut rng);
- // all 0 bits so we don't hit InvalidLastSymbol with the reader decoder
- let mut encoded = "AAAA"
- .repeat(prefix_quads_range.sample(&mut rng))
- .into_bytes();
- encoded.resize(encoded.len() + suffix_len, PAD_BYTE);
-
- // amount of padding must be long enough to extend back from suffix into previous
- // quads
- let padding_len = rng.gen_range(suffix_len + 1..encoded.len());
- // no non-padding after padding in this test, so padding goes to the end
- let padding_start = encoded.len() - padding_len;
- encoded[padding_start..].fill(PAD_BYTE);
-
- assert_eq!(
- Err(DecodeError::InvalidByte(padding_start, PAD_BYTE)),
- engine.decode(&encoded),
- "suffix_len: {}, padding_len: {}, b64: {}",
- suffix_len,
- padding_len,
- std::str::from_utf8(&encoded).unwrap()
- );
- }
- }
-}
-
-/// 0-1 bytes of data before any amount of padding in final chunk = invalid byte, since padding
-/// is not valid data (consistent with error for pad bytes in earlier chunks).
-/// From this we know there must be 2-3 bytes of data before padding
-#[apply(all_engines)]
-fn decode_too_little_data_before_padding_error_invalid_byte<E: EngineWrapper>(engine_wrapper: E) {
- let mut rng = seeded_rng();
-
- // want to test no prefix quad case, so start at 0
- let prefix_quads_range = distributions::Uniform::from(0_usize..256);
- let suffix_data_len_range = distributions::Uniform::from(0_usize..=1);
- for mode in all_pad_modes() {
- // we don't encode so we don't care about encode padding
- let engine = E::standard_with_pad_mode(true, mode);
- for _ in 0..100_000 {
- let suffix_data_len = suffix_data_len_range.sample(&mut rng);
- let prefix_quad_len = prefix_quads_range.sample(&mut rng);
-
- // for all possible padding lengths
- for padding_len in 1..=(4 - suffix_data_len) {
- let mut encoded = "ABCD".repeat(prefix_quad_len).into_bytes();
- encoded.resize(encoded.len() + suffix_data_len, b'A');
- encoded.resize(encoded.len() + padding_len, PAD_BYTE);
-
- assert_eq!(
- Err(DecodeError::InvalidByte(
- prefix_quad_len * 4 + suffix_data_len,
- PAD_BYTE,
- )),
- engine.decode(&encoded),
- "input {} suffix data len {} pad len {}",
- String::from_utf8(encoded).unwrap(),
- suffix_data_len,
- padding_len
- );
- }
- }
- }
-}
-
-// https://eprint.iacr.org/2022/361.pdf table 2, test 1
-#[apply(all_engines)]
-fn decode_malleability_test_case_3_byte_suffix_valid<E: EngineWrapper>(engine_wrapper: E) {
- assert_eq!(
- b"Hello".as_slice(),
- &E::standard().decode("SGVsbG8=").unwrap()
- );
-}
-
-// https://eprint.iacr.org/2022/361.pdf table 2, test 2
-#[apply(all_engines)]
-fn decode_malleability_test_case_3_byte_suffix_invalid_trailing_symbol<E: EngineWrapper>(
- engine_wrapper: E,
-) {
- assert_eq!(
- DecodeError::InvalidLastSymbol(6, 0x39),
- E::standard().decode("SGVsbG9=").unwrap_err()
- );
-}
-
-// https://eprint.iacr.org/2022/361.pdf table 2, test 3
-#[apply(all_engines)]
-fn decode_malleability_test_case_3_byte_suffix_no_padding<E: EngineWrapper>(engine_wrapper: E) {
- assert_eq!(
- DecodeError::InvalidPadding,
- E::standard().decode("SGVsbG9").unwrap_err()
- );
-}
-
-// https://eprint.iacr.org/2022/361.pdf table 2, test 4
-#[apply(all_engines)]
-fn decode_malleability_test_case_2_byte_suffix_valid_two_padding_symbols<E: EngineWrapper>(
- engine_wrapper: E,
-) {
- assert_eq!(
- b"Hell".as_slice(),
- &E::standard().decode("SGVsbA==").unwrap()
- );
-}
-
-// https://eprint.iacr.org/2022/361.pdf table 2, test 5
-#[apply(all_engines)]
-fn decode_malleability_test_case_2_byte_suffix_short_padding<E: EngineWrapper>(engine_wrapper: E) {
- assert_eq!(
- DecodeError::InvalidPadding,
- E::standard().decode("SGVsbA=").unwrap_err()
- );
-}
-
-// https://eprint.iacr.org/2022/361.pdf table 2, test 6
-#[apply(all_engines)]
-fn decode_malleability_test_case_2_byte_suffix_no_padding<E: EngineWrapper>(engine_wrapper: E) {
- assert_eq!(
- DecodeError::InvalidPadding,
- E::standard().decode("SGVsbA").unwrap_err()
- );
-}
-
-// https://eprint.iacr.org/2022/361.pdf table 2, test 7
-// DecoderReader pseudo-engine gets InvalidByte at 8 (extra padding) since it decodes the first
-// two complete quads correctly.
-#[apply(all_engines_except_decoder_reader)]
-fn decode_malleability_test_case_2_byte_suffix_too_much_padding<E: EngineWrapper>(
- engine_wrapper: E,
-) {
- assert_eq!(
- DecodeError::InvalidByte(6, PAD_BYTE),
- E::standard().decode("SGVsbA====").unwrap_err()
- );
-}
-
-/// Requires canonical padding -> accepts 2 + 2, 3 + 1, 4 + 0 final quad configurations
-#[apply(all_engines)]
-fn decode_pad_mode_requires_canonical_accepts_canonical<E: EngineWrapper>(engine_wrapper: E) {
- assert_all_suffixes_ok(
- E::standard_with_pad_mode(true, DecodePaddingMode::RequireCanonical),
- vec!["/w==", "iYU=", "AAAA"],
- );
-}
-
-/// Requires canonical padding -> rejects 2 + 0-1, 3 + 0 final chunk configurations
-#[apply(all_engines)]
-fn decode_pad_mode_requires_canonical_rejects_non_canonical<E: EngineWrapper>(engine_wrapper: E) {
- let engine = E::standard_with_pad_mode(true, DecodePaddingMode::RequireCanonical);
-
- let suffixes = ["/w", "/w=", "iYU"];
- for num_prefix_quads in 0..256 {
- for &suffix in suffixes.iter() {
- let mut encoded = "AAAA".repeat(num_prefix_quads);
- encoded.push_str(suffix);
-
- let res = engine.decode(&encoded);
-
- assert_eq!(Err(DecodeError::InvalidPadding), res);
- }
- }
-}
-
-/// Requires no padding -> accepts 2 + 0, 3 + 0, 4 + 0 final chunk configuration
-#[apply(all_engines)]
-fn decode_pad_mode_requires_no_padding_accepts_no_padding<E: EngineWrapper>(engine_wrapper: E) {
- assert_all_suffixes_ok(
- E::standard_with_pad_mode(true, DecodePaddingMode::RequireNone),
- vec!["/w", "iYU", "AAAA"],
- );
-}
-
-/// Requires no padding -> rejects 2 + 1-2, 3 + 1 final chunk configuration
-#[apply(all_engines)]
-fn decode_pad_mode_requires_no_padding_rejects_any_padding<E: EngineWrapper>(engine_wrapper: E) {
- let engine = E::standard_with_pad_mode(true, DecodePaddingMode::RequireNone);
-
- let suffixes = ["/w=", "/w==", "iYU="];
- for num_prefix_quads in 0..256 {
- for &suffix in suffixes.iter() {
- let mut encoded = "AAAA".repeat(num_prefix_quads);
- encoded.push_str(suffix);
-
- let res = engine.decode(&encoded);
-
- assert_eq!(Err(DecodeError::InvalidPadding), res);
- }
- }
-}
-
-/// Indifferent padding accepts 2 + 0-2, 3 + 0-1, 4 + 0 final chunk configuration
-#[apply(all_engines)]
-fn decode_pad_mode_indifferent_padding_accepts_anything<E: EngineWrapper>(engine_wrapper: E) {
- assert_all_suffixes_ok(
- E::standard_with_pad_mode(true, DecodePaddingMode::Indifferent),
- vec!["/w", "/w=", "/w==", "iYU", "iYU=", "AAAA"],
- );
-}
-
-/// 1 trailing byte that's not padding is detected as invalid byte even though there's padding
-/// in the middle of the input. This is essentially mandating the eager check for 1 trailing byte
-/// to catch the \n suffix case.
-// DecoderReader pseudo-engine can't handle DecodePaddingMode::RequireNone since it will decode
-// a complete quad with padding in it before encountering the stray byte that makes it an invalid
-// length
-#[apply(all_engines_except_decoder_reader)]
-fn decode_invalid_trailing_bytes_all_pad_modes_invalid_byte<E: EngineWrapper>(engine_wrapper: E) {
- for mode in all_pad_modes() {
- do_invalid_trailing_byte(E::standard_with_pad_mode(true, mode), mode);
- }
-}
-
-#[apply(all_engines)]
-fn decode_invalid_trailing_bytes_invalid_byte<E: EngineWrapper>(engine_wrapper: E) {
- // excluding no padding mode because the DecoderWrapper pseudo-engine will fail with
- // InvalidPadding because it will decode the last complete quad with padding first
- for mode in pad_modes_allowing_padding() {
- do_invalid_trailing_byte(E::standard_with_pad_mode(true, mode), mode);
- }
-}
-fn do_invalid_trailing_byte(engine: impl Engine, mode: DecodePaddingMode) {
- for last_byte in [b'*', b'\n'] {
- for num_prefix_quads in 0..256 {
- let mut s: String = "ABCD".repeat(num_prefix_quads);
- s.push_str("Cg==");
- let mut input = s.into_bytes();
- input.push(last_byte);
-
- // The case of trailing newlines is common enough to warrant a test for a good error
- // message.
- assert_eq!(
- Err(DecodeError::InvalidByte(
- num_prefix_quads * 4 + 4,
- last_byte
- )),
- engine.decode(&input),
- "mode: {:?}, input: {}",
- mode,
- String::from_utf8(input).unwrap()
- );
- }
- }
-}
-
-/// When there's 1 trailing byte, but it's padding, it's only InvalidByte if there isn't padding
-/// earlier.
-#[apply(all_engines)]
-fn decode_invalid_trailing_padding_as_invalid_byte_at_first_pad_byte<E: EngineWrapper>(
- engine_wrapper: E,
-) {
- // excluding no padding mode because the DecoderWrapper pseudo-engine will fail with
- // InvalidPadding because it will decode the last complete quad with padding first
- for mode in pad_modes_allowing_padding() {
- do_invalid_trailing_padding_as_invalid_byte_at_first_padding(
- E::standard_with_pad_mode(true, mode),
- mode,
- );
- }
-}
-
-// DecoderReader pseudo-engine can't handle DecodePaddingMode::RequireNone since it will decode
-// a complete quad with padding in it before encountering the stray byte that makes it an invalid
-// length
-#[apply(all_engines_except_decoder_reader)]
-fn decode_invalid_trailing_padding_as_invalid_byte_at_first_byte_all_modes<E: EngineWrapper>(
- engine_wrapper: E,
-) {
- for mode in all_pad_modes() {
- do_invalid_trailing_padding_as_invalid_byte_at_first_padding(
- E::standard_with_pad_mode(true, mode),
- mode,
- );
- }
-}
-fn do_invalid_trailing_padding_as_invalid_byte_at_first_padding(
- engine: impl Engine,
- mode: DecodePaddingMode,
-) {
- for num_prefix_quads in 0..256 {
- for (suffix, pad_offset) in [("AA===", 2), ("AAA==", 3), ("AAAA=", 4)] {
- let mut s: String = "ABCD".repeat(num_prefix_quads);
- s.push_str(suffix);
-
- assert_eq!(
- // pad after `g`, not the last one
- Err(DecodeError::InvalidByte(
- num_prefix_quads * 4 + pad_offset,
- PAD_BYTE
- )),
- engine.decode(&s),
- "mode: {:?}, input: {}",
- mode,
- s
- );
- }
- }
-}
-
-#[apply(all_engines)]
-fn decode_into_slice_fits_in_precisely_sized_slice<E: EngineWrapper>(engine_wrapper: E) {
- let mut orig_data = Vec::new();
- let mut encoded_data = String::new();
- let mut decode_buf = Vec::new();
-
- let input_len_range = distributions::Uniform::new(0, 1000);
- let mut rng = rngs::SmallRng::from_entropy();
-
- for _ in 0..10_000 {
- orig_data.clear();
- encoded_data.clear();
- decode_buf.clear();
-
- let input_len = input_len_range.sample(&mut rng);
-
- for _ in 0..input_len {
- orig_data.push(rng.gen());
- }
-
- let engine = E::random(&mut rng);
- engine.encode_string(&orig_data, &mut encoded_data);
- assert_encode_sanity(&encoded_data, engine.config().encode_padding(), input_len);
-
- decode_buf.resize(input_len, 0);
- // decode into the non-empty buf
- let decode_bytes_written = engine
- .decode_slice_unchecked(encoded_data.as_bytes(), &mut decode_buf[..])
- .unwrap();
- assert_eq!(orig_data.len(), decode_bytes_written);
- assert_eq!(orig_data, decode_buf);
-
- // same for checked variant
- decode_buf.clear();
- decode_buf.resize(input_len, 0);
- // decode into the non-empty buf
- let decode_bytes_written = engine
- .decode_slice(encoded_data.as_bytes(), &mut decode_buf[..])
- .unwrap();
- assert_eq!(orig_data.len(), decode_bytes_written);
- assert_eq!(orig_data, decode_buf);
- }
-}
-
-#[apply(all_engines)]
-fn inner_decode_reports_padding_position<E: EngineWrapper>(engine_wrapper: E) {
- let mut b64 = String::new();
- let mut decoded = Vec::new();
- let engine = E::standard();
-
- for pad_position in 1..10_000 {
- b64.clear();
- decoded.clear();
- // plenty of room for original data
- decoded.resize(pad_position, 0);
-
- for _ in 0..pad_position {
- b64.push('A');
- }
- // finish the quad with padding
- for _ in 0..(4 - (pad_position % 4)) {
- b64.push('=');
- }
-
- let decode_res = engine.internal_decode(
- b64.as_bytes(),
- &mut decoded[..],
- engine.internal_decoded_len_estimate(b64.len()),
- );
- if pad_position % 4 < 2 {
- // impossible padding
- assert_eq!(
- Err(DecodeSliceError::DecodeError(DecodeError::InvalidByte(
- pad_position,
- PAD_BYTE
- ))),
- decode_res
- );
- } else {
- let decoded_bytes = pad_position / 4 * 3
- + match pad_position % 4 {
- 0 => 0,
- 2 => 1,
- 3 => 2,
- _ => unreachable!(),
- };
- assert_eq!(
- Ok(DecodeMetadata::new(decoded_bytes, Some(pad_position))),
- decode_res
- );
- }
- }
-}
-
-#[apply(all_engines)]
-fn decode_length_estimate_delta<E: EngineWrapper>(engine_wrapper: E) {
- for engine in [E::standard(), E::standard_unpadded()] {
- for &padding in &[true, false] {
- for orig_len in 0..1000 {
- let encoded_len = encoded_len(orig_len, padding).unwrap();
-
- let decoded_estimate = engine
- .internal_decoded_len_estimate(encoded_len)
- .decoded_len_estimate();
- assert!(decoded_estimate >= orig_len);
- assert!(
- decoded_estimate - orig_len < 3,
- "estimate: {}, encoded: {}, orig: {}",
- decoded_estimate,
- encoded_len,
- orig_len
- );
- }
- }
- }
-}
-
-#[apply(all_engines)]
-fn estimate_via_u128_inflation<E: EngineWrapper>(engine_wrapper: E) {
- // cover both ends of usize
- (0..1000)
- .chain(usize::MAX - 1000..=usize::MAX)
- .for_each(|encoded_len| {
- // inflate to 128 bit type to be able to safely use the easy formulas
- let len_128 = encoded_len as u128;
-
- let estimate = E::standard()
- .internal_decoded_len_estimate(encoded_len)
- .decoded_len_estimate();
-
- // This check is a little too strict: it requires using the (len + 3) / 4 * 3 formula
- // or equivalent, but until other engines come along that use a different formula
- // requiring that we think more carefully about what the allowable criteria are, this
- // will do.
- assert_eq!(
- ((len_128 + 3) / 4 * 3) as usize,
- estimate,
- "enc len {}",
- encoded_len
- );
- })
-}
-
-#[apply(all_engines)]
-fn decode_slice_checked_fails_gracefully_at_all_output_lengths<E: EngineWrapper>(
- engine_wrapper: E,
-) {
- let mut rng = seeded_rng();
- for original_len in 0..1000 {
- let mut original = vec![0; original_len];
- rng.fill(&mut original[..]);
-
- for mode in all_pad_modes() {
- let engine = E::standard_with_pad_mode(
- match mode {
- DecodePaddingMode::Indifferent | DecodePaddingMode::RequireCanonical => true,
- DecodePaddingMode::RequireNone => false,
- },
- mode,
- );
-
- let encoded = engine.encode(&original);
- let mut decode_buf = Vec::with_capacity(original_len);
- for decode_buf_len in 0..original_len {
- decode_buf.resize(decode_buf_len, 0);
- assert_eq!(
- DecodeSliceError::OutputSliceTooSmall,
- engine
- .decode_slice(&encoded, &mut decode_buf[..])
- .unwrap_err(),
- "original len: {}, encoded len: {}, buf len: {}, mode: {:?}",
- original_len,
- encoded.len(),
- decode_buf_len,
- mode
- );
- // internal method works the same
- assert_eq!(
- DecodeSliceError::OutputSliceTooSmall,
- engine
- .internal_decode(
- encoded.as_bytes(),
- &mut decode_buf[..],
- engine.internal_decoded_len_estimate(encoded.len())
- )
- .unwrap_err()
- );
- }
-
- decode_buf.resize(original_len, 0);
- rng.fill(&mut decode_buf[..]);
- assert_eq!(
- original_len,
- engine.decode_slice(&encoded, &mut decode_buf[..]).unwrap()
- );
- assert_eq!(original, decode_buf);
- }
- }
-}
-
-/// Returns a tuple of the original data length, the encoded data length (just data), and the length including padding.
-///
-/// Vecs provided should be empty.
-fn generate_random_encoded_data<E: Engine, R: rand::Rng, D: distributions::Distribution<usize>>(
- engine: &E,
- orig_data: &mut Vec<u8>,
- encode_buf: &mut Vec<u8>,
- rng: &mut R,
- length_distribution: &D,
-) -> (usize, usize, usize) {
- let padding: bool = engine.config().encode_padding();
-
- let orig_len = fill_rand(orig_data, rng, length_distribution);
- let expected_encoded_len = encoded_len(orig_len, padding).unwrap();
- encode_buf.resize(expected_encoded_len, 0);
-
- let base_encoded_len = engine.internal_encode(&orig_data[..], &mut encode_buf[..]);
-
- let enc_len_with_padding = if padding {
- base_encoded_len + add_padding(base_encoded_len, &mut encode_buf[base_encoded_len..])
- } else {
- base_encoded_len
- };
-
- assert_eq!(expected_encoded_len, enc_len_with_padding);
-
- (orig_len, base_encoded_len, enc_len_with_padding)
-}
-
-// fill to a random length
-fn fill_rand<R: rand::Rng, D: distributions::Distribution<usize>>(
- vec: &mut Vec<u8>,
- rng: &mut R,
- length_distribution: &D,
-) -> usize {
- let len = length_distribution.sample(rng);
- for _ in 0..len {
- vec.push(rng.gen());
- }
-
- len
-}
-
-fn fill_rand_len<R: rand::Rng>(vec: &mut Vec<u8>, rng: &mut R, len: usize) {
- for _ in 0..len {
- vec.push(rng.gen());
- }
-}
-
-fn prefixed_data<'i>(input_with_prefix: &'i mut String, prefix_len: usize, data: &str) -> &'i str {
- input_with_prefix.truncate(prefix_len);
- input_with_prefix.push_str(data);
- input_with_prefix.as_str()
-}
-
-/// A wrapper to make using engines in rstest fixtures easier.
-/// The functions don't need to be instance methods, but rstest does seem
-/// to want an instance, so instances are passed to test functions and then ignored.
-trait EngineWrapper {
- type Engine: Engine;
-
- /// Return an engine configured for RFC standard base64
- fn standard() -> Self::Engine;
-
- /// Return an engine configured for RFC standard base64, except with no padding appended on
- /// encode, and required no padding on decode.
- fn standard_unpadded() -> Self::Engine;
-
- /// Return an engine configured for RFC standard alphabet with the provided encode and decode
- /// pad settings
- fn standard_with_pad_mode(encode_pad: bool, decode_pad_mode: DecodePaddingMode)
- -> Self::Engine;
-
- /// Return an engine configured for RFC standard base64 that allows invalid trailing bits
- fn standard_allow_trailing_bits() -> Self::Engine;
-
- /// Return an engine configured with a randomized alphabet and config
- fn random<R: rand::Rng>(rng: &mut R) -> Self::Engine;
-
- /// Return an engine configured with the specified alphabet and randomized config
- fn random_alphabet<R: rand::Rng>(rng: &mut R, alphabet: &Alphabet) -> Self::Engine;
-}
-
-struct GeneralPurposeWrapper {}
-
-impl EngineWrapper for GeneralPurposeWrapper {
- type Engine = general_purpose::GeneralPurpose;
-
- fn standard() -> Self::Engine {
- general_purpose::GeneralPurpose::new(&STANDARD, general_purpose::PAD)
- }
-
- fn standard_unpadded() -> Self::Engine {
- general_purpose::GeneralPurpose::new(&STANDARD, general_purpose::NO_PAD)
- }
-
- fn standard_with_pad_mode(
- encode_pad: bool,
- decode_pad_mode: DecodePaddingMode,
- ) -> Self::Engine {
- general_purpose::GeneralPurpose::new(
- &STANDARD,
- general_purpose::GeneralPurposeConfig::new()
- .with_encode_padding(encode_pad)
- .with_decode_padding_mode(decode_pad_mode),
- )
- }
-
- fn standard_allow_trailing_bits() -> Self::Engine {
- general_purpose::GeneralPurpose::new(
- &STANDARD,
- general_purpose::GeneralPurposeConfig::new().with_decode_allow_trailing_bits(true),
- )
- }
-
- fn random<R: rand::Rng>(rng: &mut R) -> Self::Engine {
- let alphabet = random_alphabet(rng);
-
- Self::random_alphabet(rng, alphabet)
- }
-
- fn random_alphabet<R: rand::Rng>(rng: &mut R, alphabet: &Alphabet) -> Self::Engine {
- general_purpose::GeneralPurpose::new(alphabet, random_config(rng))
- }
-}
-
-struct NaiveWrapper {}
-
-impl EngineWrapper for NaiveWrapper {
- type Engine = naive::Naive;
-
- fn standard() -> Self::Engine {
- naive::Naive::new(
- &STANDARD,
- naive::NaiveConfig {
- encode_padding: true,
- decode_allow_trailing_bits: false,
- decode_padding_mode: DecodePaddingMode::RequireCanonical,
- },
- )
- }
-
- fn standard_unpadded() -> Self::Engine {
- naive::Naive::new(
- &STANDARD,
- naive::NaiveConfig {
- encode_padding: false,
- decode_allow_trailing_bits: false,
- decode_padding_mode: DecodePaddingMode::RequireNone,
- },
- )
- }
-
- fn standard_with_pad_mode(
- encode_pad: bool,
- decode_pad_mode: DecodePaddingMode,
- ) -> Self::Engine {
- naive::Naive::new(
- &STANDARD,
- naive::NaiveConfig {
- encode_padding: encode_pad,
- decode_allow_trailing_bits: false,
- decode_padding_mode: decode_pad_mode,
- },
- )
- }
-
- fn standard_allow_trailing_bits() -> Self::Engine {
- naive::Naive::new(
- &STANDARD,
- naive::NaiveConfig {
- encode_padding: true,
- decode_allow_trailing_bits: true,
- decode_padding_mode: DecodePaddingMode::RequireCanonical,
- },
- )
- }
-
- fn random<R: rand::Rng>(rng: &mut R) -> Self::Engine {
- let alphabet = random_alphabet(rng);
-
- Self::random_alphabet(rng, alphabet)
- }
-
- fn random_alphabet<R: rand::Rng>(rng: &mut R, alphabet: &Alphabet) -> Self::Engine {
- let mode = rng.gen();
-
- let config = naive::NaiveConfig {
- encode_padding: match mode {
- DecodePaddingMode::Indifferent => rng.gen(),
- DecodePaddingMode::RequireCanonical => true,
- DecodePaddingMode::RequireNone => false,
- },
- decode_allow_trailing_bits: rng.gen(),
- decode_padding_mode: mode,
- };
-
- naive::Naive::new(alphabet, config)
- }
-}
-
-/// A pseudo-Engine that routes all decoding through [DecoderReader]
-struct DecoderReaderEngine<E: Engine> {
- engine: E,
-}
-
-impl<E: Engine> From<E> for DecoderReaderEngine<E> {
- fn from(value: E) -> Self {
- Self { engine: value }
- }
-}
-
-impl<E: Engine> Engine for DecoderReaderEngine<E> {
- type Config = E::Config;
- type DecodeEstimate = E::DecodeEstimate;
-
- fn internal_encode(&self, input: &[u8], output: &mut [u8]) -> usize {
- self.engine.internal_encode(input, output)
- }
-
- fn internal_decoded_len_estimate(&self, input_len: usize) -> Self::DecodeEstimate {
- self.engine.internal_decoded_len_estimate(input_len)
- }
-
- fn internal_decode(
- &self,
- input: &[u8],
- output: &mut [u8],
- decode_estimate: Self::DecodeEstimate,
- ) -> Result<DecodeMetadata, DecodeSliceError> {
- let mut reader = DecoderReader::new(input, &self.engine);
- let mut buf = vec![0; input.len()];
- // to avoid effects like not detecting invalid length due to progressively growing
- // the output buffer in read_to_end etc, read into a big enough buffer in one go
- // to make behavior more consistent with normal engines
- let _ = reader
- .read(&mut buf)
- .and_then(|len| {
- buf.truncate(len);
- // make sure we got everything
- reader.read_to_end(&mut buf)
- })
- .map_err(|io_error| {
- *io_error
- .into_inner()
- .and_then(|inner| inner.downcast::<DecodeError>().ok())
- .unwrap()
- })?;
- if output.len() < buf.len() {
- return Err(DecodeSliceError::OutputSliceTooSmall);
- }
- output[..buf.len()].copy_from_slice(&buf);
- Ok(DecodeMetadata::new(
- buf.len(),
- input
- .iter()
- .enumerate()
- .filter(|(_offset, byte)| **byte == PAD_BYTE)
- .map(|(offset, _byte)| offset)
- .next(),
- ))
- }
-
- fn config(&self) -> &Self::Config {
- self.engine.config()
- }
-}
-
-struct DecoderReaderEngineWrapper {}
-
-impl EngineWrapper for DecoderReaderEngineWrapper {
- type Engine = DecoderReaderEngine<general_purpose::GeneralPurpose>;
-
- fn standard() -> Self::Engine {
- GeneralPurposeWrapper::standard().into()
- }
-
- fn standard_unpadded() -> Self::Engine {
- GeneralPurposeWrapper::standard_unpadded().into()
- }
-
- fn standard_with_pad_mode(
- encode_pad: bool,
- decode_pad_mode: DecodePaddingMode,
- ) -> Self::Engine {
- GeneralPurposeWrapper::standard_with_pad_mode(encode_pad, decode_pad_mode).into()
- }
-
- fn standard_allow_trailing_bits() -> Self::Engine {
- GeneralPurposeWrapper::standard_allow_trailing_bits().into()
- }
-
- fn random<R: rand::Rng>(rng: &mut R) -> Self::Engine {
- GeneralPurposeWrapper::random(rng).into()
- }
-
- fn random_alphabet<R: rand::Rng>(rng: &mut R, alphabet: &Alphabet) -> Self::Engine {
- GeneralPurposeWrapper::random_alphabet(rng, alphabet).into()
- }
-}
-
-fn seeded_rng() -> impl rand::Rng {
- rngs::SmallRng::from_entropy()
-}
-
-fn all_pad_modes() -> Vec<DecodePaddingMode> {
- vec![
- DecodePaddingMode::Indifferent,
- DecodePaddingMode::RequireCanonical,
- DecodePaddingMode::RequireNone,
- ]
-}
-
-fn pad_modes_allowing_padding() -> Vec<DecodePaddingMode> {
- vec![
- DecodePaddingMode::Indifferent,
- DecodePaddingMode::RequireCanonical,
- ]
-}
-
-fn assert_all_suffixes_ok<E: Engine>(engine: E, suffixes: Vec<&str>) {
- for num_prefix_quads in 0..256 {
- for &suffix in suffixes.iter() {
- let mut encoded = "AAAA".repeat(num_prefix_quads);
- encoded.push_str(suffix);
-
- let res = &engine.decode(&encoded);
- assert!(res.is_ok());
- }
- }
-}
diff --git a/vendor/base64/src/lib.rs b/vendor/base64/src/lib.rs
deleted file mode 100644
index 579a7225..00000000
--- a/vendor/base64/src/lib.rs
+++ /dev/null
@@ -1,277 +0,0 @@
-//! Correct, fast, and configurable [base64][] decoding and encoding. Base64
-//! transports binary data efficiently in contexts where only plain text is
-//! allowed.
-//!
-//! [base64]: https://developer.mozilla.org/en-US/docs/Glossary/Base64
-//!
-//! # Usage
-//!
-//! Use an [`Engine`] to decode or encode base64, configured with the base64
-//! alphabet and padding behavior best suited to your application.
-//!
-//! ## Engine setup
-//!
-//! There is more than one way to encode a stream of bytes as “base64”.
-//! Different applications use different encoding
-//! [alphabets][alphabet::Alphabet] and
-//! [padding behaviors][engine::general_purpose::GeneralPurposeConfig].
-//!
-//! ### Encoding alphabet
-//!
-//! Almost all base64 [alphabets][alphabet::Alphabet] use `A-Z`, `a-z`, and
-//! `0-9`, which gives nearly 64 characters (26 + 26 + 10 = 62), but they differ
-//! in their choice of their final 2.
-//!
-//! Most applications use the [standard][alphabet::STANDARD] alphabet specified
-//! in [RFC 4648][rfc-alphabet]. If that’s all you need, you can get started
-//! quickly by using the pre-configured
-//! [`STANDARD`][engine::general_purpose::STANDARD] engine, which is also available
-//! in the [`prelude`] module as shown here, if you prefer a minimal `use`
-//! footprint.
-//!
-#![cfg_attr(feature = "alloc", doc = "```")]
-#![cfg_attr(not(feature = "alloc"), doc = "```ignore")]
-//! use base64::prelude::*;
-//!
-//! # fn main() -> Result<(), base64::DecodeError> {
-//! assert_eq!(BASE64_STANDARD.decode(b"+uwgVQA=")?, b"\xFA\xEC\x20\x55\0");
-//! assert_eq!(BASE64_STANDARD.encode(b"\xFF\xEC\x20\x55\0"), "/+wgVQA=");
-//! # Ok(())
-//! # }
-//! ```
-//!
-//! [rfc-alphabet]: https://datatracker.ietf.org/doc/html/rfc4648#section-4
-//!
-//! Other common alphabets are available in the [`alphabet`] module.
-//!
-//! #### URL-safe alphabet
-//!
-//! The standard alphabet uses `+` and `/` as its two non-alphanumeric tokens,
-//! which cannot be safely used in URL’s without encoding them as `%2B` and
-//! `%2F`.
-//!
-//! To avoid that, some applications use a [“URL-safe” alphabet][alphabet::URL_SAFE],
-//! which uses `-` and `_` instead. To use that alternative alphabet, use the
-//! [`URL_SAFE`][engine::general_purpose::URL_SAFE] engine. This example doesn't
-//! use [`prelude`] to show what a more explicit `use` would look like.
-//!
-#![cfg_attr(feature = "alloc", doc = "```")]
-#![cfg_attr(not(feature = "alloc"), doc = "```ignore")]
-//! use base64::{engine::general_purpose::URL_SAFE, Engine as _};
-//!
-//! # fn main() -> Result<(), base64::DecodeError> {
-//! assert_eq!(URL_SAFE.decode(b"-uwgVQA=")?, b"\xFA\xEC\x20\x55\0");
-//! assert_eq!(URL_SAFE.encode(b"\xFF\xEC\x20\x55\0"), "_-wgVQA=");
-//! # Ok(())
-//! # }
-//! ```
-//!
-//! ### Padding characters
-//!
-//! Each base64 character represents 6 bits (2⁶ = 64) of the original binary
-//! data, and every 3 bytes of input binary data will encode to 4 base64
-//! characters (8 bits × 3 = 6 bits × 4 = 24 bits).
-//!
-//! When the input is not an even multiple of 3 bytes in length, [canonical][]
-//! base64 encoders insert padding characters at the end, so that the output
-//! length is always a multiple of 4:
-//!
-//! [canonical]: https://datatracker.ietf.org/doc/html/rfc4648#section-3.5
-//!
-#![cfg_attr(feature = "alloc", doc = "```")]
-#![cfg_attr(not(feature = "alloc"), doc = "```ignore")]
-//! use base64::{engine::general_purpose::STANDARD, Engine as _};
-//!
-//! assert_eq!(STANDARD.encode(b""), "");
-//! assert_eq!(STANDARD.encode(b"f"), "Zg==");
-//! assert_eq!(STANDARD.encode(b"fo"), "Zm8=");
-//! assert_eq!(STANDARD.encode(b"foo"), "Zm9v");
-//! ```
-//!
-//! Canonical encoding ensures that base64 encodings will be exactly the same,
-//! byte-for-byte, regardless of input length. But the `=` padding characters
-//! aren’t necessary for decoding, and they may be omitted by using a
-//! [`NO_PAD`][engine::general_purpose::NO_PAD] configuration:
-//!
-#![cfg_attr(feature = "alloc", doc = "```")]
-#![cfg_attr(not(feature = "alloc"), doc = "```ignore")]
-//! use base64::{engine::general_purpose::STANDARD_NO_PAD, Engine as _};
-//!
-//! assert_eq!(STANDARD_NO_PAD.encode(b""), "");
-//! assert_eq!(STANDARD_NO_PAD.encode(b"f"), "Zg");
-//! assert_eq!(STANDARD_NO_PAD.encode(b"fo"), "Zm8");
-//! assert_eq!(STANDARD_NO_PAD.encode(b"foo"), "Zm9v");
-//! ```
-//!
-//! The pre-configured `NO_PAD` engines will reject inputs containing padding
-//! `=` characters. To encode without padding and still accept padding while
-//! decoding, create an [engine][engine::general_purpose::GeneralPurpose] with
-//! that [padding mode][engine::DecodePaddingMode].
-//!
-#![cfg_attr(feature = "alloc", doc = "```")]
-#![cfg_attr(not(feature = "alloc"), doc = "```ignore")]
-//! # use base64::{engine::general_purpose::STANDARD_NO_PAD, Engine as _};
-//! assert_eq!(STANDARD_NO_PAD.decode(b"Zm8="), Err(base64::DecodeError::InvalidPadding));
-//! ```
-//!
-//! ### Further customization
-//!
-//! Decoding and encoding behavior can be customized by creating an
-//! [engine][engine::GeneralPurpose] with an [alphabet][alphabet::Alphabet] and
-//! [padding configuration][engine::GeneralPurposeConfig]:
-//!
-#![cfg_attr(feature = "alloc", doc = "```")]
-#![cfg_attr(not(feature = "alloc"), doc = "```ignore")]
-//! use base64::{engine, alphabet, Engine as _};
-//!
-//! // bizarro-world base64: +/ as the first symbols instead of the last
-//! let alphabet =
-//! alphabet::Alphabet::new("+/ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789")
-//! .unwrap();
-//!
-//! // a very weird config that encodes with padding but requires no padding when decoding...?
-//! let crazy_config = engine::GeneralPurposeConfig::new()
-//! .with_decode_allow_trailing_bits(true)
-//! .with_encode_padding(true)
-//! .with_decode_padding_mode(engine::DecodePaddingMode::RequireNone);
-//!
-//! let crazy_engine = engine::GeneralPurpose::new(&alphabet, crazy_config);
-//!
-//! let encoded = crazy_engine.encode(b"abc 123");
-//!
-//! ```
-//!
-//! ## Memory allocation
-//!
-//! The [decode][Engine::decode()] and [encode][Engine::encode()] engine methods
-//! allocate memory for their results – `decode` returns a `Vec<u8>` and
-//! `encode` returns a `String`. To instead decode or encode into a buffer that
-//! you allocated, use one of the alternative methods:
-//!
-//! #### Decoding
-//!
-//! | Method | Output | Allocates memory |
-//! | -------------------------- | ----------------------------- | ----------------------------- |
-//! | [`Engine::decode`] | returns a new `Vec<u8>` | always |
-//! | [`Engine::decode_vec`] | appends to provided `Vec<u8>` | if `Vec` lacks capacity |
-//! | [`Engine::decode_slice`] | writes to provided `&[u8]` | never
-//!
-//! #### Encoding
-//!
-//! | Method | Output | Allocates memory |
-//! | -------------------------- | ---------------------------- | ------------------------------ |
-//! | [`Engine::encode`] | returns a new `String` | always |
-//! | [`Engine::encode_string`] | appends to provided `String` | if `String` lacks capacity |
-//! | [`Engine::encode_slice`] | writes to provided `&[u8]` | never |
-//!
-//! ## Input and output
-//!
-//! The `base64` crate can [decode][Engine::decode()] and
-//! [encode][Engine::encode()] values in memory, or
-//! [`DecoderReader`][read::DecoderReader] and
-//! [`EncoderWriter`][write::EncoderWriter] provide streaming decoding and
-//! encoding for any [readable][std::io::Read] or [writable][std::io::Write]
-//! byte stream.
-//!
-//! #### Decoding
-//!
-#![cfg_attr(feature = "std", doc = "```")]
-#![cfg_attr(not(feature = "std"), doc = "```ignore")]
-//! # use std::io;
-//! use base64::{engine::general_purpose::STANDARD, read::DecoderReader};
-//!
-//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
-//! let mut input = io::stdin();
-//! let mut decoder = DecoderReader::new(&mut input, &STANDARD);
-//! io::copy(&mut decoder, &mut io::stdout())?;
-//! # Ok(())
-//! # }
-//! ```
-//!
-//! #### Encoding
-//!
-#![cfg_attr(feature = "std", doc = "```")]
-#![cfg_attr(not(feature = "std"), doc = "```ignore")]
-//! # use std::io;
-//! use base64::{engine::general_purpose::STANDARD, write::EncoderWriter};
-//!
-//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
-//! let mut output = io::stdout();
-//! let mut encoder = EncoderWriter::new(&mut output, &STANDARD);
-//! io::copy(&mut io::stdin(), &mut encoder)?;
-//! # Ok(())
-//! # }
-//! ```
-//!
-//! #### Display
-//!
-//! If you only need a base64 representation for implementing the
-//! [`Display`][std::fmt::Display] trait, use
-//! [`Base64Display`][display::Base64Display]:
-//!
-//! ```
-//! use base64::{display::Base64Display, engine::general_purpose::STANDARD};
-//!
-//! let value = Base64Display::new(b"\0\x01\x02\x03", &STANDARD);
-//! assert_eq!("base64: AAECAw==", format!("base64: {}", value));
-//! ```
-//!
-//! # Panics
-//!
-//! If length calculations result in overflowing `usize`, a panic will result.
-
-#![cfg_attr(feature = "cargo-clippy", allow(clippy::cast_lossless))]
-#![deny(
- missing_docs,
- trivial_casts,
- trivial_numeric_casts,
- unused_extern_crates,
- unused_import_braces,
- unused_results,
- variant_size_differences
-)]
-#![forbid(unsafe_code)]
-// Allow globally until https://github.com/rust-lang/rust-clippy/issues/8768 is resolved.
-// The desired state is to allow it only for the rstest_reuse import.
-#![allow(clippy::single_component_path_imports)]
-#![cfg_attr(not(any(feature = "std", test)), no_std)]
-
-#[cfg(any(feature = "alloc", test))]
-extern crate alloc;
-
-// has to be included at top level because of the way rstest_reuse defines its macros
-#[cfg(test)]
-use rstest_reuse;
-
-mod chunked_encoder;
-pub mod display;
-#[cfg(any(feature = "std", test))]
-pub mod read;
-#[cfg(any(feature = "std", test))]
-pub mod write;
-
-pub mod engine;
-pub use engine::Engine;
-
-pub mod alphabet;
-
-mod encode;
-#[allow(deprecated)]
-#[cfg(any(feature = "alloc", test))]
-pub use crate::encode::{encode, encode_engine, encode_engine_string};
-#[allow(deprecated)]
-pub use crate::encode::{encode_engine_slice, encoded_len, EncodeSliceError};
-
-mod decode;
-#[allow(deprecated)]
-#[cfg(any(feature = "alloc", test))]
-pub use crate::decode::{decode, decode_engine, decode_engine_vec};
-#[allow(deprecated)]
-pub use crate::decode::{decode_engine_slice, decoded_len_estimate, DecodeError, DecodeSliceError};
-
-pub mod prelude;
-
-#[cfg(test)]
-mod tests;
-
-const PAD_BYTE: u8 = b'=';
diff --git a/vendor/base64/src/prelude.rs b/vendor/base64/src/prelude.rs
deleted file mode 100644
index df5fdb49..00000000
--- a/vendor/base64/src/prelude.rs
+++ /dev/null
@@ -1,20 +0,0 @@
-//! Preconfigured engines for common use cases.
-//!
-//! These are re-exports of `const` engines in [crate::engine::general_purpose], renamed with a `BASE64_`
-//! prefix for those who prefer to `use` the entire path to a name.
-//!
-//! # Examples
-//!
-#![cfg_attr(feature = "alloc", doc = "```")]
-#![cfg_attr(not(feature = "alloc"), doc = "```ignore")]
-//! use base64::prelude::{Engine as _, BASE64_STANDARD_NO_PAD};
-//!
-//! assert_eq!("c29tZSBieXRlcw", &BASE64_STANDARD_NO_PAD.encode(b"some bytes"));
-//! ```
-
-pub use crate::engine::Engine;
-
-pub use crate::engine::general_purpose::STANDARD as BASE64_STANDARD;
-pub use crate::engine::general_purpose::STANDARD_NO_PAD as BASE64_STANDARD_NO_PAD;
-pub use crate::engine::general_purpose::URL_SAFE as BASE64_URL_SAFE;
-pub use crate::engine::general_purpose::URL_SAFE_NO_PAD as BASE64_URL_SAFE_NO_PAD;
diff --git a/vendor/base64/src/read/decoder.rs b/vendor/base64/src/read/decoder.rs
deleted file mode 100644
index 781f6f88..00000000
--- a/vendor/base64/src/read/decoder.rs
+++ /dev/null
@@ -1,335 +0,0 @@
-use crate::{engine::Engine, DecodeError, DecodeSliceError, PAD_BYTE};
-use std::{cmp, fmt, io};
-
-// This should be large, but it has to fit on the stack.
-pub(crate) const BUF_SIZE: usize = 1024;
-
-// 4 bytes of base64 data encode 3 bytes of raw data (modulo padding).
-const BASE64_CHUNK_SIZE: usize = 4;
-const DECODED_CHUNK_SIZE: usize = 3;
-
-/// A `Read` implementation that decodes base64 data read from an underlying reader.
-///
-/// # Examples
-///
-/// ```
-/// use std::io::Read;
-/// use std::io::Cursor;
-/// use base64::engine::general_purpose;
-///
-/// // use a cursor as the simplest possible `Read` -- in real code this is probably a file, etc.
-/// let mut wrapped_reader = Cursor::new(b"YXNkZg==");
-/// let mut decoder = base64::read::DecoderReader::new(
-/// &mut wrapped_reader,
-/// &general_purpose::STANDARD);
-///
-/// // handle errors as you normally would
-/// let mut result = Vec::new();
-/// decoder.read_to_end(&mut result).unwrap();
-///
-/// assert_eq!(b"asdf", &result[..]);
-///
-/// ```
-pub struct DecoderReader<'e, E: Engine, R: io::Read> {
- engine: &'e E,
- /// Where b64 data is read from
- inner: R,
-
- /// Holds b64 data read from the delegate reader.
- b64_buffer: [u8; BUF_SIZE],
- /// The start of the pending buffered data in `b64_buffer`.
- b64_offset: usize,
- /// The amount of buffered b64 data after `b64_offset` in `b64_len`.
- b64_len: usize,
- /// Since the caller may provide us with a buffer of size 1 or 2 that's too small to copy a
- /// decoded chunk in to, we have to be able to hang on to a few decoded bytes.
- /// Technically we only need to hold 2 bytes, but then we'd need a separate temporary buffer to
- /// decode 3 bytes into and then juggle copying one byte into the provided read buf and the rest
- /// into here, which seems like a lot of complexity for 1 extra byte of storage.
- decoded_chunk_buffer: [u8; DECODED_CHUNK_SIZE],
- /// Index of start of decoded data in `decoded_chunk_buffer`
- decoded_offset: usize,
- /// Length of decoded data after `decoded_offset` in `decoded_chunk_buffer`
- decoded_len: usize,
- /// Input length consumed so far.
- /// Used to provide accurate offsets in errors
- input_consumed_len: usize,
- /// offset of previously seen padding, if any
- padding_offset: Option<usize>,
-}
-
-// exclude b64_buffer as it's uselessly large
-impl<'e, E: Engine, R: io::Read> fmt::Debug for DecoderReader<'e, E, R> {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- f.debug_struct("DecoderReader")
- .field("b64_offset", &self.b64_offset)
- .field("b64_len", &self.b64_len)
- .field("decoded_chunk_buffer", &self.decoded_chunk_buffer)
- .field("decoded_offset", &self.decoded_offset)
- .field("decoded_len", &self.decoded_len)
- .field("input_consumed_len", &self.input_consumed_len)
- .field("padding_offset", &self.padding_offset)
- .finish()
- }
-}
-
-impl<'e, E: Engine, R: io::Read> DecoderReader<'e, E, R> {
- /// Create a new decoder that will read from the provided reader `r`.
- pub fn new(reader: R, engine: &'e E) -> Self {
- DecoderReader {
- engine,
- inner: reader,
- b64_buffer: [0; BUF_SIZE],
- b64_offset: 0,
- b64_len: 0,
- decoded_chunk_buffer: [0; DECODED_CHUNK_SIZE],
- decoded_offset: 0,
- decoded_len: 0,
- input_consumed_len: 0,
- padding_offset: None,
- }
- }
-
- /// Write as much as possible of the decoded buffer into the target buffer.
- /// Must only be called when there is something to write and space to write into.
- /// Returns a Result with the number of (decoded) bytes copied.
- fn flush_decoded_buf(&mut self, buf: &mut [u8]) -> io::Result<usize> {
- debug_assert!(self.decoded_len > 0);
- debug_assert!(!buf.is_empty());
-
- let copy_len = cmp::min(self.decoded_len, buf.len());
- debug_assert!(copy_len > 0);
- debug_assert!(copy_len <= self.decoded_len);
-
- buf[..copy_len].copy_from_slice(
- &self.decoded_chunk_buffer[self.decoded_offset..self.decoded_offset + copy_len],
- );
-
- self.decoded_offset += copy_len;
- self.decoded_len -= copy_len;
-
- debug_assert!(self.decoded_len < DECODED_CHUNK_SIZE);
-
- Ok(copy_len)
- }
-
- /// Read into the remaining space in the buffer after the current contents.
- /// Must only be called when there is space to read into in the buffer.
- /// Returns the number of bytes read.
- fn read_from_delegate(&mut self) -> io::Result<usize> {
- debug_assert!(self.b64_offset + self.b64_len < BUF_SIZE);
-
- let read = self
- .inner
- .read(&mut self.b64_buffer[self.b64_offset + self.b64_len..])?;
- self.b64_len += read;
-
- debug_assert!(self.b64_offset + self.b64_len <= BUF_SIZE);
-
- Ok(read)
- }
-
- /// Decode the requested number of bytes from the b64 buffer into the provided buffer. It's the
- /// caller's responsibility to choose the number of b64 bytes to decode correctly.
- ///
- /// Returns a Result with the number of decoded bytes written to `buf`.
- ///
- /// # Panics
- ///
- /// panics if `buf` is too small
- fn decode_to_buf(&mut self, b64_len_to_decode: usize, buf: &mut [u8]) -> io::Result<usize> {
- debug_assert!(self.b64_len >= b64_len_to_decode);
- debug_assert!(self.b64_offset + self.b64_len <= BUF_SIZE);
- debug_assert!(!buf.is_empty());
-
- let b64_to_decode = &self.b64_buffer[self.b64_offset..self.b64_offset + b64_len_to_decode];
- let decode_metadata = self
- .engine
- .internal_decode(
- b64_to_decode,
- buf,
- self.engine.internal_decoded_len_estimate(b64_len_to_decode),
- )
- .map_err(|dse| match dse {
- DecodeSliceError::DecodeError(de) => {
- match de {
- DecodeError::InvalidByte(offset, byte) => {
- match (byte, self.padding_offset) {
- // if there was padding in a previous block of decoding that happened to
- // be correct, and we now find more padding that happens to be incorrect,
- // to be consistent with non-reader decodes, record the error at the first
- // padding
- (PAD_BYTE, Some(first_pad_offset)) => {
- DecodeError::InvalidByte(first_pad_offset, PAD_BYTE)
- }
- _ => {
- DecodeError::InvalidByte(self.input_consumed_len + offset, byte)
- }
- }
- }
- DecodeError::InvalidLength(len) => {
- DecodeError::InvalidLength(self.input_consumed_len + len)
- }
- DecodeError::InvalidLastSymbol(offset, byte) => {
- DecodeError::InvalidLastSymbol(self.input_consumed_len + offset, byte)
- }
- DecodeError::InvalidPadding => DecodeError::InvalidPadding,
- }
- }
- DecodeSliceError::OutputSliceTooSmall => {
- unreachable!("buf is sized correctly in calling code")
- }
- })
- .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
-
- if let Some(offset) = self.padding_offset {
- // we've already seen padding
- if decode_metadata.decoded_len > 0 {
- // we read more after already finding padding; report error at first padding byte
- return Err(io::Error::new(
- io::ErrorKind::InvalidData,
- DecodeError::InvalidByte(offset, PAD_BYTE),
- ));
- }
- }
-
- self.padding_offset = self.padding_offset.or(decode_metadata
- .padding_offset
- .map(|offset| self.input_consumed_len + offset));
- self.input_consumed_len += b64_len_to_decode;
- self.b64_offset += b64_len_to_decode;
- self.b64_len -= b64_len_to_decode;
-
- debug_assert!(self.b64_offset + self.b64_len <= BUF_SIZE);
-
- Ok(decode_metadata.decoded_len)
- }
-
- /// Unwraps this `DecoderReader`, returning the base reader which it reads base64 encoded
- /// input from.
- ///
- /// Because `DecoderReader` performs internal buffering, the state of the inner reader is
- /// unspecified. This function is mainly provided because the inner reader type may provide
- /// additional functionality beyond the `Read` implementation which may still be useful.
- pub fn into_inner(self) -> R {
- self.inner
- }
-}
-
-impl<'e, E: Engine, R: io::Read> io::Read for DecoderReader<'e, E, R> {
- /// Decode input from the wrapped reader.
- ///
- /// Under non-error circumstances, this returns `Ok` with the value being the number of bytes
- /// written in `buf`.
- ///
- /// Where possible, this function buffers base64 to minimize the number of read() calls to the
- /// delegate reader.
- ///
- /// # Errors
- ///
- /// Any errors emitted by the delegate reader are returned. Decoding errors due to invalid
- /// base64 are also possible, and will have `io::ErrorKind::InvalidData`.
- fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
- if buf.is_empty() {
- return Ok(0);
- }
-
- // offset == BUF_SIZE when we copied it all last time
- debug_assert!(self.b64_offset <= BUF_SIZE);
- debug_assert!(self.b64_offset + self.b64_len <= BUF_SIZE);
- debug_assert!(if self.b64_offset == BUF_SIZE {
- self.b64_len == 0
- } else {
- self.b64_len <= BUF_SIZE
- });
-
- debug_assert!(if self.decoded_len == 0 {
- // can be = when we were able to copy the complete chunk
- self.decoded_offset <= DECODED_CHUNK_SIZE
- } else {
- self.decoded_offset < DECODED_CHUNK_SIZE
- });
-
- // We shouldn't ever decode into decoded_buffer when we can't immediately write at least one
- // byte into the provided buf, so the effective length should only be 3 momentarily between
- // when we decode and when we copy into the target buffer.
- debug_assert!(self.decoded_len < DECODED_CHUNK_SIZE);
- debug_assert!(self.decoded_len + self.decoded_offset <= DECODED_CHUNK_SIZE);
-
- if self.decoded_len > 0 {
- // we have a few leftover decoded bytes; flush that rather than pull in more b64
- self.flush_decoded_buf(buf)
- } else {
- let mut at_eof = false;
- while self.b64_len < BASE64_CHUNK_SIZE {
- // Copy any bytes we have to the start of the buffer.
- self.b64_buffer
- .copy_within(self.b64_offset..self.b64_offset + self.b64_len, 0);
- self.b64_offset = 0;
-
- // then fill in more data
- let read = self.read_from_delegate()?;
- if read == 0 {
- // we never read into an empty buf, so 0 => we've hit EOF
- at_eof = true;
- break;
- }
- }
-
- if self.b64_len == 0 {
- debug_assert!(at_eof);
- // we must be at EOF, and we have no data left to decode
- return Ok(0);
- };
-
- debug_assert!(if at_eof {
- // if we are at eof, we may not have a complete chunk
- self.b64_len > 0
- } else {
- // otherwise, we must have at least one chunk
- self.b64_len >= BASE64_CHUNK_SIZE
- });
-
- debug_assert_eq!(0, self.decoded_len);
-
- if buf.len() < DECODED_CHUNK_SIZE {
- // caller requested an annoyingly short read
- // have to write to a tmp buf first to avoid double mutable borrow
- let mut decoded_chunk = [0_u8; DECODED_CHUNK_SIZE];
- // if we are at eof, could have less than BASE64_CHUNK_SIZE, in which case we have
- // to assume that these last few tokens are, in fact, valid (i.e. must be 2-4 b64
- // tokens, not 1, since 1 token can't decode to 1 byte).
- let to_decode = cmp::min(self.b64_len, BASE64_CHUNK_SIZE);
-
- let decoded = self.decode_to_buf(to_decode, &mut decoded_chunk[..])?;
- self.decoded_chunk_buffer[..decoded].copy_from_slice(&decoded_chunk[..decoded]);
-
- self.decoded_offset = 0;
- self.decoded_len = decoded;
-
- // can be less than 3 on last block due to padding
- debug_assert!(decoded <= 3);
-
- self.flush_decoded_buf(buf)
- } else {
- let b64_bytes_that_can_decode_into_buf = (buf.len() / DECODED_CHUNK_SIZE)
- .checked_mul(BASE64_CHUNK_SIZE)
- .expect("too many chunks");
- debug_assert!(b64_bytes_that_can_decode_into_buf >= BASE64_CHUNK_SIZE);
-
- let b64_bytes_available_to_decode = if at_eof {
- self.b64_len
- } else {
- // only use complete chunks
- self.b64_len - self.b64_len % 4
- };
-
- let actual_decode_len = cmp::min(
- b64_bytes_that_can_decode_into_buf,
- b64_bytes_available_to_decode,
- );
- self.decode_to_buf(actual_decode_len, buf)
- }
- }
- }
-}
diff --git a/vendor/base64/src/read/decoder_tests.rs b/vendor/base64/src/read/decoder_tests.rs
deleted file mode 100644
index f3431457..00000000
--- a/vendor/base64/src/read/decoder_tests.rs
+++ /dev/null
@@ -1,487 +0,0 @@
-use std::{
- cmp,
- io::{self, Read as _},
- iter,
-};
-
-use rand::{Rng as _, RngCore as _};
-
-use super::decoder::{DecoderReader, BUF_SIZE};
-use crate::{
- alphabet,
- engine::{general_purpose::STANDARD, Engine, GeneralPurpose},
- tests::{random_alphabet, random_config, random_engine},
- DecodeError, PAD_BYTE,
-};
-
-#[test]
-fn simple() {
- let tests: &[(&[u8], &[u8])] = &[
- (&b"0"[..], &b"MA=="[..]),
- (b"01", b"MDE="),
- (b"012", b"MDEy"),
- (b"0123", b"MDEyMw=="),
- (b"01234", b"MDEyMzQ="),
- (b"012345", b"MDEyMzQ1"),
- (b"0123456", b"MDEyMzQ1Ng=="),
- (b"01234567", b"MDEyMzQ1Njc="),
- (b"012345678", b"MDEyMzQ1Njc4"),
- (b"0123456789", b"MDEyMzQ1Njc4OQ=="),
- ][..];
-
- for (text_expected, base64data) in tests.iter() {
- // Read n bytes at a time.
- for n in 1..base64data.len() + 1 {
- let mut wrapped_reader = io::Cursor::new(base64data);
- let mut decoder = DecoderReader::new(&mut wrapped_reader, &STANDARD);
-
- // handle errors as you normally would
- let mut text_got = Vec::new();
- let mut buffer = vec![0u8; n];
- while let Ok(read) = decoder.read(&mut buffer[..]) {
- if read == 0 {
- break;
- }
- text_got.extend_from_slice(&buffer[..read]);
- }
-
- assert_eq!(
- text_got,
- *text_expected,
- "\nGot: {}\nExpected: {}",
- String::from_utf8_lossy(&text_got[..]),
- String::from_utf8_lossy(text_expected)
- );
- }
- }
-}
-
-// Make sure we error out on trailing junk.
-#[test]
-fn trailing_junk() {
- let tests: &[&[u8]] = &[&b"MDEyMzQ1Njc4*!@#$%^&"[..], b"MDEyMzQ1Njc4OQ== "][..];
-
- for base64data in tests.iter() {
- // Read n bytes at a time.
- for n in 1..base64data.len() + 1 {
- let mut wrapped_reader = io::Cursor::new(base64data);
- let mut decoder = DecoderReader::new(&mut wrapped_reader, &STANDARD);
-
- // handle errors as you normally would
- let mut buffer = vec![0u8; n];
- let mut saw_error = false;
- loop {
- match decoder.read(&mut buffer[..]) {
- Err(_) => {
- saw_error = true;
- break;
- }
- Ok(0) => break,
- Ok(_len) => (),
- }
- }
-
- assert!(saw_error);
- }
- }
-}
-
-#[test]
-fn handles_short_read_from_delegate() {
- let mut rng = rand::thread_rng();
- let mut bytes = Vec::new();
- let mut b64 = String::new();
- let mut decoded = Vec::new();
-
- for _ in 0..10_000 {
- bytes.clear();
- b64.clear();
- decoded.clear();
-
- let size = rng.gen_range(0..(10 * BUF_SIZE));
- bytes.extend(iter::repeat(0).take(size));
- bytes.truncate(size);
- rng.fill_bytes(&mut bytes[..size]);
- assert_eq!(size, bytes.len());
-
- let engine = random_engine(&mut rng);
- engine.encode_string(&bytes[..], &mut b64);
-
- let mut wrapped_reader = io::Cursor::new(b64.as_bytes());
- let mut short_reader = RandomShortRead {
- delegate: &mut wrapped_reader,
- rng: &mut rng,
- };
-
- let mut decoder = DecoderReader::new(&mut short_reader, &engine);
-
- let decoded_len = decoder.read_to_end(&mut decoded).unwrap();
- assert_eq!(size, decoded_len);
- assert_eq!(&bytes[..], &decoded[..]);
- }
-}
-
-#[test]
-fn read_in_short_increments() {
- let mut rng = rand::thread_rng();
- let mut bytes = Vec::new();
- let mut b64 = String::new();
- let mut decoded = Vec::new();
-
- for _ in 0..10_000 {
- bytes.clear();
- b64.clear();
- decoded.clear();
-
- let size = rng.gen_range(0..(10 * BUF_SIZE));
- bytes.extend(iter::repeat(0).take(size));
- // leave room to play around with larger buffers
- decoded.extend(iter::repeat(0).take(size * 3));
-
- rng.fill_bytes(&mut bytes[..]);
- assert_eq!(size, bytes.len());
-
- let engine = random_engine(&mut rng);
-
- engine.encode_string(&bytes[..], &mut b64);
-
- let mut wrapped_reader = io::Cursor::new(&b64[..]);
- let mut decoder = DecoderReader::new(&mut wrapped_reader, &engine);
-
- consume_with_short_reads_and_validate(&mut rng, &bytes[..], &mut decoded, &mut decoder);
- }
-}
-
-#[test]
-fn read_in_short_increments_with_short_delegate_reads() {
- let mut rng = rand::thread_rng();
- let mut bytes = Vec::new();
- let mut b64 = String::new();
- let mut decoded = Vec::new();
-
- for _ in 0..10_000 {
- bytes.clear();
- b64.clear();
- decoded.clear();
-
- let size = rng.gen_range(0..(10 * BUF_SIZE));
- bytes.extend(iter::repeat(0).take(size));
- // leave room to play around with larger buffers
- decoded.extend(iter::repeat(0).take(size * 3));
-
- rng.fill_bytes(&mut bytes[..]);
- assert_eq!(size, bytes.len());
-
- let engine = random_engine(&mut rng);
-
- engine.encode_string(&bytes[..], &mut b64);
-
- let mut base_reader = io::Cursor::new(&b64[..]);
- let mut decoder = DecoderReader::new(&mut base_reader, &engine);
- let mut short_reader = RandomShortRead {
- delegate: &mut decoder,
- rng: &mut rand::thread_rng(),
- };
-
- consume_with_short_reads_and_validate(
- &mut rng,
- &bytes[..],
- &mut decoded,
- &mut short_reader,
- );
- }
-}
-
-#[test]
-fn reports_invalid_last_symbol_correctly() {
- let mut rng = rand::thread_rng();
- let mut bytes = Vec::new();
- let mut b64 = String::new();
- let mut b64_bytes = Vec::new();
- let mut decoded = Vec::new();
- let mut bulk_decoded = Vec::new();
-
- for _ in 0..1_000 {
- bytes.clear();
- b64.clear();
- b64_bytes.clear();
-
- let size = rng.gen_range(1..(10 * BUF_SIZE));
- bytes.extend(iter::repeat(0).take(size));
- decoded.extend(iter::repeat(0).take(size));
- rng.fill_bytes(&mut bytes[..]);
- assert_eq!(size, bytes.len());
-
- let config = random_config(&mut rng);
- let alphabet = random_alphabet(&mut rng);
- // changing padding will cause invalid padding errors when we twiddle the last byte
- let engine = GeneralPurpose::new(alphabet, config.with_encode_padding(false));
- engine.encode_string(&bytes[..], &mut b64);
- b64_bytes.extend(b64.bytes());
- assert_eq!(b64_bytes.len(), b64.len());
-
- // change the last character to every possible symbol. Should behave the same as bulk
- // decoding whether invalid or valid.
- for &s1 in alphabet.symbols.iter() {
- decoded.clear();
- bulk_decoded.clear();
-
- // replace the last
- *b64_bytes.last_mut().unwrap() = s1;
- let bulk_res = engine.decode_vec(&b64_bytes[..], &mut bulk_decoded);
-
- let mut wrapped_reader = io::Cursor::new(&b64_bytes[..]);
- let mut decoder = DecoderReader::new(&mut wrapped_reader, &engine);
-
- let stream_res = decoder.read_to_end(&mut decoded).map(|_| ()).map_err(|e| {
- e.into_inner()
- .and_then(|e| e.downcast::<DecodeError>().ok())
- });
-
- assert_eq!(bulk_res.map_err(|e| Some(Box::new(e))), stream_res);
- }
- }
-}
-
-#[test]
-fn reports_invalid_byte_correctly() {
- let mut rng = rand::thread_rng();
- let mut bytes = Vec::new();
- let mut b64 = String::new();
- let mut stream_decoded = Vec::new();
- let mut bulk_decoded = Vec::new();
-
- for _ in 0..10_000 {
- bytes.clear();
- b64.clear();
- stream_decoded.clear();
- bulk_decoded.clear();
-
- let size = rng.gen_range(1..(10 * BUF_SIZE));
- bytes.extend(iter::repeat(0).take(size));
- rng.fill_bytes(&mut bytes[..size]);
- assert_eq!(size, bytes.len());
-
- let engine = GeneralPurpose::new(&alphabet::STANDARD, random_config(&mut rng));
-
- engine.encode_string(&bytes[..], &mut b64);
- // replace one byte, somewhere, with '*', which is invalid
- let bad_byte_pos = rng.gen_range(0..b64.len());
- let mut b64_bytes = b64.bytes().collect::<Vec<u8>>();
- b64_bytes[bad_byte_pos] = b'*';
-
- let mut wrapped_reader = io::Cursor::new(b64_bytes.clone());
- let mut decoder = DecoderReader::new(&mut wrapped_reader, &engine);
-
- let read_decode_err = decoder
- .read_to_end(&mut stream_decoded)
- .map_err(|e| {
- let kind = e.kind();
- let inner = e
- .into_inner()
- .and_then(|e| e.downcast::<DecodeError>().ok());
- inner.map(|i| (*i, kind))
- })
- .err()
- .and_then(|o| o);
-
- let bulk_decode_err = engine.decode_vec(&b64_bytes[..], &mut bulk_decoded).err();
-
- // it's tricky to predict where the invalid data's offset will be since if it's in the last
- // chunk it will be reported at the first padding location because it's treated as invalid
- // padding. So, we just check that it's the same as it is for decoding all at once.
- assert_eq!(
- bulk_decode_err.map(|e| (e, io::ErrorKind::InvalidData)),
- read_decode_err
- );
- }
-}
-
-#[test]
-fn internal_padding_error_with_short_read_concatenated_texts_invalid_byte_error() {
- let mut rng = rand::thread_rng();
- let mut bytes = Vec::new();
- let mut b64 = String::new();
- let mut reader_decoded = Vec::new();
- let mut bulk_decoded = Vec::new();
-
- // encodes with padding, requires that padding be present so we don't get InvalidPadding
- // just because padding is there at all
- let engine = STANDARD;
-
- for _ in 0..10_000 {
- bytes.clear();
- b64.clear();
- reader_decoded.clear();
- bulk_decoded.clear();
-
- // at least 2 bytes so there can be a split point between bytes
- let size = rng.gen_range(2..(10 * BUF_SIZE));
- bytes.resize(size, 0);
- rng.fill_bytes(&mut bytes[..size]);
-
- // Concatenate two valid b64s, yielding padding in the middle.
- // This avoids scenarios that are challenging to assert on, like random padding location
- // that might be InvalidLastSymbol when decoded at certain buffer sizes but InvalidByte
- // when done all at once.
- let split = loop {
- // find a split point that will produce padding on the first part
- let s = rng.gen_range(1..size);
- if s % 3 != 0 {
- // short enough to need padding
- break s;
- };
- };
-
- engine.encode_string(&bytes[..split], &mut b64);
- assert!(b64.contains('='), "split: {}, b64: {}", split, b64);
- let bad_byte_pos = b64.find('=').unwrap();
- engine.encode_string(&bytes[split..], &mut b64);
- let b64_bytes = b64.as_bytes();
-
- // short read to make it plausible for padding to happen on a read boundary
- let read_len = rng.gen_range(1..10);
- let mut wrapped_reader = ShortRead {
- max_read_len: read_len,
- delegate: io::Cursor::new(&b64_bytes),
- };
-
- let mut decoder = DecoderReader::new(&mut wrapped_reader, &engine);
-
- let read_decode_err = decoder
- .read_to_end(&mut reader_decoded)
- .map_err(|e| {
- *e.into_inner()
- .and_then(|e| e.downcast::<DecodeError>().ok())
- .unwrap()
- })
- .unwrap_err();
-
- let bulk_decode_err = engine.decode_vec(b64_bytes, &mut bulk_decoded).unwrap_err();
-
- assert_eq!(
- bulk_decode_err,
- read_decode_err,
- "read len: {}, bad byte pos: {}, b64: {}",
- read_len,
- bad_byte_pos,
- std::str::from_utf8(b64_bytes).unwrap()
- );
- assert_eq!(
- DecodeError::InvalidByte(
- split / 3 * 4
- + match split % 3 {
- 1 => 2,
- 2 => 3,
- _ => unreachable!(),
- },
- PAD_BYTE
- ),
- read_decode_err
- );
- }
-}
-
-#[test]
-fn internal_padding_anywhere_error() {
- let mut rng = rand::thread_rng();
- let mut bytes = Vec::new();
- let mut b64 = String::new();
- let mut reader_decoded = Vec::new();
-
- // encodes with padding, requires that padding be present so we don't get InvalidPadding
- // just because padding is there at all
- let engine = STANDARD;
-
- for _ in 0..10_000 {
- bytes.clear();
- b64.clear();
- reader_decoded.clear();
-
- bytes.resize(10 * BUF_SIZE, 0);
- rng.fill_bytes(&mut bytes[..]);
-
- // Just shove a padding byte in there somewhere.
- // The specific error to expect is challenging to predict precisely because it
- // will vary based on the position of the padding in the quad and the read buffer
- // length, but SOMETHING should go wrong.
-
- engine.encode_string(&bytes[..], &mut b64);
- let mut b64_bytes = b64.as_bytes().to_vec();
- // put padding somewhere other than the last quad
- b64_bytes[rng.gen_range(0..bytes.len() - 4)] = PAD_BYTE;
-
- // short read to make it plausible for padding to happen on a read boundary
- let read_len = rng.gen_range(1..10);
- let mut wrapped_reader = ShortRead {
- max_read_len: read_len,
- delegate: io::Cursor::new(&b64_bytes),
- };
-
- let mut decoder = DecoderReader::new(&mut wrapped_reader, &engine);
-
- let result = decoder.read_to_end(&mut reader_decoded);
- assert!(result.is_err());
- }
-}
-
-fn consume_with_short_reads_and_validate<R: io::Read>(
- rng: &mut rand::rngs::ThreadRng,
- expected_bytes: &[u8],
- decoded: &mut [u8],
- short_reader: &mut R,
-) {
- let mut total_read = 0_usize;
- loop {
- assert!(
- total_read <= expected_bytes.len(),
- "tr {} size {}",
- total_read,
- expected_bytes.len()
- );
- if total_read == expected_bytes.len() {
- assert_eq!(expected_bytes, &decoded[..total_read]);
- // should be done
- assert_eq!(0, short_reader.read(&mut *decoded).unwrap());
- // didn't write anything
- assert_eq!(expected_bytes, &decoded[..total_read]);
-
- break;
- }
- let decode_len = rng.gen_range(1..cmp::max(2, expected_bytes.len() * 2));
-
- let read = short_reader
- .read(&mut decoded[total_read..total_read + decode_len])
- .unwrap();
- total_read += read;
- }
-}
-
-/// Limits how many bytes a reader will provide in each read call.
-/// Useful for shaking out code that may work fine only with typical input sources that always fill
-/// the buffer.
-struct RandomShortRead<'a, 'b, R: io::Read, N: rand::Rng> {
- delegate: &'b mut R,
- rng: &'a mut N,
-}
-
-impl<'a, 'b, R: io::Read, N: rand::Rng> io::Read for RandomShortRead<'a, 'b, R, N> {
- fn read(&mut self, buf: &mut [u8]) -> Result<usize, io::Error> {
- // avoid 0 since it means EOF for non-empty buffers
- let effective_len = cmp::min(self.rng.gen_range(1..20), buf.len());
-
- self.delegate.read(&mut buf[..effective_len])
- }
-}
-
-struct ShortRead<R: io::Read> {
- delegate: R,
- max_read_len: usize,
-}
-
-impl<R: io::Read> io::Read for ShortRead<R> {
- fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
- let len = self.max_read_len.max(buf.len());
- self.delegate.read(&mut buf[..len])
- }
-}
diff --git a/vendor/base64/src/read/mod.rs b/vendor/base64/src/read/mod.rs
deleted file mode 100644
index 85606448..00000000
--- a/vendor/base64/src/read/mod.rs
+++ /dev/null
@@ -1,6 +0,0 @@
-//! Implementations of `io::Read` to transparently decode base64.
-mod decoder;
-pub use self::decoder::DecoderReader;
-
-#[cfg(test)]
-mod decoder_tests;
diff --git a/vendor/base64/src/tests.rs b/vendor/base64/src/tests.rs
deleted file mode 100644
index 7083b543..00000000
--- a/vendor/base64/src/tests.rs
+++ /dev/null
@@ -1,117 +0,0 @@
-use std::str;
-
-use rand::{
- distributions,
- distributions::{Distribution as _, Uniform},
- seq::SliceRandom,
- Rng, SeedableRng,
-};
-
-use crate::{
- alphabet,
- encode::encoded_len,
- engine::{
- general_purpose::{GeneralPurpose, GeneralPurposeConfig},
- Config, DecodePaddingMode, Engine,
- },
-};
-
-#[test]
-fn roundtrip_random_config_short() {
- // exercise the slower encode/decode routines that operate on shorter buffers more vigorously
- roundtrip_random_config(Uniform::new(0, 50), 10_000);
-}
-
-#[test]
-fn roundtrip_random_config_long() {
- roundtrip_random_config(Uniform::new(0, 1000), 10_000);
-}
-
-pub fn assert_encode_sanity(encoded: &str, padded: bool, input_len: usize) {
- let input_rem = input_len % 3;
- let expected_padding_len = if input_rem > 0 {
- if padded {
- 3 - input_rem
- } else {
- 0
- }
- } else {
- 0
- };
-
- let expected_encoded_len = encoded_len(input_len, padded).unwrap();
-
- assert_eq!(expected_encoded_len, encoded.len());
-
- let padding_len = encoded.chars().filter(|&c| c == '=').count();
-
- assert_eq!(expected_padding_len, padding_len);
-
- let _ = str::from_utf8(encoded.as_bytes()).expect("Base64 should be valid utf8");
-}
-
-fn roundtrip_random_config(input_len_range: Uniform<usize>, iterations: u32) {
- let mut input_buf: Vec<u8> = Vec::new();
- let mut encoded_buf = String::new();
- let mut rng = rand::rngs::SmallRng::from_entropy();
-
- for _ in 0..iterations {
- input_buf.clear();
- encoded_buf.clear();
-
- let input_len = input_len_range.sample(&mut rng);
-
- let engine = random_engine(&mut rng);
-
- for _ in 0..input_len {
- input_buf.push(rng.gen());
- }
-
- engine.encode_string(&input_buf, &mut encoded_buf);
-
- assert_encode_sanity(&encoded_buf, engine.config().encode_padding(), input_len);
-
- assert_eq!(input_buf, engine.decode(&encoded_buf).unwrap());
- }
-}
-
-pub fn random_config<R: Rng>(rng: &mut R) -> GeneralPurposeConfig {
- let mode = rng.gen();
- GeneralPurposeConfig::new()
- .with_encode_padding(match mode {
- DecodePaddingMode::Indifferent => rng.gen(),
- DecodePaddingMode::RequireCanonical => true,
- DecodePaddingMode::RequireNone => false,
- })
- .with_decode_padding_mode(mode)
- .with_decode_allow_trailing_bits(rng.gen())
-}
-
-impl distributions::Distribution<DecodePaddingMode> for distributions::Standard {
- fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> DecodePaddingMode {
- match rng.gen_range(0..=2) {
- 0 => DecodePaddingMode::Indifferent,
- 1 => DecodePaddingMode::RequireCanonical,
- _ => DecodePaddingMode::RequireNone,
- }
- }
-}
-
-pub fn random_alphabet<R: Rng>(rng: &mut R) -> &'static alphabet::Alphabet {
- ALPHABETS.choose(rng).unwrap()
-}
-
-pub fn random_engine<R: Rng>(rng: &mut R) -> GeneralPurpose {
- let alphabet = random_alphabet(rng);
- let config = random_config(rng);
- GeneralPurpose::new(alphabet, config)
-}
-
-const ALPHABETS: &[alphabet::Alphabet] = &[
- alphabet::URL_SAFE,
- alphabet::STANDARD,
- alphabet::CRYPT,
- alphabet::BCRYPT,
- alphabet::IMAP_MUTF7,
- alphabet::BIN_HEX,
-];
diff --git a/vendor/base64/src/write/encoder.rs b/vendor/base64/src/write/encoder.rs
deleted file mode 100644
index 1c19bb42..00000000
--- a/vendor/base64/src/write/encoder.rs
+++ /dev/null
@@ -1,407 +0,0 @@
-use crate::engine::Engine;
-use std::{
- cmp, fmt, io,
- io::{ErrorKind, Result},
-};
-
-pub(crate) const BUF_SIZE: usize = 1024;
-/// The most bytes whose encoding will fit in `BUF_SIZE`
-const MAX_INPUT_LEN: usize = BUF_SIZE / 4 * 3;
-// 3 bytes of input = 4 bytes of base64, always (because we don't allow line wrapping)
-const MIN_ENCODE_CHUNK_SIZE: usize = 3;
-
-/// A `Write` implementation that base64 encodes data before delegating to the wrapped writer.
-///
-/// Because base64 has special handling for the end of the input data (padding, etc), there's a
-/// `finish()` method on this type that encodes any leftover input bytes and adds padding if
-/// appropriate. It's called automatically when deallocated (see the `Drop` implementation), but
-/// any error that occurs when invoking the underlying writer will be suppressed. If you want to
-/// handle such errors, call `finish()` yourself.
-///
-/// # Examples
-///
-/// ```
-/// use std::io::Write;
-/// use base64::engine::general_purpose;
-///
-/// // use a vec as the simplest possible `Write` -- in real code this is probably a file, etc.
-/// let mut enc = base64::write::EncoderWriter::new(Vec::new(), &general_purpose::STANDARD);
-///
-/// // handle errors as you normally would
-/// enc.write_all(b"asdf").unwrap();
-///
-/// // could leave this out to be called by Drop, if you don't care
-/// // about handling errors or getting the delegate writer back
-/// let delegate = enc.finish().unwrap();
-///
-/// // base64 was written to the writer
-/// assert_eq!(b"YXNkZg==", &delegate[..]);
-///
-/// ```
-///
-/// # Panics
-///
-/// Calling `write()` (or related methods) or `finish()` after `finish()` has completed without
-/// error is invalid and will panic.
-///
-/// # Errors
-///
-/// Base64 encoding itself does not generate errors, but errors from the wrapped writer will be
-/// returned as per the contract of `Write`.
-///
-/// # Performance
-///
-/// It has some minor performance loss compared to encoding slices (a couple percent).
-/// It does not do any heap allocation.
-///
-/// # Limitations
-///
-/// Owing to the specification of the `write` and `flush` methods on the `Write` trait and their
-/// implications for a buffering implementation, these methods may not behave as expected. In
-/// particular, calling `write_all` on this interface may fail with `io::ErrorKind::WriteZero`.
-/// See the documentation of the `Write` trait implementation for further details.
-pub struct EncoderWriter<'e, E: Engine, W: io::Write> {
- engine: &'e E,
- /// Where encoded data is written to. It's an Option as it's None immediately before Drop is
- /// called so that finish() can return the underlying writer. None implies that finish() has
- /// been called successfully.
- delegate: Option<W>,
- /// Holds a partial chunk, if any, after the last `write()`, so that we may then fill the chunk
- /// with the next `write()`, encode it, then proceed with the rest of the input normally.
- extra_input: [u8; MIN_ENCODE_CHUNK_SIZE],
- /// How much of `extra` is occupied, in `[0, MIN_ENCODE_CHUNK_SIZE]`.
- extra_input_occupied_len: usize,
- /// Buffer to encode into. May hold leftover encoded bytes from a previous write call that the underlying writer
- /// did not write last time.
- output: [u8; BUF_SIZE],
- /// How much of `output` is occupied with encoded data that couldn't be written last time
- output_occupied_len: usize,
- /// panic safety: don't write again in destructor if writer panicked while we were writing to it
- panicked: bool,
-}
-
-impl<'e, E: Engine, W: io::Write> fmt::Debug for EncoderWriter<'e, E, W> {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- write!(
- f,
- "extra_input: {:?} extra_input_occupied_len:{:?} output[..5]: {:?} output_occupied_len: {:?}",
- self.extra_input,
- self.extra_input_occupied_len,
- &self.output[0..5],
- self.output_occupied_len
- )
- }
-}
-
-impl<'e, E: Engine, W: io::Write> EncoderWriter<'e, E, W> {
- /// Create a new encoder that will write to the provided delegate writer.
- pub fn new(delegate: W, engine: &'e E) -> EncoderWriter<'e, E, W> {
- EncoderWriter {
- engine,
- delegate: Some(delegate),
- extra_input: [0u8; MIN_ENCODE_CHUNK_SIZE],
- extra_input_occupied_len: 0,
- output: [0u8; BUF_SIZE],
- output_occupied_len: 0,
- panicked: false,
- }
- }
-
- /// Encode all remaining buffered data and write it, including any trailing incomplete input
- /// triples and associated padding.
- ///
- /// Once this succeeds, no further writes or calls to this method are allowed.
- ///
- /// This may write to the delegate writer multiple times if the delegate writer does not accept
- /// all input provided to its `write` each invocation.
- ///
- /// If you don't care about error handling, it is not necessary to call this function, as the
- /// equivalent finalization is done by the Drop impl.
- ///
- /// Returns the writer that this was constructed around.
- ///
- /// # Errors
- ///
- /// The first error that is not of `ErrorKind::Interrupted` will be returned.
- pub fn finish(&mut self) -> Result<W> {
- // If we could consume self in finish(), we wouldn't have to worry about this case, but
- // finish() is retryable in the face of I/O errors, so we can't consume here.
- if self.delegate.is_none() {
- panic!("Encoder has already had finish() called");
- };
-
- self.write_final_leftovers()?;
-
- let writer = self.delegate.take().expect("Writer must be present");
-
- Ok(writer)
- }
-
- /// Write any remaining buffered data to the delegate writer.
- fn write_final_leftovers(&mut self) -> Result<()> {
- if self.delegate.is_none() {
- // finish() has already successfully called this, and we are now in drop() with a None
- // writer, so just no-op
- return Ok(());
- }
-
- self.write_all_encoded_output()?;
-
- if self.extra_input_occupied_len > 0 {
- let encoded_len = self
- .engine
- .encode_slice(
- &self.extra_input[..self.extra_input_occupied_len],
- &mut self.output[..],
- )
- .expect("buffer is large enough");
-
- self.output_occupied_len = encoded_len;
-
- self.write_all_encoded_output()?;
-
- // write succeeded, do not write the encoding of extra again if finish() is retried
- self.extra_input_occupied_len = 0;
- }
-
- Ok(())
- }
-
- /// Write as much of the encoded output to the delegate writer as it will accept, and store the
- /// leftovers to be attempted at the next write() call. Updates `self.output_occupied_len`.
- ///
- /// # Errors
- ///
- /// Errors from the delegate writer are returned. In the case of an error,
- /// `self.output_occupied_len` will not be updated, as errors from `write` are specified to mean
- /// that no write took place.
- fn write_to_delegate(&mut self, current_output_len: usize) -> Result<()> {
- self.panicked = true;
- let res = self
- .delegate
- .as_mut()
- .expect("Writer must be present")
- .write(&self.output[..current_output_len]);
- self.panicked = false;
-
- res.map(|consumed| {
- debug_assert!(consumed <= current_output_len);
-
- if consumed < current_output_len {
- self.output_occupied_len = current_output_len.checked_sub(consumed).unwrap();
- // If we're blocking on I/O, the minor inefficiency of copying bytes to the
- // start of the buffer is the least of our concerns...
- // TODO Rotate moves more than we need to; copy_within now stable.
- self.output.rotate_left(consumed);
- } else {
- self.output_occupied_len = 0;
- }
- })
- }
-
- /// Write all buffered encoded output. If this returns `Ok`, `self.output_occupied_len` is `0`.
- ///
- /// This is basically write_all for the remaining buffered data but without the undesirable
- /// abort-on-`Ok(0)` behavior.
- ///
- /// # Errors
- ///
- /// Any error emitted by the delegate writer abort the write loop and is returned, unless it's
- /// `Interrupted`, in which case the error is ignored and writes will continue.
- fn write_all_encoded_output(&mut self) -> Result<()> {
- while self.output_occupied_len > 0 {
- let remaining_len = self.output_occupied_len;
- match self.write_to_delegate(remaining_len) {
- // try again on interrupts ala write_all
- Err(ref e) if e.kind() == ErrorKind::Interrupted => {}
- // other errors return
- Err(e) => return Err(e),
- // success no-ops because remaining length is already updated
- Ok(_) => {}
- };
- }
-
- debug_assert_eq!(0, self.output_occupied_len);
- Ok(())
- }
-
- /// Unwraps this `EncoderWriter`, returning the base writer it writes base64 encoded output
- /// to.
- ///
- /// Normally this method should not be needed, since `finish()` returns the inner writer if
- /// it completes successfully. That will also ensure all data has been flushed, which the
- /// `into_inner()` function does *not* do.
- ///
- /// Calling this method after `finish()` has completed successfully will panic, since the
- /// writer has already been returned.
- ///
- /// This method may be useful if the writer implements additional APIs beyond the `Write`
- /// trait. Note that the inner writer might be in an error state or have an incomplete
- /// base64 string written to it.
- pub fn into_inner(mut self) -> W {
- self.delegate
- .take()
- .expect("Encoder has already had finish() called")
- }
-}
-
-impl<'e, E: Engine, W: io::Write> io::Write for EncoderWriter<'e, E, W> {
- /// Encode input and then write to the delegate writer.
- ///
- /// Under non-error circumstances, this returns `Ok` with the value being the number of bytes
- /// of `input` consumed. The value may be `0`, which interacts poorly with `write_all`, which
- /// interprets `Ok(0)` as an error, despite it being allowed by the contract of `write`. See
- /// <https://github.com/rust-lang/rust/issues/56889> for more on that.
- ///
- /// If the previous call to `write` provided more (encoded) data than the delegate writer could
- /// accept in a single call to its `write`, the remaining data is buffered. As long as buffered
- /// data is present, subsequent calls to `write` will try to write the remaining buffered data
- /// to the delegate and return either `Ok(0)` -- and therefore not consume any of `input` -- or
- /// an error.
- ///
- /// # Errors
- ///
- /// Any errors emitted by the delegate writer are returned.
- fn write(&mut self, input: &[u8]) -> Result<usize> {
- if self.delegate.is_none() {
- panic!("Cannot write more after calling finish()");
- }
-
- if input.is_empty() {
- return Ok(0);
- }
-
- // The contract of `Write::write` places some constraints on this implementation:
- // - a call to `write()` represents at most one call to a wrapped `Write`, so we can't
- // iterate over the input and encode multiple chunks.
- // - Errors mean that "no bytes were written to this writer", so we need to reset the
- // internal state to what it was before the error occurred
-
- // before reading any input, write any leftover encoded output from last time
- if self.output_occupied_len > 0 {
- let current_len = self.output_occupied_len;
- return self
- .write_to_delegate(current_len)
- // did not read any input
- .map(|_| 0);
- }
-
- debug_assert_eq!(0, self.output_occupied_len);
-
- // how many bytes, if any, were read into `extra` to create a triple to encode
- let mut extra_input_read_len = 0;
- let mut input = input;
-
- let orig_extra_len = self.extra_input_occupied_len;
-
- let mut encoded_size = 0;
- // always a multiple of MIN_ENCODE_CHUNK_SIZE
- let mut max_input_len = MAX_INPUT_LEN;
-
- // process leftover un-encoded input from last write
- if self.extra_input_occupied_len > 0 {
- debug_assert!(self.extra_input_occupied_len < 3);
- if input.len() + self.extra_input_occupied_len >= MIN_ENCODE_CHUNK_SIZE {
- // Fill up `extra`, encode that into `output`, and consume as much of the rest of
- // `input` as possible.
- // We could write just the encoding of `extra` by itself but then we'd have to
- // return after writing only 4 bytes, which is inefficient if the underlying writer
- // would make a syscall.
- extra_input_read_len = MIN_ENCODE_CHUNK_SIZE - self.extra_input_occupied_len;
- debug_assert!(extra_input_read_len > 0);
- // overwrite only bytes that weren't already used. If we need to rollback extra_len
- // (when the subsequent write errors), the old leading bytes will still be there.
- self.extra_input[self.extra_input_occupied_len..MIN_ENCODE_CHUNK_SIZE]
- .copy_from_slice(&input[0..extra_input_read_len]);
-
- let len = self.engine.internal_encode(
- &self.extra_input[0..MIN_ENCODE_CHUNK_SIZE],
- &mut self.output[..],
- );
- debug_assert_eq!(4, len);
-
- input = &input[extra_input_read_len..];
-
- // consider extra to be used up, since we encoded it
- self.extra_input_occupied_len = 0;
- // don't clobber where we just encoded to
- encoded_size = 4;
- // and don't read more than can be encoded
- max_input_len = MAX_INPUT_LEN - MIN_ENCODE_CHUNK_SIZE;
-
- // fall through to normal encoding
- } else {
- // `extra` and `input` are non empty, but `|extra| + |input| < 3`, so there must be
- // 1 byte in each.
- debug_assert_eq!(1, input.len());
- debug_assert_eq!(1, self.extra_input_occupied_len);
-
- self.extra_input[self.extra_input_occupied_len] = input[0];
- self.extra_input_occupied_len += 1;
- return Ok(1);
- };
- } else if input.len() < MIN_ENCODE_CHUNK_SIZE {
- // `extra` is empty, and `input` fits inside it
- self.extra_input[0..input.len()].copy_from_slice(input);
- self.extra_input_occupied_len = input.len();
- return Ok(input.len());
- };
-
- // either 0 or 1 complete chunks encoded from extra
- debug_assert!(encoded_size == 0 || encoded_size == 4);
- debug_assert!(
- // didn't encode extra input
- MAX_INPUT_LEN == max_input_len
- // encoded one triple
- || MAX_INPUT_LEN == max_input_len + MIN_ENCODE_CHUNK_SIZE
- );
-
- // encode complete triples only
- let input_complete_chunks_len = input.len() - (input.len() % MIN_ENCODE_CHUNK_SIZE);
- let input_chunks_to_encode_len = cmp::min(input_complete_chunks_len, max_input_len);
- debug_assert_eq!(0, max_input_len % MIN_ENCODE_CHUNK_SIZE);
- debug_assert_eq!(0, input_chunks_to_encode_len % MIN_ENCODE_CHUNK_SIZE);
-
- encoded_size += self.engine.internal_encode(
- &input[..(input_chunks_to_encode_len)],
- &mut self.output[encoded_size..],
- );
-
- // not updating `self.output_occupied_len` here because if the below write fails, it should
- // "never take place" -- the buffer contents we encoded are ignored and perhaps retried
- // later, if the consumer chooses.
-
- self.write_to_delegate(encoded_size)
- // no matter whether we wrote the full encoded buffer or not, we consumed the same
- // input
- .map(|_| extra_input_read_len + input_chunks_to_encode_len)
- .map_err(|e| {
- // in case we filled and encoded `extra`, reset extra_len
- self.extra_input_occupied_len = orig_extra_len;
-
- e
- })
- }
-
- /// Because this is usually treated as OK to call multiple times, it will *not* flush any
- /// incomplete chunks of input or write padding.
- /// # Errors
- ///
- /// The first error that is not of [`ErrorKind::Interrupted`] will be returned.
- fn flush(&mut self) -> Result<()> {
- self.write_all_encoded_output()?;
- self.delegate
- .as_mut()
- .expect("Writer must be present")
- .flush()
- }
-}
-
-impl<'e, E: Engine, W: io::Write> Drop for EncoderWriter<'e, E, W> {
- fn drop(&mut self) {
- if !self.panicked {
- // like `BufWriter`, ignore errors during drop
- let _ = self.write_final_leftovers();
- }
- }
-}
diff --git a/vendor/base64/src/write/encoder_string_writer.rs b/vendor/base64/src/write/encoder_string_writer.rs
deleted file mode 100644
index 9c02bcde..00000000
--- a/vendor/base64/src/write/encoder_string_writer.rs
+++ /dev/null
@@ -1,207 +0,0 @@
-use super::encoder::EncoderWriter;
-use crate::engine::Engine;
-use std::io;
-
-/// A `Write` implementation that base64-encodes data using the provided config and accumulates the
-/// resulting base64 utf8 `&str` in a [StrConsumer] implementation (typically `String`), which is
-/// then exposed via `into_inner()`.
-///
-/// # Examples
-///
-/// Buffer base64 in a new String:
-///
-/// ```
-/// use std::io::Write;
-/// use base64::engine::general_purpose;
-///
-/// let mut enc = base64::write::EncoderStringWriter::new(&general_purpose::STANDARD);
-///
-/// enc.write_all(b"asdf").unwrap();
-///
-/// // get the resulting String
-/// let b64_string = enc.into_inner();
-///
-/// assert_eq!("YXNkZg==", &b64_string);
-/// ```
-///
-/// Or, append to an existing `String`, which implements `StrConsumer`:
-///
-/// ```
-/// use std::io::Write;
-/// use base64::engine::general_purpose;
-///
-/// let mut buf = String::from("base64: ");
-///
-/// let mut enc = base64::write::EncoderStringWriter::from_consumer(
-/// &mut buf,
-/// &general_purpose::STANDARD);
-///
-/// enc.write_all(b"asdf").unwrap();
-///
-/// // release the &mut reference on buf
-/// let _ = enc.into_inner();
-///
-/// assert_eq!("base64: YXNkZg==", &buf);
-/// ```
-///
-/// # Performance
-///
-/// Because it has to validate that the base64 is UTF-8, it is about 80% as fast as writing plain
-/// bytes to a `io::Write`.
-pub struct EncoderStringWriter<'e, E: Engine, S: StrConsumer> {
- encoder: EncoderWriter<'e, E, Utf8SingleCodeUnitWriter<S>>,
-}
-
-impl<'e, E: Engine, S: StrConsumer> EncoderStringWriter<'e, E, S> {
- /// Create a EncoderStringWriter that will append to the provided `StrConsumer`.
- pub fn from_consumer(str_consumer: S, engine: &'e E) -> Self {
- EncoderStringWriter {
- encoder: EncoderWriter::new(Utf8SingleCodeUnitWriter { str_consumer }, engine),
- }
- }
-
- /// Encode all remaining buffered data, including any trailing incomplete input triples and
- /// associated padding.
- ///
- /// Returns the base64-encoded form of the accumulated written data.
- pub fn into_inner(mut self) -> S {
- self.encoder
- .finish()
- .expect("Writing to a consumer should never fail")
- .str_consumer
- }
-}
-
-impl<'e, E: Engine> EncoderStringWriter<'e, E, String> {
- /// Create a EncoderStringWriter that will encode into a new `String` with the provided config.
- pub fn new(engine: &'e E) -> Self {
- EncoderStringWriter::from_consumer(String::new(), engine)
- }
-}
-
-impl<'e, E: Engine, S: StrConsumer> io::Write for EncoderStringWriter<'e, E, S> {
- fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
- self.encoder.write(buf)
- }
-
- fn flush(&mut self) -> io::Result<()> {
- self.encoder.flush()
- }
-}
-
-/// An abstraction around consuming `str`s produced by base64 encoding.
-pub trait StrConsumer {
- /// Consume the base64 encoded data in `buf`
- fn consume(&mut self, buf: &str);
-}
-
-/// As for io::Write, `StrConsumer` is implemented automatically for `&mut S`.
-impl<S: StrConsumer + ?Sized> StrConsumer for &mut S {
- fn consume(&mut self, buf: &str) {
- (**self).consume(buf);
- }
-}
-
-/// Pushes the str onto the end of the String
-impl StrConsumer for String {
- fn consume(&mut self, buf: &str) {
- self.push_str(buf);
- }
-}
-
-/// A `Write` that only can handle bytes that are valid single-byte UTF-8 code units.
-///
-/// This is safe because we only use it when writing base64, which is always valid UTF-8.
-struct Utf8SingleCodeUnitWriter<S: StrConsumer> {
- str_consumer: S,
-}
-
-impl<S: StrConsumer> io::Write for Utf8SingleCodeUnitWriter<S> {
- fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
- // Because we expect all input to be valid utf-8 individual bytes, we can encode any buffer
- // length
- let s = std::str::from_utf8(buf).expect("Input must be valid UTF-8");
-
- self.str_consumer.consume(s);
-
- Ok(buf.len())
- }
-
- fn flush(&mut self) -> io::Result<()> {
- // no op
- Ok(())
- }
-}
-
-#[cfg(test)]
-mod tests {
- use crate::{
- engine::Engine, tests::random_engine, write::encoder_string_writer::EncoderStringWriter,
- };
- use rand::Rng;
- use std::cmp;
- use std::io::Write;
-
- #[test]
- fn every_possible_split_of_input() {
- let mut rng = rand::thread_rng();
- let mut orig_data = Vec::<u8>::new();
- let mut normal_encoded = String::new();
-
- let size = 5_000;
-
- for i in 0..size {
- orig_data.clear();
- normal_encoded.clear();
-
- orig_data.resize(size, 0);
- rng.fill(&mut orig_data[..]);
-
- let engine = random_engine(&mut rng);
- engine.encode_string(&orig_data, &mut normal_encoded);
-
- let mut stream_encoder = EncoderStringWriter::new(&engine);
- // Write the first i bytes, then the rest
- stream_encoder.write_all(&orig_data[0..i]).unwrap();
- stream_encoder.write_all(&orig_data[i..]).unwrap();
-
- let stream_encoded = stream_encoder.into_inner();
-
- assert_eq!(normal_encoded, stream_encoded);
- }
- }
- #[test]
- fn incremental_writes() {
- let mut rng = rand::thread_rng();
- let mut orig_data = Vec::<u8>::new();
- let mut normal_encoded = String::new();
-
- let size = 5_000;
-
- for _ in 0..size {
- orig_data.clear();
- normal_encoded.clear();
-
- orig_data.resize(size, 0);
- rng.fill(&mut orig_data[..]);
-
- let engine = random_engine(&mut rng);
- engine.encode_string(&orig_data, &mut normal_encoded);
-
- let mut stream_encoder = EncoderStringWriter::new(&engine);
- // write small nibbles of data
- let mut offset = 0;
- while offset < size {
- let nibble_size = cmp::min(rng.gen_range(0..=64), size - offset);
- let len = stream_encoder
- .write(&orig_data[offset..offset + nibble_size])
- .unwrap();
- offset += len;
- }
-
- let stream_encoded = stream_encoder.into_inner();
-
- assert_eq!(normal_encoded, stream_encoded);
- }
- }
-}
diff --git a/vendor/base64/src/write/encoder_tests.rs b/vendor/base64/src/write/encoder_tests.rs
deleted file mode 100644
index 1f1a1650..00000000
--- a/vendor/base64/src/write/encoder_tests.rs
+++ /dev/null
@@ -1,554 +0,0 @@
-use std::io::{Cursor, Write};
-use std::{cmp, io, str};
-
-use rand::Rng;
-
-use crate::{
- alphabet::{STANDARD, URL_SAFE},
- engine::{
- general_purpose::{GeneralPurpose, NO_PAD, PAD},
- Engine,
- },
- tests::random_engine,
-};
-
-use super::EncoderWriter;
-
-const URL_SAFE_ENGINE: GeneralPurpose = GeneralPurpose::new(&URL_SAFE, PAD);
-const NO_PAD_ENGINE: GeneralPurpose = GeneralPurpose::new(&STANDARD, NO_PAD);
-
-#[test]
-fn encode_three_bytes() {
- let mut c = Cursor::new(Vec::new());
- {
- let mut enc = EncoderWriter::new(&mut c, &URL_SAFE_ENGINE);
-
- let sz = enc.write(b"abc").unwrap();
- assert_eq!(sz, 3);
- }
- assert_eq!(&c.get_ref()[..], URL_SAFE_ENGINE.encode("abc").as_bytes());
-}
-
-#[test]
-fn encode_nine_bytes_two_writes() {
- let mut c = Cursor::new(Vec::new());
- {
- let mut enc = EncoderWriter::new(&mut c, &URL_SAFE_ENGINE);
-
- let sz = enc.write(b"abcdef").unwrap();
- assert_eq!(sz, 6);
- let sz = enc.write(b"ghi").unwrap();
- assert_eq!(sz, 3);
- }
- assert_eq!(
- &c.get_ref()[..],
- URL_SAFE_ENGINE.encode("abcdefghi").as_bytes()
- );
-}
-
-#[test]
-fn encode_one_then_two_bytes() {
- let mut c = Cursor::new(Vec::new());
- {
- let mut enc = EncoderWriter::new(&mut c, &URL_SAFE_ENGINE);
-
- let sz = enc.write(b"a").unwrap();
- assert_eq!(sz, 1);
- let sz = enc.write(b"bc").unwrap();
- assert_eq!(sz, 2);
- }
- assert_eq!(&c.get_ref()[..], URL_SAFE_ENGINE.encode("abc").as_bytes());
-}
-
-#[test]
-fn encode_one_then_five_bytes() {
- let mut c = Cursor::new(Vec::new());
- {
- let mut enc = EncoderWriter::new(&mut c, &URL_SAFE_ENGINE);
-
- let sz = enc.write(b"a").unwrap();
- assert_eq!(sz, 1);
- let sz = enc.write(b"bcdef").unwrap();
- assert_eq!(sz, 5);
- }
- assert_eq!(
- &c.get_ref()[..],
- URL_SAFE_ENGINE.encode("abcdef").as_bytes()
- );
-}
-
-#[test]
-fn encode_1_2_3_bytes() {
- let mut c = Cursor::new(Vec::new());
- {
- let mut enc = EncoderWriter::new(&mut c, &URL_SAFE_ENGINE);
-
- let sz = enc.write(b"a").unwrap();
- assert_eq!(sz, 1);
- let sz = enc.write(b"bc").unwrap();
- assert_eq!(sz, 2);
- let sz = enc.write(b"def").unwrap();
- assert_eq!(sz, 3);
- }
- assert_eq!(
- &c.get_ref()[..],
- URL_SAFE_ENGINE.encode("abcdef").as_bytes()
- );
-}
-
-#[test]
-fn encode_with_padding() {
- let mut c = Cursor::new(Vec::new());
- {
- let mut enc = EncoderWriter::new(&mut c, &URL_SAFE_ENGINE);
-
- enc.write_all(b"abcd").unwrap();
-
- enc.flush().unwrap();
- }
- assert_eq!(&c.get_ref()[..], URL_SAFE_ENGINE.encode("abcd").as_bytes());
-}
-
-#[test]
-fn encode_with_padding_multiple_writes() {
- let mut c = Cursor::new(Vec::new());
- {
- let mut enc = EncoderWriter::new(&mut c, &URL_SAFE_ENGINE);
-
- assert_eq!(1, enc.write(b"a").unwrap());
- assert_eq!(2, enc.write(b"bc").unwrap());
- assert_eq!(3, enc.write(b"def").unwrap());
- assert_eq!(1, enc.write(b"g").unwrap());
-
- enc.flush().unwrap();
- }
- assert_eq!(
- &c.get_ref()[..],
- URL_SAFE_ENGINE.encode("abcdefg").as_bytes()
- );
-}
-
-#[test]
-fn finish_writes_extra_byte() {
- let mut c = Cursor::new(Vec::new());
- {
- let mut enc = EncoderWriter::new(&mut c, &URL_SAFE_ENGINE);
-
- assert_eq!(6, enc.write(b"abcdef").unwrap());
-
- // will be in extra
- assert_eq!(1, enc.write(b"g").unwrap());
-
- // 1 trailing byte = 2 encoded chars
- let _ = enc.finish().unwrap();
- }
- assert_eq!(
- &c.get_ref()[..],
- URL_SAFE_ENGINE.encode("abcdefg").as_bytes()
- );
-}
-
-#[test]
-fn write_partial_chunk_encodes_partial_chunk() {
- let mut c = Cursor::new(Vec::new());
- {
- let mut enc = EncoderWriter::new(&mut c, &NO_PAD_ENGINE);
-
- // nothing encoded yet
- assert_eq!(2, enc.write(b"ab").unwrap());
- // encoded here
- let _ = enc.finish().unwrap();
- }
- assert_eq!(&c.get_ref()[..], NO_PAD_ENGINE.encode("ab").as_bytes());
- assert_eq!(3, c.get_ref().len());
-}
-
-#[test]
-fn write_1_chunk_encodes_complete_chunk() {
- let mut c = Cursor::new(Vec::new());
- {
- let mut enc = EncoderWriter::new(&mut c, &NO_PAD_ENGINE);
-
- assert_eq!(3, enc.write(b"abc").unwrap());
- let _ = enc.finish().unwrap();
- }
- assert_eq!(&c.get_ref()[..], NO_PAD_ENGINE.encode("abc").as_bytes());
- assert_eq!(4, c.get_ref().len());
-}
-
-#[test]
-fn write_1_chunk_and_partial_encodes_only_complete_chunk() {
- let mut c = Cursor::new(Vec::new());
- {
- let mut enc = EncoderWriter::new(&mut c, &NO_PAD_ENGINE);
-
- // "d" not consumed since it's not a full chunk
- assert_eq!(3, enc.write(b"abcd").unwrap());
- let _ = enc.finish().unwrap();
- }
- assert_eq!(&c.get_ref()[..], NO_PAD_ENGINE.encode("abc").as_bytes());
- assert_eq!(4, c.get_ref().len());
-}
-
-#[test]
-fn write_2_partials_to_exactly_complete_chunk_encodes_complete_chunk() {
- let mut c = Cursor::new(Vec::new());
- {
- let mut enc = EncoderWriter::new(&mut c, &NO_PAD_ENGINE);
-
- assert_eq!(1, enc.write(b"a").unwrap());
- assert_eq!(2, enc.write(b"bc").unwrap());
- let _ = enc.finish().unwrap();
- }
- assert_eq!(&c.get_ref()[..], NO_PAD_ENGINE.encode("abc").as_bytes());
- assert_eq!(4, c.get_ref().len());
-}
-
-#[test]
-fn write_partial_then_enough_to_complete_chunk_but_not_complete_another_chunk_encodes_complete_chunk_without_consuming_remaining(
-) {
- let mut c = Cursor::new(Vec::new());
- {
- let mut enc = EncoderWriter::new(&mut c, &NO_PAD_ENGINE);
-
- assert_eq!(1, enc.write(b"a").unwrap());
- // doesn't consume "d"
- assert_eq!(2, enc.write(b"bcd").unwrap());
- let _ = enc.finish().unwrap();
- }
- assert_eq!(&c.get_ref()[..], NO_PAD_ENGINE.encode("abc").as_bytes());
- assert_eq!(4, c.get_ref().len());
-}
-
-#[test]
-fn write_partial_then_enough_to_complete_chunk_and_another_chunk_encodes_complete_chunks() {
- let mut c = Cursor::new(Vec::new());
- {
- let mut enc = EncoderWriter::new(&mut c, &NO_PAD_ENGINE);
-
- assert_eq!(1, enc.write(b"a").unwrap());
- // completes partial chunk, and another chunk
- assert_eq!(5, enc.write(b"bcdef").unwrap());
- let _ = enc.finish().unwrap();
- }
- assert_eq!(&c.get_ref()[..], NO_PAD_ENGINE.encode("abcdef").as_bytes());
- assert_eq!(8, c.get_ref().len());
-}
-
-#[test]
-fn write_partial_then_enough_to_complete_chunk_and_another_chunk_and_another_partial_chunk_encodes_only_complete_chunks(
-) {
- let mut c = Cursor::new(Vec::new());
- {
- let mut enc = EncoderWriter::new(&mut c, &NO_PAD_ENGINE);
-
- assert_eq!(1, enc.write(b"a").unwrap());
- // completes partial chunk, and another chunk, with one more partial chunk that's not
- // consumed
- assert_eq!(5, enc.write(b"bcdefe").unwrap());
- let _ = enc.finish().unwrap();
- }
- assert_eq!(&c.get_ref()[..], NO_PAD_ENGINE.encode("abcdef").as_bytes());
- assert_eq!(8, c.get_ref().len());
-}
-
-#[test]
-fn drop_calls_finish_for_you() {
- let mut c = Cursor::new(Vec::new());
- {
- let mut enc = EncoderWriter::new(&mut c, &NO_PAD_ENGINE);
- assert_eq!(1, enc.write(b"a").unwrap());
- }
- assert_eq!(&c.get_ref()[..], NO_PAD_ENGINE.encode("a").as_bytes());
- assert_eq!(2, c.get_ref().len());
-}
-
-#[test]
-fn every_possible_split_of_input() {
- let mut rng = rand::thread_rng();
- let mut orig_data = Vec::<u8>::new();
- let mut stream_encoded = Vec::<u8>::new();
- let mut normal_encoded = String::new();
-
- let size = 5_000;
-
- for i in 0..size {
- orig_data.clear();
- stream_encoded.clear();
- normal_encoded.clear();
-
- for _ in 0..size {
- orig_data.push(rng.gen());
- }
-
- let engine = random_engine(&mut rng);
- engine.encode_string(&orig_data, &mut normal_encoded);
-
- {
- let mut stream_encoder = EncoderWriter::new(&mut stream_encoded, &engine);
- // Write the first i bytes, then the rest
- stream_encoder.write_all(&orig_data[0..i]).unwrap();
- stream_encoder.write_all(&orig_data[i..]).unwrap();
- }
-
- assert_eq!(normal_encoded, str::from_utf8(&stream_encoded).unwrap());
- }
-}
-
-#[test]
-fn encode_random_config_matches_normal_encode_reasonable_input_len() {
- // choose up to 2 * buf size, so ~half the time it'll use a full buffer
- do_encode_random_config_matches_normal_encode(super::encoder::BUF_SIZE * 2);
-}
-
-#[test]
-fn encode_random_config_matches_normal_encode_tiny_input_len() {
- do_encode_random_config_matches_normal_encode(10);
-}
-
-#[test]
-fn retrying_writes_that_error_with_interrupted_works() {
- let mut rng = rand::thread_rng();
- let mut orig_data = Vec::<u8>::new();
- let mut stream_encoded = Vec::<u8>::new();
- let mut normal_encoded = String::new();
-
- for _ in 0..1_000 {
- orig_data.clear();
- stream_encoded.clear();
- normal_encoded.clear();
-
- let orig_len: usize = rng.gen_range(100..20_000);
- for _ in 0..orig_len {
- orig_data.push(rng.gen());
- }
-
- // encode the normal way
- let engine = random_engine(&mut rng);
- engine.encode_string(&orig_data, &mut normal_encoded);
-
- // encode via the stream encoder
- {
- let mut interrupt_rng = rand::thread_rng();
- let mut interrupting_writer = InterruptingWriter {
- w: &mut stream_encoded,
- rng: &mut interrupt_rng,
- fraction: 0.8,
- };
-
- let mut stream_encoder = EncoderWriter::new(&mut interrupting_writer, &engine);
- let mut bytes_consumed = 0;
- while bytes_consumed < orig_len {
- // use short inputs since we want to use `extra` a lot as that's what needs rollback
- // when errors occur
- let input_len: usize = cmp::min(rng.gen_range(0..10), orig_len - bytes_consumed);
-
- retry_interrupted_write_all(
- &mut stream_encoder,
- &orig_data[bytes_consumed..bytes_consumed + input_len],
- )
- .unwrap();
-
- bytes_consumed += input_len;
- }
-
- loop {
- let res = stream_encoder.finish();
- match res {
- Ok(_) => break,
- Err(e) => match e.kind() {
- io::ErrorKind::Interrupted => continue,
- _ => panic!("{:?}", e), // bail
- },
- }
- }
-
- assert_eq!(orig_len, bytes_consumed);
- }
-
- assert_eq!(normal_encoded, str::from_utf8(&stream_encoded).unwrap());
- }
-}
-
-#[test]
-fn writes_that_only_write_part_of_input_and_sometimes_interrupt_produce_correct_encoded_data() {
- let mut rng = rand::thread_rng();
- let mut orig_data = Vec::<u8>::new();
- let mut stream_encoded = Vec::<u8>::new();
- let mut normal_encoded = String::new();
-
- for _ in 0..1_000 {
- orig_data.clear();
- stream_encoded.clear();
- normal_encoded.clear();
-
- let orig_len: usize = rng.gen_range(100..20_000);
- for _ in 0..orig_len {
- orig_data.push(rng.gen());
- }
-
- // encode the normal way
- let engine = random_engine(&mut rng);
- engine.encode_string(&orig_data, &mut normal_encoded);
-
- // encode via the stream encoder
- {
- let mut partial_rng = rand::thread_rng();
- let mut partial_writer = PartialInterruptingWriter {
- w: &mut stream_encoded,
- rng: &mut partial_rng,
- full_input_fraction: 0.1,
- no_interrupt_fraction: 0.1,
- };
-
- let mut stream_encoder = EncoderWriter::new(&mut partial_writer, &engine);
- let mut bytes_consumed = 0;
- while bytes_consumed < orig_len {
- // use at most medium-length inputs to exercise retry logic more aggressively
- let input_len: usize = cmp::min(rng.gen_range(0..100), orig_len - bytes_consumed);
-
- let res =
- stream_encoder.write(&orig_data[bytes_consumed..bytes_consumed + input_len]);
-
- // retry on interrupt
- match res {
- Ok(len) => bytes_consumed += len,
- Err(e) => match e.kind() {
- io::ErrorKind::Interrupted => continue,
- _ => {
- panic!("should not see other errors");
- }
- },
- }
- }
-
- let _ = stream_encoder.finish().unwrap();
-
- assert_eq!(orig_len, bytes_consumed);
- }
-
- assert_eq!(normal_encoded, str::from_utf8(&stream_encoded).unwrap());
- }
-}
-
-/// Retry writes until all the data is written or an error that isn't Interrupted is returned.
-fn retry_interrupted_write_all<W: Write>(w: &mut W, buf: &[u8]) -> io::Result<()> {
- let mut bytes_consumed = 0;
-
- while bytes_consumed < buf.len() {
- let res = w.write(&buf[bytes_consumed..]);
-
- match res {
- Ok(len) => bytes_consumed += len,
- Err(e) => match e.kind() {
- io::ErrorKind::Interrupted => continue,
- _ => return Err(e),
- },
- }
- }
-
- Ok(())
-}
-
-fn do_encode_random_config_matches_normal_encode(max_input_len: usize) {
- let mut rng = rand::thread_rng();
- let mut orig_data = Vec::<u8>::new();
- let mut stream_encoded = Vec::<u8>::new();
- let mut normal_encoded = String::new();
-
- for _ in 0..1_000 {
- orig_data.clear();
- stream_encoded.clear();
- normal_encoded.clear();
-
- let orig_len: usize = rng.gen_range(100..20_000);
- for _ in 0..orig_len {
- orig_data.push(rng.gen());
- }
-
- // encode the normal way
- let engine = random_engine(&mut rng);
- engine.encode_string(&orig_data, &mut normal_encoded);
-
- // encode via the stream encoder
- {
- let mut stream_encoder = EncoderWriter::new(&mut stream_encoded, &engine);
- let mut bytes_consumed = 0;
- while bytes_consumed < orig_len {
- let input_len: usize =
- cmp::min(rng.gen_range(0..max_input_len), orig_len - bytes_consumed);
-
- // write a little bit of the data
- stream_encoder
- .write_all(&orig_data[bytes_consumed..bytes_consumed + input_len])
- .unwrap();
-
- bytes_consumed += input_len;
- }
-
- let _ = stream_encoder.finish().unwrap();
-
- assert_eq!(orig_len, bytes_consumed);
- }
-
- assert_eq!(normal_encoded, str::from_utf8(&stream_encoded).unwrap());
- }
-}
-
-/// A `Write` implementation that returns Interrupted some fraction of the time, randomly.
-struct InterruptingWriter<'a, W: 'a + Write, R: 'a + Rng> {
- w: &'a mut W,
- rng: &'a mut R,
- /// In [0, 1]. If a random number in [0, 1] is `<= threshold`, `Write` methods will return
- /// an `Interrupted` error
- fraction: f64,
-}
-
-impl<'a, W: Write, R: Rng> Write for InterruptingWriter<'a, W, R> {
- fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
- if self.rng.gen_range(0.0..1.0) <= self.fraction {
- return Err(io::Error::new(io::ErrorKind::Interrupted, "interrupted"));
- }
-
- self.w.write(buf)
- }
-
- fn flush(&mut self) -> io::Result<()> {
- if self.rng.gen_range(0.0..1.0) <= self.fraction {
- return Err(io::Error::new(io::ErrorKind::Interrupted, "interrupted"));
- }
-
- self.w.flush()
- }
-}
-
-/// A `Write` implementation that sometimes will only write part of its input.
-struct PartialInterruptingWriter<'a, W: 'a + Write, R: 'a + Rng> {
- w: &'a mut W,
- rng: &'a mut R,
- /// In [0, 1]. If a random number in [0, 1] is `<= threshold`, `write()` will write all its
- /// input. Otherwise, it will write a random substring
- full_input_fraction: f64,
- no_interrupt_fraction: f64,
-}
-
-impl<'a, W: Write, R: Rng> Write for PartialInterruptingWriter<'a, W, R> {
- fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
- if self.rng.gen_range(0.0..1.0) > self.no_interrupt_fraction {
- return Err(io::Error::new(io::ErrorKind::Interrupted, "interrupted"));
- }
-
- if self.rng.gen_range(0.0..1.0) <= self.full_input_fraction || buf.is_empty() {
- // pass through the buf untouched
- self.w.write(buf)
- } else {
- // only use a prefix of it
- self.w
- .write(&buf[0..(self.rng.gen_range(0..(buf.len() - 1)))])
- }
- }
-
- fn flush(&mut self) -> io::Result<()> {
- self.w.flush()
- }
-}
diff --git a/vendor/base64/src/write/mod.rs b/vendor/base64/src/write/mod.rs
deleted file mode 100644
index 2a617db9..00000000
--- a/vendor/base64/src/write/mod.rs
+++ /dev/null
@@ -1,11 +0,0 @@
-//! Implementations of `io::Write` to transparently handle base64.
-mod encoder;
-mod encoder_string_writer;
-
-pub use self::{
- encoder::EncoderWriter,
- encoder_string_writer::{EncoderStringWriter, StrConsumer},
-};
-
-#[cfg(test)]
-mod encoder_tests;
diff --git a/vendor/base64/tests/encode.rs b/vendor/base64/tests/encode.rs
deleted file mode 100644
index 9d694474..00000000
--- a/vendor/base64/tests/encode.rs
+++ /dev/null
@@ -1,77 +0,0 @@
-use base64::{
- alphabet::URL_SAFE, engine::general_purpose::PAD, engine::general_purpose::STANDARD, *,
-};
-
-fn compare_encode(expected: &str, target: &[u8]) {
- assert_eq!(expected, STANDARD.encode(target));
-}
-
-#[test]
-fn encode_all_ascii() {
- let ascii: Vec<u8> = (0..=127).collect();
-
- compare_encode(
- "AAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHh8gISIjJCUmJygpKissLS4vMDEyMzQ1Njc4OTo7P\
- D0+P0BBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWltcXV5fYGFiY2RlZmdoaWprbG1ub3BxcnN0dXZ3eHl6e3x9fn8\
- =",
- &ascii,
- );
-}
-
-#[test]
-fn encode_all_bytes() {
- let bytes: Vec<u8> = (0..=255).collect();
-
- compare_encode(
- "AAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHh8gISIjJCUmJygpKissLS4vMDEyMzQ1Njc4OTo7P\
- D0+P0BBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWltcXV5fYGFiY2RlZmdoaWprbG1ub3BxcnN0dXZ3eHl6e3x9fn\
- +AgYKDhIWGh4iJiouMjY6PkJGSk5SVlpeYmZqbnJ2en6ChoqOkpaanqKmqq6ytrq+wsbKztLW2t7i5uru8vb6\
- /wMHCw8TFxsfIycrLzM3Oz9DR0tPU1dbX2Nna29zd3t/g4eLj5OXm5+jp6uvs7e7v8PHy8/T19vf4+fr7/P3+/w==",
- &bytes,
- );
-}
-
-#[test]
-fn encode_all_bytes_url() {
- let bytes: Vec<u8> = (0..=255).collect();
-
- assert_eq!(
- "AAECAwQFBgcICQoLDA0ODxAREhMUFRYXGBkaGxwdHh8gISIjJCUmJygpKissLS4vMDEyMzQ1Njc4OTo7PD0\
- -P0BBQkNERUZHSElKS0xNTk9QUVJTVFVWV1hZWltcXV5fYGFiY2RlZmdoaWprbG1ub3BxcnN0dXZ3eHl6e3x9fn\
- -AgYKDhIWGh4iJiouMjY6PkJGSk5SVlpeYmZqbnJ2en6ChoqOkpaanqKmqq6ytrq\
- -wsbKztLW2t7i5uru8vb6_wMHCw8TFxsfIycrLzM3Oz9DR0tPU1dbX2Nna29zd3t_g4eLj5OXm5-jp6uvs7e7v8PHy\
- 8_T19vf4-fr7_P3-_w==",
- &engine::GeneralPurpose::new(&URL_SAFE, PAD).encode(bytes)
- );
-}
-
-#[test]
-fn encoded_len_unpadded() {
- assert_eq!(0, encoded_len(0, false).unwrap());
- assert_eq!(2, encoded_len(1, false).unwrap());
- assert_eq!(3, encoded_len(2, false).unwrap());
- assert_eq!(4, encoded_len(3, false).unwrap());
- assert_eq!(6, encoded_len(4, false).unwrap());
- assert_eq!(7, encoded_len(5, false).unwrap());
- assert_eq!(8, encoded_len(6, false).unwrap());
- assert_eq!(10, encoded_len(7, false).unwrap());
-}
-
-#[test]
-fn encoded_len_padded() {
- assert_eq!(0, encoded_len(0, true).unwrap());
- assert_eq!(4, encoded_len(1, true).unwrap());
- assert_eq!(4, encoded_len(2, true).unwrap());
- assert_eq!(4, encoded_len(3, true).unwrap());
- assert_eq!(8, encoded_len(4, true).unwrap());
- assert_eq!(8, encoded_len(5, true).unwrap());
- assert_eq!(8, encoded_len(6, true).unwrap());
- assert_eq!(12, encoded_len(7, true).unwrap());
-}
-#[test]
-fn encoded_len_overflow() {
- let max_size = usize::MAX / 4 * 3 + 2;
- assert_eq!(2, max_size % 3);
- assert_eq!(Some(usize::MAX), encoded_len(max_size, false));
- assert_eq!(None, encoded_len(max_size + 1, false));
-}
diff --git a/vendor/base64/tests/tests.rs b/vendor/base64/tests/tests.rs
deleted file mode 100644
index eceff40d..00000000
--- a/vendor/base64/tests/tests.rs
+++ /dev/null
@@ -1,161 +0,0 @@
-use rand::{Rng, SeedableRng};
-
-use base64::engine::{general_purpose::STANDARD, Engine};
-use base64::*;
-
-use base64::engine::general_purpose::{GeneralPurpose, NO_PAD};
-
-// generate random contents of the specified length and test encode/decode roundtrip
-fn roundtrip_random<E: Engine>(
- byte_buf: &mut Vec<u8>,
- str_buf: &mut String,
- engine: &E,
- byte_len: usize,
- approx_values_per_byte: u8,
- max_rounds: u64,
-) {
- // let the short ones be short but don't let it get too crazy large
- let num_rounds = calculate_number_of_rounds(byte_len, approx_values_per_byte, max_rounds);
- let mut r = rand::rngs::SmallRng::from_entropy();
- let mut decode_buf = Vec::new();
-
- for _ in 0..num_rounds {
- byte_buf.clear();
- str_buf.clear();
- decode_buf.clear();
- while byte_buf.len() < byte_len {
- byte_buf.push(r.gen::<u8>());
- }
-
- engine.encode_string(&byte_buf, str_buf);
- engine.decode_vec(&str_buf, &mut decode_buf).unwrap();
-
- assert_eq!(byte_buf, &decode_buf);
- }
-}
-
-fn calculate_number_of_rounds(byte_len: usize, approx_values_per_byte: u8, max: u64) -> u64 {
- // don't overflow
- let mut prod = approx_values_per_byte as u64;
-
- for _ in 0..byte_len {
- if prod > max {
- return max;
- }
-
- prod = prod.saturating_mul(prod);
- }
-
- prod
-}
-
-#[test]
-fn roundtrip_random_short_standard() {
- let mut byte_buf: Vec<u8> = Vec::new();
- let mut str_buf = String::new();
-
- for input_len in 0..40 {
- roundtrip_random(&mut byte_buf, &mut str_buf, &STANDARD, input_len, 4, 10000);
- }
-}
-
-#[test]
-fn roundtrip_random_with_fast_loop_standard() {
- let mut byte_buf: Vec<u8> = Vec::new();
- let mut str_buf = String::new();
-
- for input_len in 40..100 {
- roundtrip_random(&mut byte_buf, &mut str_buf, &STANDARD, input_len, 4, 1000);
- }
-}
-
-#[test]
-fn roundtrip_random_short_no_padding() {
- let mut byte_buf: Vec<u8> = Vec::new();
- let mut str_buf = String::new();
-
- let engine = GeneralPurpose::new(&alphabet::STANDARD, NO_PAD);
- for input_len in 0..40 {
- roundtrip_random(&mut byte_buf, &mut str_buf, &engine, input_len, 4, 10000);
- }
-}
-
-#[test]
-fn roundtrip_random_no_padding() {
- let mut byte_buf: Vec<u8> = Vec::new();
- let mut str_buf = String::new();
-
- let engine = GeneralPurpose::new(&alphabet::STANDARD, NO_PAD);
-
- for input_len in 40..100 {
- roundtrip_random(&mut byte_buf, &mut str_buf, &engine, input_len, 4, 1000);
- }
-}
-
-#[test]
-fn roundtrip_decode_trailing_10_bytes() {
- // This is a special case because we decode 8 byte blocks of input at a time as much as we can,
- // ideally unrolled to 32 bytes at a time, in stages 1 and 2. Since we also write a u64's worth
- // of bytes (8) to the output, we always write 2 garbage bytes that then will be overwritten by
- // the NEXT block. However, if the next block only contains 2 bytes, it will decode to 1 byte,
- // and therefore be too short to cover up the trailing 2 garbage bytes. Thus, we have stage 3
- // to handle that case.
-
- for num_quads in 0..25 {
- let mut s: String = "ABCD".repeat(num_quads);
- s.push_str("EFGHIJKLZg");
-
- let engine = GeneralPurpose::new(&alphabet::STANDARD, NO_PAD);
- let decoded = engine.decode(&s).unwrap();
- assert_eq!(num_quads * 3 + 7, decoded.len());
-
- assert_eq!(s, engine.encode(&decoded));
- }
-}
-
-#[test]
-fn display_wrapper_matches_normal_encode() {
- let mut bytes = Vec::<u8>::with_capacity(256);
-
- for i in 0..255 {
- bytes.push(i);
- }
- bytes.push(255);
-
- assert_eq!(
- STANDARD.encode(&bytes),
- format!("{}", display::Base64Display::new(&bytes, &STANDARD))
- );
-}
-
-#[test]
-fn encode_engine_slice_error_when_buffer_too_small() {
- for num_triples in 1..100 {
- let input = "AAA".repeat(num_triples);
- let mut vec = vec![0; (num_triples - 1) * 4];
- assert_eq!(
- EncodeSliceError::OutputSliceTooSmall,
- STANDARD.encode_slice(&input, &mut vec).unwrap_err()
- );
- vec.push(0);
- assert_eq!(
- EncodeSliceError::OutputSliceTooSmall,
- STANDARD.encode_slice(&input, &mut vec).unwrap_err()
- );
- vec.push(0);
- assert_eq!(
- EncodeSliceError::OutputSliceTooSmall,
- STANDARD.encode_slice(&input, &mut vec).unwrap_err()
- );
- vec.push(0);
- assert_eq!(
- EncodeSliceError::OutputSliceTooSmall,
- STANDARD.encode_slice(&input, &mut vec).unwrap_err()
- );
- vec.push(0);
- assert_eq!(
- num_triples * 4,
- STANDARD.encode_slice(&input, &mut vec).unwrap()
- );
- }
-}