| author | mo khan <mo@mokhan.ca> | 2025-07-02 18:36:06 -0600 |
|---|---|---|
| committer | mo khan <mo@mokhan.ca> | 2025-07-02 18:36:06 -0600 |
| commit | 8cdfa445d6629ffef4cb84967ff7017654045bc2 | |
| tree | 22f0b0907c024c78d26a731e2e1f5219407d8102 /vendor/backtrace | |
| parent | 4351c74c7c5f97156bc94d3a8549b9940ac80e3f | |
chore: add vendor directory
Diffstat (limited to 'vendor/backtrace')
55 files changed, 9576 insertions, 0 deletions
diff --git a/vendor/backtrace/.cargo-checksum.json b/vendor/backtrace/.cargo-checksum.json new file mode 100644 index 00000000..3a109ba0 --- /dev/null +++ b/vendor/backtrace/.cargo-checksum.json @@ -0,0 +1 @@ +{"files":{"Cargo.lock":"84a9e0c43fd262cffc64498dfe6d80f503d48c2c0f4e7a8b8cf5b72c68942fb2","Cargo.toml":"d1ca71656a6d939b616996f89ef511f8b05b80baf199872f21abf0b2f69f08e8","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"66627f6723d9c702276ae529aaf52fc2ed0fd13204ad510d0c9a2d63e6d7d68d","benches/benchmarks.rs":"029b78bb79052ec940eecfd18067b743925189202fc16015d3c4c25b05eb6d67","bindings.txt":"292ed6429d70ea9f31724691675b9b7fcd38eba969d74e6274fce057d2dc8d0b","examples/backtrace.rs":"5da0c95ccebfaffbbe7b5a92b0e488017dc375cbb5b8fb2b7712dd65b2dfb2ba","examples/raw.rs":"eda88454164ce5ebdd99c9c70ea35a61d34f5aecfdb8fcb4efd4f9738d10b093","src/backtrace/libunwind.rs":"827c5188e2b80c2a6d06cee4b00f7cfcb38a94c5ab83f2b139cd3afe40e4662e","src/backtrace/miri.rs":"fd98e52da0acb98dc49bee28c10335426fe843790f04da30fdfeebc72d757d9c","src/backtrace/mod.rs":"666383a0bec82dbd9c7413e0fa96d62240dd20f5969b6f8b80411f7fcff2d84c","src/backtrace/noop.rs":"970db5bce5bbc129466b92409730a92f23fb9f21e13e9514187836d34e25f673","src/backtrace/win32.rs":"6b4fd32b5bd353e50eae1f2d4f1b518b2f686fd1c0088f21da33f6b34488a502","src/backtrace/win64.rs":"ff0d382dee8e0ea39c39af5ba95ce0c8c2d0f28ca370d4a71c1e120f5c8d7206","src/capture.rs":"8ea1d14f1f832315729275685556518c66bfbcb37830bc2cb210e14a1fa36dda","src/dbghelp.rs":"830f8f5eef091eb26d59ce3bb62f8df5a331044181412ae316089bd56b39f076","src/lib.rs":"6d7f5afc9a30f106adfa21c9c0d7bde4afcd492e4de5011934127ad2f2a1a00d","src/print.rs":"5cc1d102e75cc2aa3c0a84cb75e33438edfac88505acbd3463ad90e104a02c93","src/print/fuchsia.rs":"8bcb14e5999497882ff50461aa6a7c0c27a372145d0efcbfe090f23a8190ed62","src/symbolize/dbghelp.rs":"198aee3a1f4a925dee8554051168da5378c3ac7f2aaabcdbb6cd0a6286a9c28f","src/symbolize/gimli.rs":"fc42a9cc720ee49c42195b62b4727e129adb908c74fe43ffa045c1475f3b8415","src/symbolize/gimli/coff.rs":"e9f3062252f6750d376575b4fc39484647b46038245c6af9e84556cb67b7543d","src/symbolize/gimli/elf.rs":"6a62a23a79f965edf13120e920f1c0da07963117c26af2baa583c1a198a5a147","src/symbolize/gimli/libs_aix.rs":"8ec6ce735671fe2c42458454d49b4333997c4e441b7ed48313054ca37491c821","src/symbolize/gimli/libs_dl_iterate_phdr.rs":"f2453be9c182f243774f8fd502f7165fd90b8bb27d22819e574f5c5104ef5e73","src/symbolize/gimli/libs_haiku.rs":"b21476fc5787aa4758a66a8ef7200780a09814b17e2863c9db9cdf1a218f6af4","src/symbolize/gimli/libs_illumos.rs":"4fb8c252d437d0f9b74eefc72cdf00af5a51db679f4cc0cbd67ca457b0cfe4e8","src/symbolize/gimli/libs_libnx.rs":"f0f4e2d0c9ee34d7c6da2f749df56aa6dffcd78c4cd52037799a6b5d5c82582e","src/symbolize/gimli/libs_macos.rs":"d24a2a571ddcdbfc70f73fba07a47413649ef97ca79d38e28cb4fc9713691a5c","src/symbolize/gimli/libs_windows.rs":"acdfe1ea48d2cc1fba6c304f3f161128a90d6328c5b91ea8c7fae52a62672c43","src/symbolize/gimli/lru.rs":"2ca20aacc9b1ac71fb5bedc3a85b82fc12e7d283ae68136df0316c07735b6b5c","src/symbolize/gimli/macho.rs":"cdd5bfe7a4dd981d6b7d2f7f68c2581d507cef068aa7d9ce4d53b33310004baf","src/symbolize/gimli/mmap_fake.rs":"adec262cfda1d047f4c6b3bed61c256bccfac6e10f0b47d589dd76c82675303c","src/symbolize/gimli/mmap_unix.rs":"486809e3e6c2f621fc615d347a774f30683c74a61748dcbd923c81ec52b91e17","src/symbolize/gimli/mmap_windows.rs":"8631f82b71bf65b800a842fec53903daf94e7e91f7f5b663b94f2860424ac
e88","src/symbolize/gimli/parse_running_mmaps_unix.rs":"c23ee114ab05f8d1a37d8c47746f26a4b6741dae63e1393e2659e7c66d44c18e","src/symbolize/gimli/stash.rs":"e9b4c8b5849fda70c25a40be2f9a16473b601926cf96909087cfff25a8ab42b5","src/symbolize/gimli/xcoff.rs":"59c4f145f79225791a7075a971fc2b190e9ccabd147120de320880fa3ac4a198","src/symbolize/miri.rs":"f5201cc8a7de24ad3424d2472cb0af59cd28563d09cc0c21e998f4cee4367ade","src/symbolize/mod.rs":"226079872fb7228086d0a550cd5cd78ca691b8f965e31e64c04a761c9b718bb8","src/symbolize/noop.rs":"5d4432079b8ae2b9382945a57ae43df57bb4b7ed2e5956d4167e051a44567388","src/types.rs":"20cc0d35c3862f705d4ae1f2eb0564ea0afb140d444ecb17a5d5d18c6a4b1f92","src/windows_sys.rs":"f6db5489cec1c71a3f3abaae4b3874230aa1211aba86761db09692108658f931","src/windows_sys_arm32_shim.rs":"8453195e374cde8ffbba82dd1543c8eedeeae9a94195197cdffe995119bf9567","tests/accuracy/auxiliary.rs":"71d2238da401042e007ef5ee20336d6834724bae96d93c8c52d11a5a332d7d34","tests/accuracy/main.rs":"851778c046bc5b51f91777716ffe9896e0f872197d069e2dcd9a8b5ef4c98b01","tests/common/mod.rs":"733101288a48cf94d5a87a1957724deaf2650c3e4e8aa0190a4a7db62aa90d01","tests/concurrent-panics.rs":"d2b958cd2147f456e10193f874290e721915c78735ac83ef975565c2deb00a0d","tests/current-exe-mismatch.rs":"0e27d53ba66997dcf06583f3000f7e1ddb824bdbe05b3862e712f454b88014f2","tests/long_fn_name.rs":"12af8bcef41f2d4f9e2711cbe2a605e15ed47b571fd871f4da1fd159494d779a","tests/sgx-image-base.rs":"564d799ce613569b9d8b65ecf027e01719409fcf3d07c9179f3c7935e364bb41","tests/skip_inner_frames.rs":"7e6af1d71df6f5793900cef01a7c56fd684a3dceebbd2b03015d2f5c1dcd92f0","tests/smoke.rs":"70d6e7852d76f21190c8b176087f49af6137099197ae9bbb59e835128211c125"},"package":"6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002"}
\ No newline at end of file diff --git a/vendor/backtrace/Cargo.lock b/vendor/backtrace/Cargo.lock new file mode 100644 index 00000000..5f2b9e3a --- /dev/null +++ b/vendor/backtrace/Cargo.lock @@ -0,0 +1,227 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "addr2line" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5fb1d8e4442bd405fdfd1dacb42792696b0cf9cb15882e5d097b742a676d375" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" + +[[package]] +name = "backtrace" +version = "0.3.75" +dependencies = [ + "addr2line", + "cfg-if", + "cpp_demangle", + "libc", + "libloading", + "miniz_oxide", + "object", + "rustc-demangle", + "ruzstd", + "serde", + "windows-targets", +] + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "cpp_demangle" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96e58d342ad113c2b878f16d5d034c03be492ae460cdbc02b7f0f2284d310c7d" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "gimli" +version = "0.31.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32085ea23f3234fc7846555e85283ba4de91e21016dc0455a16286d87a292d64" + +[[package]] +name = "libc" +version = "0.2.171" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6" + +[[package]] +name = "libloading" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" +dependencies = [ + "cfg-if", + "windows-targets", +] + +[[package]] +name = "memchr" +version = "2.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" + +[[package]] +name = "miniz_oxide" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" +dependencies = [ + "adler2", +] + +[[package]] +name = "object" +version = "0.36.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "084f1a5821ac4c651660a94a7153d27ac9d8a53736203f58b31945ded098070a" +dependencies = [ + "memchr", +] + +[[package]] +name = "proc-macro2" +version = "1.0.86" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" + +[[package]] +name = "ruzstd" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"fad02996bfc73da3e301efe90b1837be9ed8f4a462b6ed410aa35d00381de89f" + +[[package]] +name = "serde" +version = "1.0.210" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.210" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "syn" +version = "2.0.79" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89132cd0bf050864e1d38dc3bbc07a0eb8e7530af26344d3d2bbbef83499f590" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "unicode-ident" +version = "1.0.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_gnullvm", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" diff --git a/vendor/backtrace/Cargo.toml b/vendor/backtrace/Cargo.toml new file mode 100644 index 00000000..541be605 --- /dev/null +++ b/vendor/backtrace/Cargo.toml @@ -0,0 +1,157 @@ +# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO +# +# When uploading crates to the registry Cargo will automatically +# "normalize" Cargo.toml files for maximal compatibility +# with all versions of Cargo and also rewrite 
`path` dependencies +# to registry (e.g., crates.io) dependencies. +# +# If you are reading this file be aware that the original Cargo.toml +# will likely look very different (and much more reasonable). +# See Cargo.toml.orig for the original contents. + +[package] +edition = "2021" +rust-version = "1.82.0" +name = "backtrace" +version = "0.3.75" +authors = ["The Rust Project Developers"] +build = false +exclude = ["/ci/"] +autolib = false +autobins = false +autoexamples = false +autotests = false +autobenches = false +description = """ +A library to acquire a stack trace (backtrace) at runtime in a Rust program. +""" +homepage = "https://github.com/rust-lang/backtrace-rs" +documentation = "https://docs.rs/backtrace" +readme = "README.md" +license = "MIT OR Apache-2.0" +repository = "https://github.com/rust-lang/backtrace-rs" + +[features] +coresymbolication = [] +dbghelp = [] +default = ["std"] +dl_iterate_phdr = [] +dladdr = [] +kernel32 = [] +libunwind = [] +ruzstd = ["dep:ruzstd"] +serialize-serde = ["serde"] +std = [] +unix-backtrace = [] + +[lib] +name = "backtrace" +path = "src/lib.rs" + +[[example]] +name = "backtrace" +path = "examples/backtrace.rs" +required-features = ["std"] + +[[example]] +name = "raw" +path = "examples/raw.rs" +required-features = ["std"] + +[[test]] +name = "accuracy" +path = "tests/accuracy/main.rs" +required-features = ["std"] +edition = "2021" + +[[test]] +name = "concurrent-panics" +path = "tests/concurrent-panics.rs" +harness = false +required-features = ["std"] + +[[test]] +name = "current-exe-mismatch" +path = "tests/current-exe-mismatch.rs" +harness = false +required-features = ["std"] + +[[test]] +name = "long_fn_name" +path = "tests/long_fn_name.rs" +required-features = ["std"] + +[[test]] +name = "sgx-image-base" +path = "tests/sgx-image-base.rs" + +[[test]] +name = "skip_inner_frames" +path = "tests/skip_inner_frames.rs" +required-features = ["std"] + +[[test]] +name = "smoke" +path = "tests/smoke.rs" +required-features = ["std"] +edition = "2021" + +[[bench]] +name = "benchmarks" +path = "benches/benchmarks.rs" + +[dependencies.cfg-if] +version = "1.0" + +[dependencies.cpp_demangle] +version = "0.4.0" +features = ["alloc"] +optional = true +default-features = false + +[dependencies.rustc-demangle] +version = "0.1.24" + +[dependencies.serde] +version = "1.0" +features = ["derive"] +optional = true + +[dev-dependencies.libloading] +version = "0.8" + +[target.'cfg(any(windows, target_os = "cygwin"))'.dependencies.windows-targets] +version = "0.52.6" + +[target.'cfg(not(all(windows, target_env = "msvc", not(target_vendor = "uwp"))))'.dependencies.addr2line] +version = "0.24.0" +default-features = false + +[target.'cfg(not(all(windows, target_env = "msvc", not(target_vendor = "uwp"))))'.dependencies.libc] +version = "0.2.156" +default-features = false + +[target.'cfg(not(all(windows, target_env = "msvc", not(target_vendor = "uwp"))))'.dependencies.miniz_oxide] +version = "0.8" +default-features = false + +[target.'cfg(not(all(windows, target_env = "msvc", not(target_vendor = "uwp"))))'.dependencies.object] +version = "0.36.0" +features = [ + "read_core", + "elf", + "macho", + "pe", + "xcoff", + "unaligned", + "archive", +] +default-features = false + +[target.'cfg(not(all(windows, target_env = "msvc", not(target_vendor = "uwp"))))'.dependencies.ruzstd] +version = "0.7.3" +optional = true +default-features = false + +[lints.rust] +unexpected_cfgs = "allow" diff --git a/vendor/backtrace/LICENSE-APACHE b/vendor/backtrace/LICENSE-APACHE new file mode 
100644 index 00000000..16fe87b0 --- /dev/null +++ b/vendor/backtrace/LICENSE-APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/backtrace/LICENSE-MIT b/vendor/backtrace/LICENSE-MIT new file mode 100644 index 00000000..39e0ed66 --- /dev/null +++ b/vendor/backtrace/LICENSE-MIT @@ -0,0 +1,25 @@ +Copyright (c) 2014 Alex Crichton + +Permission is hereby granted, free of charge, to any +person obtaining a copy of this software and associated +documentation files (the "Software"), to deal in the +Software without restriction, including without +limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software +is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice +shall be included in all copies or substantial portions +of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF +ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED +TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR +IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. diff --git a/vendor/backtrace/README.md b/vendor/backtrace/README.md new file mode 100644 index 00000000..cebbad6d --- /dev/null +++ b/vendor/backtrace/README.md @@ -0,0 +1,85 @@ +# backtrace-rs + +[Documentation](https://docs.rs/backtrace) + +A library for acquiring backtraces at runtime for Rust. This library aims to +enhance the support of the standard library by providing a programmatic +interface to work with, but it also supports simply easily printing the current +backtrace like libstd's panics. + +## Install + +```toml +[dependencies] +backtrace = "0.3" +``` + +## Usage + +To simply capture a backtrace and defer dealing with it until a later time, +you can use the top-level `Backtrace` type. + +```rust +use backtrace::Backtrace; + +fn main() { + let bt = Backtrace::new(); + + // do_some_work(); + + println!("{bt:?}"); +} +``` + +If, however, you'd like more raw access to the actual tracing functionality, you +can use the `trace` and `resolve` functions directly. + +```rust +fn main() { + backtrace::trace(|frame| { + let ip = frame.ip(); + let symbol_address = frame.symbol_address(); + + // Resolve this instruction pointer to a symbol name + backtrace::resolve_frame(frame, |symbol| { + if let Some(name) = symbol.name() { + // ... + } + if let Some(filename) = symbol.filename() { + // ... + } + }); + + true // keep going to the next frame + }); +} +``` + +# Supported Rust Versions + +The `backtrace` crate is a core component of the standard library, and must +at times keep up with the evolution of various platforms in order to serve +the standard library's needs. This often means using recent libraries +that provide unwinding and symbolication for various platforms. +Thus `backtrace` is likely to use recent Rust features or depend on a library +which itself uses them. 
Its minimum supported Rust version, by policy, is +within a few versions of current stable, approximately "stable - 2". + +This policy takes precedence over versions written anywhere else in this repo. + +# License + +This project is licensed under either of + + * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or + https://www.apache.org/licenses/LICENSE-2.0) + * MIT license ([LICENSE-MIT](LICENSE-MIT) or + https://opensource.org/licenses/MIT) + +at your option. + +### Contribution + +Unless you explicitly state otherwise, any contribution intentionally submitted +for inclusion in backtrace-rs by you, as defined in the Apache-2.0 license, shall be +dual licensed as above, without any additional terms or conditions. diff --git a/vendor/backtrace/benches/benchmarks.rs b/vendor/backtrace/benches/benchmarks.rs new file mode 100644 index 00000000..e14e733b --- /dev/null +++ b/vendor/backtrace/benches/benchmarks.rs @@ -0,0 +1,92 @@ +#![feature(test)] + +extern crate test; + +#[cfg(feature = "std")] +use backtrace::Backtrace; + +#[bench] +#[cfg(feature = "std")] +fn trace(b: &mut test::Bencher) { + #[inline(never)] + fn the_function() { + backtrace::trace(|frame| { + let ip = frame.ip(); + test::black_box(ip); + true + }); + } + b.iter(the_function); +} + +#[bench] +#[cfg(feature = "std")] +fn trace_and_resolve_callback(b: &mut test::Bencher) { + #[inline(never)] + fn the_function() { + backtrace::trace(|frame| { + backtrace::resolve(frame.ip(), |symbol| { + let addr = symbol.addr(); + test::black_box(addr); + }); + true + }); + } + b.iter(the_function); +} + +#[bench] +#[cfg(feature = "std")] +fn trace_and_resolve_separate(b: &mut test::Bencher) { + #[inline(never)] + fn the_function(frames: &mut Vec<*mut std::ffi::c_void>) { + backtrace::trace(|frame| { + frames.push(frame.ip()); + true + }); + frames.iter().for_each(|frame_ip| { + backtrace::resolve(*frame_ip, |symbol| { + test::black_box(symbol); + }); + }); + } + let mut frames = Vec::with_capacity(1024); + b.iter(|| { + the_function(&mut frames); + frames.clear(); + }); +} + +#[bench] +#[cfg(feature = "std")] +fn new_unresolved(b: &mut test::Bencher) { + #[inline(never)] + fn the_function() { + let bt = Backtrace::new_unresolved(); + test::black_box(bt); + } + b.iter(the_function); +} + +#[bench] +#[cfg(feature = "std")] +fn new(b: &mut test::Bencher) { + #[inline(never)] + fn the_function() { + let bt = Backtrace::new(); + test::black_box(bt); + } + b.iter(the_function); +} + +#[bench] +#[cfg(feature = "std")] +fn new_unresolved_and_resolve_separate(b: &mut test::Bencher) { + #[inline(never)] + fn the_function() { + let mut bt = Backtrace::new_unresolved(); + bt.resolve(); + test::black_box(bt); + } + b.iter(the_function); +} diff --git a/vendor/backtrace/bindings.txt b/vendor/backtrace/bindings.txt new file mode 100644 index 00000000..166224b9 --- /dev/null +++ b/vendor/backtrace/bindings.txt @@ -0,0 +1,63 @@ +--out src/windows_sys.rs +--config sys flatten +--filter +Windows.Win32.Foundation.CloseHandle +Windows.Win32.Foundation.FALSE +Windows.Win32.Foundation.HINSTANCE +Windows.Win32.Foundation.INVALID_HANDLE_VALUE +Windows.Win32.Foundation.TRUE +Windows.Win32.Globalization.CP_UTF8 +Windows.Win32.Globalization.lstrlenW +Windows.Win32.Globalization.WideCharToMultiByte +Windows.Win32.System.Diagnostics.Debug.AddrModeFlat +Windows.Win32.System.Diagnostics.Debug.CONTEXT +Windows.Win32.System.Diagnostics.Debug.EnumerateLoadedModulesW64 +Windows.Win32.System.Diagnostics.Debug.IMAGEHLP_LINEW64 
+Windows.Win32.System.Diagnostics.Debug.MAX_SYM_NAME +Windows.Win32.System.Diagnostics.Debug.PENUMLOADED_MODULES_CALLBACKW64 +Windows.Win32.System.Diagnostics.Debug.PFUNCTION_TABLE_ACCESS_ROUTINE64 +Windows.Win32.System.Diagnostics.Debug.PGET_MODULE_BASE_ROUTINE64 +Windows.Win32.System.Diagnostics.Debug.PREAD_PROCESS_MEMORY_ROUTINE64 +Windows.Win32.System.Diagnostics.Debug.PTRANSLATE_ADDRESS_ROUTINE64 +Windows.Win32.System.Diagnostics.Debug.RtlCaptureContext +Windows.Win32.System.Diagnostics.Debug.RtlLookupFunctionEntry +Windows.Win32.System.Diagnostics.Debug.RtlVirtualUnwind +Windows.Win32.System.Diagnostics.Debug.STACKFRAME64 +Windows.Win32.System.Diagnostics.Debug.STACKFRAME_EX +Windows.Win32.System.Diagnostics.Debug.StackWalk64 +Windows.Win32.System.Diagnostics.Debug.StackWalkEx +Windows.Win32.System.Diagnostics.Debug.SymAddrIncludeInlineTrace +Windows.Win32.System.Diagnostics.Debug.SYMBOL_INFOW +Windows.Win32.System.Diagnostics.Debug.SymFromAddrW +Windows.Win32.System.Diagnostics.Debug.SymFromInlineContextW +Windows.Win32.System.Diagnostics.Debug.SymFunctionTableAccess64 +Windows.Win32.System.Diagnostics.Debug.SymGetLineFromAddrW64 +Windows.Win32.System.Diagnostics.Debug.SymGetLineFromInlineContextW +Windows.Win32.System.Diagnostics.Debug.SymGetModuleBase64 +Windows.Win32.System.Diagnostics.Debug.SymGetOptions +Windows.Win32.System.Diagnostics.Debug.SymGetSearchPathW +Windows.Win32.System.Diagnostics.Debug.SymInitializeW +Windows.Win32.System.Diagnostics.Debug.SYMOPT_DEFERRED_LOADS +Windows.Win32.System.Diagnostics.Debug.SymQueryInlineTrace +Windows.Win32.System.Diagnostics.Debug.SymSetOptions +Windows.Win32.System.Diagnostics.Debug.SymSetSearchPathW +Windows.Win32.System.Diagnostics.ToolHelp.CreateToolhelp32Snapshot +Windows.Win32.System.Diagnostics.ToolHelp.Module32FirstW +Windows.Win32.System.Diagnostics.ToolHelp.Module32NextW +Windows.Win32.System.Diagnostics.ToolHelp.MODULEENTRY32W +Windows.Win32.System.Diagnostics.ToolHelp.TH32CS_SNAPMODULE +Windows.Win32.System.LibraryLoader.GetProcAddress +Windows.Win32.System.LibraryLoader.LoadLibraryA +Windows.Win32.System.Memory.CreateFileMappingA +Windows.Win32.System.Memory.FILE_MAP_READ +Windows.Win32.System.Memory.MapViewOfFile +Windows.Win32.System.Memory.PAGE_READONLY +Windows.Win32.System.Memory.UnmapViewOfFile +Windows.Win32.System.SystemInformation.IMAGE_FILE_MACHINE_I386 +Windows.Win32.System.Threading.CreateMutexA +Windows.Win32.System.Threading.GetCurrentProcess +Windows.Win32.System.Threading.GetCurrentProcessId +Windows.Win32.System.Threading.GetCurrentThread +Windows.Win32.System.Threading.INFINITE +Windows.Win32.System.Threading.ReleaseMutex +Windows.Win32.System.Threading.WaitForSingleObjectEx
\ No newline at end of file diff --git a/vendor/backtrace/examples/backtrace.rs b/vendor/backtrace/examples/backtrace.rs new file mode 100644 index 00000000..7ff6cd39 --- /dev/null +++ b/vendor/backtrace/examples/backtrace.rs @@ -0,0 +1,5 @@ +use backtrace::Backtrace; + +fn main() { + println!("{:?}", Backtrace::new()); +} diff --git a/vendor/backtrace/examples/raw.rs b/vendor/backtrace/examples/raw.rs new file mode 100644 index 00000000..95e17dbd --- /dev/null +++ b/vendor/backtrace/examples/raw.rs @@ -0,0 +1,52 @@ +fn main() { + foo(); +} + +fn foo() { + bar() +} +fn bar() { + baz() +} +fn baz() { + print() +} + +#[cfg(target_pointer_width = "32")] +const HEX_WIDTH: usize = 10; +#[cfg(target_pointer_width = "64")] +const HEX_WIDTH: usize = 20; + +fn print() { + let mut cnt = 0; + backtrace::trace(|frame| { + let ip = frame.ip(); + print!("frame #{:<2} - {:#02$x}", cnt, ip as usize, HEX_WIDTH); + cnt += 1; + + let mut resolved = false; + backtrace::resolve(frame.ip(), |symbol| { + if !resolved { + resolved = true; + } else { + print!("{}", vec![" "; 7 + 2 + 3 + HEX_WIDTH].join("")); + } + + if let Some(name) = symbol.name() { + print!(" - {name}"); + } else { + print!(" - <unknown>"); + } + if let Some(file) = symbol.filename() { + if let Some(l) = symbol.lineno() { + print!("\n{:13}{:4$}@ {}:{}", "", "", file.display(), l, HEX_WIDTH); + } + } + println!(""); + }); + if !resolved { + println!(" - <no info>"); + } + true // keep going + }); +} diff --git a/vendor/backtrace/src/backtrace/libunwind.rs b/vendor/backtrace/src/backtrace/libunwind.rs new file mode 100644 index 00000000..0564f2ea --- /dev/null +++ b/vendor/backtrace/src/backtrace/libunwind.rs @@ -0,0 +1,301 @@ +//! Backtrace support using libunwind/gcc_s/etc APIs. +//! +//! This module contains the ability to unwind the stack using libunwind-style +//! APIs. Note that there's a whole bunch of implementations of the +//! libunwind-like API, and this is just trying to be compatible with most of +//! them all at once instead of being picky. +//! +//! The libunwind API is powered by `_Unwind_Backtrace` and is in practice very +//! reliable at generating a backtrace. It's not entirely clear how it does it +//! (frame pointers? eh_frame info? both?) but it seems to work! +//! +//! Most of the complexity of this module is handling the various platform +//! differences across libunwind implementations. Otherwise this is a pretty +//! straightforward Rust binding to the libunwind APIs. +//! +//! This is the default unwinding API for all non-Windows platforms currently. + +use core::ffi::c_void; +use core::ptr::addr_of_mut; + +pub enum Frame { + Raw(*mut uw::_Unwind_Context), + Cloned { + ip: *mut c_void, + sp: *mut c_void, + symbol_address: *mut c_void, + }, +} + +// With a raw libunwind pointer it should only ever be access in a readonly +// threadsafe fashion, so it's `Sync`. When sending to other threads via `Clone` +// we always switch to a version which doesn't retain interior pointers, so we +// should be `Send` as well. +unsafe impl Send for Frame {} +unsafe impl Sync for Frame {} + +impl Frame { + pub fn ip(&self) -> *mut c_void { + let ctx = match *self { + Frame::Raw(ctx) => ctx, + Frame::Cloned { ip, .. } => return ip, + }; + #[allow(unused_mut)] + let mut ip = unsafe { uw::_Unwind_GetIP(ctx) as *mut c_void }; + + // To reduce TCB size in SGX enclaves, we do not want to implement + // symbol resolution functionality. Rather, we can print the offset of + // the address here, which could be later mapped to correct function. 
+ #[cfg(all(target_env = "sgx", target_vendor = "fortanix"))] + { + let image_base = super::sgx_image_base::get_image_base(); + ip = usize::wrapping_sub(ip as usize, image_base as _) as _; + } + ip + } + + pub fn sp(&self) -> *mut c_void { + match *self { + Frame::Raw(ctx) => unsafe { uw::get_sp(ctx) as *mut c_void }, + Frame::Cloned { sp, .. } => sp, + } + } + + pub fn symbol_address(&self) -> *mut c_void { + if let Frame::Cloned { symbol_address, .. } = *self { + return symbol_address; + } + + // The macOS linker emits a "compact" unwind table that only includes an + // entry for a function if that function either has an LSDA or its + // encoding differs from that of the previous entry. Consequently, on + // macOS, `_Unwind_FindEnclosingFunction` is unreliable (it can return a + // pointer to some totally unrelated function). Instead, we just always + // return the ip. + // + // https://github.com/rust-lang/rust/issues/74771#issuecomment-664056788 + // + // Note the `skip_inner_frames.rs` test is skipped on macOS due to this + // clause, and if this is fixed that test in theory can be run on macOS! + if cfg!(target_vendor = "apple") { + self.ip() + } else { + unsafe { uw::_Unwind_FindEnclosingFunction(self.ip()) } + } + } + + pub fn module_base_address(&self) -> Option<*mut c_void> { + None + } +} + +impl Clone for Frame { + fn clone(&self) -> Frame { + Frame::Cloned { + ip: self.ip(), + sp: self.sp(), + symbol_address: self.symbol_address(), + } + } +} + +struct Bomb { + enabled: bool, +} + +impl Drop for Bomb { + fn drop(&mut self) { + if self.enabled { + panic!("cannot panic during the backtrace function"); + } + } +} + +#[inline(always)] +pub unsafe fn trace(mut cb: &mut dyn FnMut(&super::Frame) -> bool) { + unsafe { + uw::_Unwind_Backtrace(trace_fn, addr_of_mut!(cb).cast()); + } + + extern "C" fn trace_fn( + ctx: *mut uw::_Unwind_Context, + arg: *mut c_void, + ) -> uw::_Unwind_Reason_Code { + let cb = unsafe { &mut *arg.cast::<&mut dyn FnMut(&super::Frame) -> bool>() }; + let cx = super::Frame { + inner: Frame::Raw(ctx), + }; + + let mut bomb = Bomb { enabled: true }; + let keep_going = cb(&cx); + bomb.enabled = false; + + if keep_going { + uw::_URC_NO_REASON + } else { + uw::_URC_FAILURE + } + } +} + +/// Unwind library interface used for backtraces +/// +/// Note that dead code is allowed as here are just bindings +/// iOS doesn't use all of them it but adding more +/// platform-specific configs pollutes the code too much +#[allow(non_camel_case_types)] +#[allow(non_snake_case)] +#[allow(dead_code)] +mod uw { + pub use self::_Unwind_Reason_Code::*; + + use core::ffi::c_void; + + #[repr(C)] + pub enum _Unwind_Reason_Code { + _URC_NO_REASON = 0, + _URC_FOREIGN_EXCEPTION_CAUGHT = 1, + _URC_FATAL_PHASE2_ERROR = 2, + _URC_FATAL_PHASE1_ERROR = 3, + _URC_NORMAL_STOP = 4, + _URC_END_OF_STACK = 5, + _URC_HANDLER_FOUND = 6, + _URC_INSTALL_CONTEXT = 7, + _URC_CONTINUE_UNWIND = 8, + _URC_FAILURE = 9, // used only by ARM EABI + } + + pub enum _Unwind_Context {} + + pub type _Unwind_Trace_Fn = + extern "C" fn(ctx: *mut _Unwind_Context, arg: *mut c_void) -> _Unwind_Reason_Code; + + unsafe extern "C" { + pub fn _Unwind_Backtrace( + trace: _Unwind_Trace_Fn, + trace_argument: *mut c_void, + ) -> _Unwind_Reason_Code; + } + + cfg_if::cfg_if! 
{ + // available since GCC 4.2.0, should be fine for our purpose + if #[cfg(all( + not(all(target_os = "android", target_arch = "arm")), + not(all(target_os = "freebsd", target_arch = "arm")), + not(all(target_os = "linux", target_arch = "arm")), + not(all(target_os = "horizon", target_arch = "arm")), + not(all(target_os = "rtems", target_arch = "arm")), + not(all(target_os = "vita", target_arch = "arm")), + not(all(target_os = "nuttx", target_arch = "arm")), + ))] { + unsafe extern "C" { + pub fn _Unwind_GetIP(ctx: *mut _Unwind_Context) -> libc::uintptr_t; + pub fn _Unwind_FindEnclosingFunction(pc: *mut c_void) -> *mut c_void; + + #[cfg(not(all(target_os = "linux", target_arch = "s390x")))] + // This function is a misnomer: rather than getting this frame's + // Canonical Frame Address (aka the caller frame's SP) it + // returns this frame's SP. + // + // https://github.com/libunwind/libunwind/blob/d32956507cf29d9b1a98a8bce53c78623908f4fe/src/unwind/GetCFA.c#L28-L35 + #[link_name = "_Unwind_GetCFA"] + pub fn get_sp(ctx: *mut _Unwind_Context) -> libc::uintptr_t; + + } + + // s390x uses a biased CFA value, therefore we need to use + // _Unwind_GetGR to get the stack pointer register (%r15) + // instead of relying on _Unwind_GetCFA. + #[cfg(all(target_os = "linux", target_arch = "s390x"))] + pub unsafe fn get_sp(ctx: *mut _Unwind_Context) -> libc::uintptr_t { + unsafe extern "C" { + pub fn _Unwind_GetGR(ctx: *mut _Unwind_Context, index: libc::c_int) -> libc::uintptr_t; + } + unsafe { _Unwind_GetGR(ctx, 15) } + } + } else { + use core::ptr::addr_of_mut; + + // On android and arm, the function `_Unwind_GetIP` and a bunch of + // others are macros, so we define functions containing the + // expansion of the macros. + // + // TODO: link to the header file that defines these macros, if you + // can find it. (I, fitzgen, cannot find the header file that some + // of these macro expansions were originally borrowed from.) + #[repr(C)] + enum _Unwind_VRS_Result { + _UVRSR_OK = 0, + _UVRSR_NOT_IMPLEMENTED = 1, + _UVRSR_FAILED = 2, + } + #[repr(C)] + enum _Unwind_VRS_RegClass { + _UVRSC_CORE = 0, + _UVRSC_VFP = 1, + _UVRSC_FPA = 2, + _UVRSC_WMMXD = 3, + _UVRSC_WMMXC = 4, + } + #[repr(C)] + enum _Unwind_VRS_DataRepresentation { + _UVRSD_UINT32 = 0, + _UVRSD_VFPX = 1, + _UVRSD_FPAX = 2, + _UVRSD_UINT64 = 3, + _UVRSD_FLOAT = 4, + _UVRSD_DOUBLE = 5, + } + + type _Unwind_Word = libc::c_uint; + unsafe extern "C" { + fn _Unwind_VRS_Get( + ctx: *mut _Unwind_Context, + klass: _Unwind_VRS_RegClass, + word: _Unwind_Word, + repr: _Unwind_VRS_DataRepresentation, + data: *mut c_void, + ) -> _Unwind_VRS_Result; + } + + pub unsafe fn _Unwind_GetIP(ctx: *mut _Unwind_Context) -> libc::uintptr_t { + let mut val: _Unwind_Word = 0; + let ptr = addr_of_mut!(val); + unsafe { + let _ = _Unwind_VRS_Get( + ctx, + _Unwind_VRS_RegClass::_UVRSC_CORE, + 15, + _Unwind_VRS_DataRepresentation::_UVRSD_UINT32, + ptr.cast::<c_void>(), + ); + } + (val & !1) as libc::uintptr_t + } + + // R13 is the stack pointer on arm. + const SP: _Unwind_Word = 13; + + pub unsafe fn get_sp(ctx: *mut _Unwind_Context) -> libc::uintptr_t { + let mut val: _Unwind_Word = 0; + let ptr = addr_of_mut!(val); + unsafe { + let _ = _Unwind_VRS_Get( + ctx, + _Unwind_VRS_RegClass::_UVRSC_CORE, + SP, + _Unwind_VRS_DataRepresentation::_UVRSD_UINT32, + ptr.cast::<c_void>(), + ); + } + val as libc::uintptr_t + } + + // This function also doesn't exist on Android or ARM/Linux, so make it + // a no-op. 
+ pub unsafe fn _Unwind_FindEnclosingFunction(pc: *mut c_void) -> *mut c_void { + pc + } + } + } +} diff --git a/vendor/backtrace/src/backtrace/miri.rs b/vendor/backtrace/src/backtrace/miri.rs new file mode 100644 index 00000000..2c36be42 --- /dev/null +++ b/vendor/backtrace/src/backtrace/miri.rs @@ -0,0 +1,119 @@ +use alloc::boxed::Box; +use alloc::vec::Vec; +use core::ffi::c_void; + +unsafe extern "Rust" { + fn miri_backtrace_size(flags: u64) -> usize; + fn miri_get_backtrace(flags: u64, buf: *mut *mut ()); + fn miri_resolve_frame(ptr: *mut (), flags: u64) -> MiriFrame; + fn miri_resolve_frame_names(ptr: *mut (), flags: u64, name_buf: *mut u8, filename_buf: *mut u8); +} + +#[repr(C)] +pub struct MiriFrame { + pub name_len: usize, + pub filename_len: usize, + pub lineno: u32, + pub colno: u32, + pub fn_ptr: *mut c_void, +} + +#[derive(Clone, Debug)] +pub struct FullMiriFrame { + pub name: Box<[u8]>, + pub filename: Box<[u8]>, + pub lineno: u32, + pub colno: u32, + pub fn_ptr: *mut c_void, +} + +#[derive(Debug, Clone)] +pub struct Frame { + pub addr: *mut c_void, + pub inner: FullMiriFrame, +} + +// SAFETY: Miri guarantees that the returned pointer +// can be used from any thread. +unsafe impl Send for Frame {} +unsafe impl Sync for Frame {} + +impl Frame { + pub fn ip(&self) -> *mut c_void { + self.addr + } + + pub fn sp(&self) -> *mut c_void { + core::ptr::null_mut() + } + + pub fn symbol_address(&self) -> *mut c_void { + self.inner.fn_ptr + } + + pub fn module_base_address(&self) -> Option<*mut c_void> { + None + } +} + +// SAFETY: This function is safe to call. It is only marked as `unsafe` to +// avoid having to allow `unused_unsafe` since other implementations are +// unsafe. +pub unsafe fn trace<F: FnMut(&super::Frame) -> bool>(cb: F) { + // SAFETY: Miri guarantees that the backtrace API functions + // can be called from any thread. + unsafe { trace_unsynchronized(cb) }; +} + +pub fn resolve_addr(ptr: *mut c_void) -> Frame { + // SAFETY: Miri will stop execution with an error if this pointer + // is invalid. + let frame = unsafe { miri_resolve_frame(ptr.cast::<()>(), 1) }; + + let mut name = Vec::with_capacity(frame.name_len); + let mut filename = Vec::with_capacity(frame.filename_len); + + // SAFETY: name and filename have been allocated with the amount + // of memory miri has asked for, and miri guarantees it will initialize it + unsafe { + miri_resolve_frame_names( + ptr.cast::<()>(), + 0, + name.as_mut_ptr(), + filename.as_mut_ptr(), + ); + + name.set_len(frame.name_len); + filename.set_len(frame.filename_len); + } + + Frame { + addr: ptr, + inner: FullMiriFrame { + name: name.into(), + filename: filename.into(), + lineno: frame.lineno, + colno: frame.colno, + fn_ptr: frame.fn_ptr, + }, + } +} + +unsafe fn trace_unsynchronized<F: FnMut(&super::Frame) -> bool>(mut cb: F) { + let len = unsafe { miri_backtrace_size(0) }; + + let mut frames = Vec::with_capacity(len); + + unsafe { + miri_get_backtrace(1, frames.as_mut_ptr()); + + frames.set_len(len); + } + + for ptr in frames.iter() { + let frame = resolve_addr((*ptr).cast::<c_void>()); + if !cb(&super::Frame { inner: frame }) { + return; + } + } +} diff --git a/vendor/backtrace/src/backtrace/mod.rs b/vendor/backtrace/src/backtrace/mod.rs new file mode 100644 index 00000000..2a36214b --- /dev/null +++ b/vendor/backtrace/src/backtrace/mod.rs @@ -0,0 +1,206 @@ +use core::ffi::c_void; +use core::fmt; + +/// Inspects the current call-stack, passing all active frames into the closure +/// provided to calculate a stack trace. 
+/// +/// This function is the workhorse of this library in calculating the stack +/// traces for a program. The given closure `cb` is yielded instances of a +/// `Frame` which represent information about that call frame on the stack. The +/// closure is yielded frames in a top-down fashion (most recently called +/// functions first). +/// +/// The closure's return value is an indication of whether the backtrace should +/// continue. A return value of `false` will terminate the backtrace and return +/// immediately. +/// +/// Once a `Frame` is acquired you will likely want to call `backtrace::resolve` +/// to convert the `ip` (instruction pointer) or symbol address to a `Symbol` +/// through which the name and/or filename/line number can be learned. +/// +/// Note that this is a relatively low-level function and if you'd like to, for +/// example, capture a backtrace to be inspected later, then the `Backtrace` +/// type may be more appropriate. +/// +/// # Required features +/// +/// This function requires the `std` feature of the `backtrace` crate to be +/// enabled, and the `std` feature is enabled by default. +/// +/// # Panics +/// +/// This function strives to never panic, but if the `cb` provided panics then +/// some platforms will force a double panic to abort the process. Some +/// platforms use a C library which internally uses callbacks which cannot be +/// unwound through, so panicking from `cb` may trigger a process abort. +/// +/// # Example +/// +/// ``` +/// extern crate backtrace; +/// +/// fn main() { +/// backtrace::trace(|frame| { +/// // ... +/// +/// true // continue the backtrace +/// }); +/// } +/// ``` +#[cfg(feature = "std")] +pub fn trace<F: FnMut(&Frame) -> bool>(cb: F) { + let _guard = crate::lock::lock(); + unsafe { trace_unsynchronized(cb) } +} + +/// Same as `trace`, only unsafe as it's unsynchronized. +/// +/// This function does not have synchronization guarantees but is available +/// when the `std` feature of this crate isn't compiled in. See the `trace` +/// function for more documentation and examples. +/// +/// # Panics +/// +/// See information on `trace` for caveats on `cb` panicking. +pub unsafe fn trace_unsynchronized<F: FnMut(&Frame) -> bool>(mut cb: F) { + unsafe { trace_imp(&mut cb) } +} + +/// A trait representing one frame of a backtrace, yielded to the `trace` +/// function of this crate. +/// +/// The tracing function's closure will be yielded frames, and the frame is +/// virtually dispatched as the underlying implementation is not always known +/// until runtime. +#[derive(Clone)] +pub struct Frame { + pub(crate) inner: FrameImp, +} + +impl Frame { + /// Returns the current instruction pointer of this frame. + /// + /// This is normally the next instruction to execute in the frame, but not + /// all implementations list this with 100% accuracy (but it's generally + /// pretty close). + /// + /// It is recommended to pass this value to `backtrace::resolve` to turn it + /// into a symbol name. + pub fn ip(&self) -> *mut c_void { + self.inner.ip() + } + + /// Returns the current stack pointer of this frame. + /// + /// In the case that a backend cannot recover the stack pointer for this + /// frame, a null pointer is returned. + pub fn sp(&self) -> *mut c_void { + self.inner.sp() + } + + /// Returns the starting symbol address of the frame of this function. + /// + /// This will attempt to rewind the instruction pointer returned by `ip` to + /// the start of the function, returning that value. 
In some cases, however, + /// backends will just return `ip` from this function. + /// + /// The returned value can sometimes be used if `backtrace::resolve` failed + /// on the `ip` given above. + pub fn symbol_address(&self) -> *mut c_void { + self.inner.symbol_address() + } + + /// Returns the base address of the module to which the frame belongs. + pub fn module_base_address(&self) -> Option<*mut c_void> { + self.inner.module_base_address() + } +} + +impl fmt::Debug for Frame { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Frame") + .field("ip", &self.ip()) + .field("symbol_address", &self.symbol_address()) + .finish() + } +} + +#[cfg(all(target_env = "sgx", target_vendor = "fortanix"))] +mod sgx_image_base { + + #[cfg(not(feature = "std"))] + pub(crate) mod imp { + use core::ffi::c_void; + use core::sync::atomic::{AtomicUsize, Ordering::SeqCst}; + + static IMAGE_BASE: AtomicUsize = AtomicUsize::new(0); + + /// Set the image base address. This is only available for Fortanix SGX + /// target when the `std` feature is not enabled. This can be used in the + /// standard library to set the correct base address. + #[doc(hidden)] + pub fn set_image_base(base_addr: *mut c_void) { + IMAGE_BASE.store(base_addr as _, SeqCst); + } + + pub(crate) fn get_image_base() -> *mut c_void { + IMAGE_BASE.load(SeqCst) as _ + } + } + + #[cfg(feature = "std")] + mod imp { + use core::ffi::c_void; + + pub(crate) fn get_image_base() -> *mut c_void { + std::os::fortanix_sgx::mem::image_base() as _ + } + } + + pub(crate) use imp::get_image_base; +} + +#[cfg(all(target_env = "sgx", target_vendor = "fortanix", not(feature = "std")))] +pub use sgx_image_base::imp::set_image_base; + +cfg_if::cfg_if! { + // This needs to come first, to ensure that + // Miri takes priority over the host platform + if #[cfg(miri)] { + pub(crate) mod miri; + use self::miri::trace as trace_imp; + pub(crate) use self::miri::Frame as FrameImp; + } else if #[cfg( + any( + all( + unix, + not(target_os = "emscripten"), + not(all(target_os = "ios", target_arch = "arm")), + ), + all( + target_env = "sgx", + target_vendor = "fortanix", + ), + ) + )] { + mod libunwind; + use self::libunwind::trace as trace_imp; + pub(crate) use self::libunwind::Frame as FrameImp; + } else if #[cfg(all(windows, not(target_vendor = "uwp")))] { + cfg_if::cfg_if! { + if #[cfg(any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "arm64ec"))] { + mod win64; + use self::win64::trace as trace_imp; + pub(crate) use self::win64::Frame as FrameImp; + } else if #[cfg(any(target_arch = "x86", target_arch = "arm"))] { + mod win32; + use self::win32::trace as trace_imp; + pub(crate) use self::win32::Frame as FrameImp; + } + } + } else { + mod noop; + use self::noop::trace as trace_imp; + pub(crate) use self::noop::Frame as FrameImp; + } +} diff --git a/vendor/backtrace/src/backtrace/noop.rs b/vendor/backtrace/src/backtrace/noop.rs new file mode 100644 index 00000000..98dbcfad --- /dev/null +++ b/vendor/backtrace/src/backtrace/noop.rs @@ -0,0 +1,32 @@ +//! Empty implementation of unwinding used when no other implementation is +//! appropriate. + +use core::ffi::c_void; +use core::ptr::null_mut; + +// SAFETY: This function is safe to call. It is only marked as `unsafe` to +// avoid having to allow `unused_unsafe` since other implementations are +// unsafe. 
+#[inline(always)] +pub unsafe fn trace(_cb: &mut dyn FnMut(&super::Frame) -> bool) {} + +#[derive(Clone)] +pub struct Frame; + +impl Frame { + pub fn ip(&self) -> *mut c_void { + null_mut() + } + + pub fn sp(&self) -> *mut c_void { + null_mut() + } + + pub fn symbol_address(&self) -> *mut c_void { + null_mut() + } + + pub fn module_base_address(&self) -> Option<*mut c_void> { + None + } +} diff --git a/vendor/backtrace/src/backtrace/win32.rs b/vendor/backtrace/src/backtrace/win32.rs new file mode 100644 index 00000000..9c459306 --- /dev/null +++ b/vendor/backtrace/src/backtrace/win32.rs @@ -0,0 +1,214 @@ +//! Backtrace strategy for Windows platforms. +//! +//! This module contains the ability to generate a backtrace on Windows using one +//! of two possible methods. The `StackWalkEx` function is primarily used if +//! possible, but not all systems have that. Failing that the `StackWalk64` +//! function is used instead. Note that `StackWalkEx` is favored because it +//! handles debuginfo internally and returns inline frame information. +//! +//! Note that all dbghelp support is loaded dynamically, see `src/dbghelp.rs` +//! for more information about that. + +use super::super::{dbghelp, windows_sys::*}; +use core::ffi::c_void; +use core::mem; + +#[derive(Clone, Copy)] +pub enum StackFrame { + New(STACKFRAME_EX), + Old(STACKFRAME64), +} + +#[derive(Clone, Copy)] +pub struct Frame { + pub(crate) stack_frame: StackFrame, + base_address: *mut c_void, +} + +// we're just sending around raw pointers and reading them, never interpreting +// them so this should be safe to both send and share across threads. +unsafe impl Send for Frame {} +unsafe impl Sync for Frame {} + +impl Frame { + pub fn ip(&self) -> *mut c_void { + self.addr_pc().Offset as *mut _ + } + + pub fn sp(&self) -> *mut c_void { + self.addr_stack().Offset as *mut _ + } + + pub fn symbol_address(&self) -> *mut c_void { + self.ip() + } + + pub fn module_base_address(&self) -> Option<*mut c_void> { + Some(self.base_address) + } + + #[cfg(not(target_env = "gnu"))] + pub fn inline_context(&self) -> Option<u32> { + match self.stack_frame { + StackFrame::New(ref new) => Some(new.InlineFrameContext), + StackFrame::Old(_) => None, + } + } + + fn addr_pc(&self) -> &ADDRESS64 { + match self.stack_frame { + StackFrame::New(ref new) => &new.AddrPC, + StackFrame::Old(ref old) => &old.AddrPC, + } + } + + fn addr_pc_mut(&mut self) -> &mut ADDRESS64 { + match self.stack_frame { + StackFrame::New(ref mut new) => &mut new.AddrPC, + StackFrame::Old(ref mut old) => &mut old.AddrPC, + } + } + + fn addr_frame_mut(&mut self) -> &mut ADDRESS64 { + match self.stack_frame { + StackFrame::New(ref mut new) => &mut new.AddrFrame, + StackFrame::Old(ref mut old) => &mut old.AddrFrame, + } + } + + fn addr_stack(&self) -> &ADDRESS64 { + match self.stack_frame { + StackFrame::New(ref new) => &new.AddrStack, + StackFrame::Old(ref old) => &old.AddrStack, + } + } + + fn addr_stack_mut(&mut self) -> &mut ADDRESS64 { + match self.stack_frame { + StackFrame::New(ref mut new) => &mut new.AddrStack, + StackFrame::Old(ref mut old) => &mut old.AddrStack, + } + } +} + +#[repr(C, align(16))] // required by `CONTEXT`, is a FIXME in windows metadata right now +struct MyContext(CONTEXT); + +#[inline(always)] +pub unsafe fn trace(cb: &mut dyn FnMut(&super::Frame) -> bool) { + // Allocate necessary structures for doing the stack walk + let process = GetCurrentProcess(); + let thread = GetCurrentThread(); + + let mut context = mem::zeroed::<MyContext>(); + RtlCaptureContext(&mut 
context.0); + + // Ensure this process's symbols are initialized + let dbghelp = match dbghelp::init() { + Ok(dbghelp) => dbghelp, + Err(()) => return, // oh well... + }; + + let function_table_access = dbghelp.SymFunctionTableAccess64(); + let get_module_base = dbghelp.SymGetModuleBase64(); + + let process_handle = GetCurrentProcess(); + + // Attempt to use `StackWalkEx` if we can, but fall back to `StackWalk64` + // since it's in theory supported on more systems. + match (*dbghelp.dbghelp()).StackWalkEx() { + #[allow(non_snake_case)] + Some(StackWalkEx) => { + let mut inner: STACKFRAME_EX = mem::zeroed(); + inner.StackFrameSize = mem::size_of::<STACKFRAME_EX>() as u32; + let mut frame = super::Frame { + inner: Frame { + stack_frame: StackFrame::New(inner), + base_address: 0 as _, + }, + }; + let image = init_frame(&mut frame.inner, &context.0); + let frame_ptr = match &mut frame.inner.stack_frame { + StackFrame::New(ptr) => ptr as *mut STACKFRAME_EX, + _ => unreachable!(), + }; + + while StackWalkEx( + image as u32, + process, + thread, + frame_ptr, + &mut context.0 as *mut CONTEXT as *mut _, + None, + Some(function_table_access), + Some(get_module_base), + None, + 0, + ) == TRUE + { + frame.inner.base_address = get_module_base(process_handle, frame.ip() as _) as _; + + if !cb(&frame) { + break; + } + } + } + None => { + let mut frame = super::Frame { + inner: Frame { + stack_frame: StackFrame::Old(mem::zeroed()), + base_address: 0 as _, + }, + }; + let image = init_frame(&mut frame.inner, &context.0); + let frame_ptr = match &mut frame.inner.stack_frame { + StackFrame::Old(ptr) => ptr as *mut STACKFRAME64, + _ => unreachable!(), + }; + + while dbghelp.StackWalk64()( + image as u32, + process, + thread, + frame_ptr, + &mut context.0 as *mut CONTEXT as *mut _, + None, + Some(function_table_access), + Some(get_module_base), + None, + ) == TRUE + { + frame.inner.base_address = get_module_base(process_handle, frame.ip() as _) as _; + + if !cb(&frame) { + break; + } + } + } + } +} + +#[cfg(target_arch = "x86")] +fn init_frame(frame: &mut Frame, ctx: &CONTEXT) -> u16 { + frame.addr_pc_mut().Offset = ctx.Eip as u64; + frame.addr_pc_mut().Mode = AddrModeFlat; + frame.addr_stack_mut().Offset = ctx.Esp as u64; + frame.addr_stack_mut().Mode = AddrModeFlat; + frame.addr_frame_mut().Offset = ctx.Ebp as u64; + frame.addr_frame_mut().Mode = AddrModeFlat; + + IMAGE_FILE_MACHINE_I386 +} + +#[cfg(target_arch = "arm")] +fn init_frame(frame: &mut Frame, ctx: &CONTEXT) -> u16 { + frame.addr_pc_mut().Offset = ctx.Pc as u64; + frame.addr_pc_mut().Mode = AddrModeFlat; + frame.addr_stack_mut().Offset = ctx.Sp as u64; + frame.addr_stack_mut().Mode = AddrModeFlat; + unsafe { + frame.addr_frame_mut().Offset = ctx.R11 as u64; + } + frame.addr_frame_mut().Mode = AddrModeFlat; + IMAGE_FILE_MACHINE_ARMNT +} diff --git a/vendor/backtrace/src/backtrace/win64.rs b/vendor/backtrace/src/backtrace/win64.rs new file mode 100644 index 00000000..81f63585 --- /dev/null +++ b/vendor/backtrace/src/backtrace/win64.rs @@ -0,0 +1,151 @@ +//! Backtrace strategy for Windows `x86_64` and `aarch64` platforms. +//! +//! This module contains the ability to capture a backtrace on Windows using +//! `RtlVirtualUnwind` to walk the stack one frame at a time. This function is much faster than using +//! `dbghelp!StackWalk*` because it does not load debug info to report inlined frames. +//! We still report inlined frames during symbolization by consulting the appropriate +//! `dbghelp` functions. 
+ +use super::super::windows_sys::*; +use core::ffi::c_void; + +#[derive(Clone, Copy)] +pub struct Frame { + base_address: *mut c_void, + ip: *mut c_void, + sp: *mut c_void, + #[cfg(not(target_env = "gnu"))] + inline_context: Option<u32>, +} + +// we're just sending around raw pointers and reading them, never interpreting +// them so this should be safe to both send and share across threads. +unsafe impl Send for Frame {} +unsafe impl Sync for Frame {} + +impl Frame { + pub fn ip(&self) -> *mut c_void { + self.ip + } + + pub fn sp(&self) -> *mut c_void { + self.sp + } + + pub fn symbol_address(&self) -> *mut c_void { + self.ip + } + + pub fn module_base_address(&self) -> Option<*mut c_void> { + Some(self.base_address) + } + + #[cfg(not(target_env = "gnu"))] + pub fn inline_context(&self) -> Option<u32> { + self.inline_context + } +} + +#[repr(C, align(16))] // required by `CONTEXT`, is a FIXME in windows metadata right now +struct MyContext(CONTEXT); + +#[cfg(any(target_arch = "x86_64", target_arch = "arm64ec"))] +impl MyContext { + #[inline(always)] + fn ip(&self) -> u64 { + self.0.Rip + } + + #[inline(always)] + fn sp(&self) -> u64 { + self.0.Rsp + } +} + +#[cfg(target_arch = "aarch64")] +impl MyContext { + #[inline(always)] + fn ip(&self) -> usize { + self.0.Pc as usize + } + + #[inline(always)] + fn sp(&self) -> usize { + self.0.Sp as usize + } +} + +#[inline(always)] +pub unsafe fn trace(cb: &mut dyn FnMut(&super::Frame) -> bool) { + use core::ptr; + + // Capture the initial context to start walking from. + // FIXME: shouldn't this have a Default impl? + let mut context = unsafe { core::mem::zeroed::<MyContext>() }; + unsafe { RtlCaptureContext(&mut context.0) }; + + loop { + let ip = context.ip(); + + // The base address of the module containing the function will be stored here + // when RtlLookupFunctionEntry returns successfully. + let mut base = 0; + // We use the `RtlLookupFunctionEntry` function in kernel32 which allows + // us to backtrace through JIT frames. + // Note that `RtlLookupFunctionEntry` only works for in-process backtraces, + // but that's all we support anyway, so it all lines up well. + let fn_entry = unsafe { RtlLookupFunctionEntry(ip, &mut base, ptr::null_mut()) }; + if fn_entry.is_null() { + // No function entry could be found - this may indicate a corrupt + // stack or that a binary was unloaded (amongst other issues). Stop + // walking and don't call the callback as we can't be confident in + // this frame or the rest of the stack. + break; + } + + let frame = super::Frame { + inner: Frame { + base_address: base as *mut c_void, + ip: ip as *mut c_void, + sp: context.sp() as *mut c_void, + #[cfg(not(target_env = "gnu"))] + inline_context: None, + }, + }; + + // We've loaded all the info about the current frame, so now call the + // callback. + if !cb(&frame) { + // Callback told us to stop, so we're done. + break; + } + + // Unwind to the next frame. + let previous_ip = ip; + let previous_sp = context.sp(); + let mut handler_data = 0usize; + let mut establisher_frame = 0; + unsafe { + RtlVirtualUnwind( + 0, + base, + ip, + fn_entry, + &mut context.0, + ptr::addr_of_mut!(handler_data).cast::<*mut c_void>(), + &mut establisher_frame, + ptr::null_mut(), + ); + } + + // RtlVirtualUnwind indicates the end of the stack in two different ways: + // * On x64, it sets the instruction pointer to 0. + // * On ARM64, it leaves the context unchanged (easiest way to check is + // to see if the instruction and stack pointers are the same). 
+ // If we detect either of these, then unwinding is completed. + let ip = context.ip(); + if ip == 0 || (ip == previous_ip && context.sp() == previous_sp) { + break; + } + } +} diff --git a/vendor/backtrace/src/capture.rs b/vendor/backtrace/src/capture.rs new file mode 100644 index 00000000..eb672593 --- /dev/null +++ b/vendor/backtrace/src/capture.rs @@ -0,0 +1,621 @@ +#![allow(clippy::from_over_into)] + +#[cfg(feature = "serde")] +use crate::resolve; +use crate::PrintFmt; +use crate::{resolve_frame, trace, BacktraceFmt, Symbol, SymbolName}; +use core::ffi::c_void; +use std::fmt; +use std::path::{Path, PathBuf}; +use std::prelude::v1::*; + +#[cfg(feature = "serde")] +use serde::{Deserialize, Serialize}; + +/// Representation of an owned and self-contained backtrace. +/// +/// This structure can be used to capture a backtrace at various points in a +/// program and later used to inspect what the backtrace was at that time. +/// +/// `Backtrace` supports pretty-printing of backtraces through its `Debug` +/// implementation. +/// +/// # Required features +/// +/// This function requires the `std` feature of the `backtrace` crate to be +/// enabled, and the `std` feature is enabled by default. +#[derive(Clone)] +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] +pub struct Backtrace { + // Frames here are listed from top-to-bottom of the stack + frames: Box<[BacktraceFrame]>, +} + +#[derive(Clone, Copy)] +struct TracePtr(*mut c_void); +/// SAFETY: These pointers are always valid within a process and are not used for mutation. +unsafe impl Send for TracePtr {} +/// SAFETY: These pointers are always valid within a process and are not used for mutation. +unsafe impl Sync for TracePtr {} + +impl TracePtr { + fn into_void(self) -> *mut c_void { + self.0 + } + #[cfg(feature = "serde")] + fn from_addr(addr: usize) -> Self { + TracePtr(addr as *mut c_void) + } +} + +#[cfg(feature = "serde")] +impl<'de> Deserialize<'de> for TracePtr { + #[inline] + fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> + where + D: serde::Deserializer<'de>, + { + struct PrimitiveVisitor; + + impl<'de> serde::de::Visitor<'de> for PrimitiveVisitor { + type Value = TracePtr; + + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.write_str("usize") + } + + #[inline] + fn visit_u8<E>(self, v: u8) -> Result<Self::Value, E> + where + E: serde::de::Error, + { + Ok(TracePtr(v as usize as *mut c_void)) + } + + #[inline] + fn visit_u16<E>(self, v: u16) -> Result<Self::Value, E> + where + E: serde::de::Error, + { + Ok(TracePtr(v as usize as *mut c_void)) + } + + #[inline] + fn visit_u32<E>(self, v: u32) -> Result<Self::Value, E> + where + E: serde::de::Error, + { + if usize::BITS >= 32 { + Ok(TracePtr(v as usize as *mut c_void)) + } else { + Err(E::invalid_type( + serde::de::Unexpected::Unsigned(v as _), + &self, + )) + } + } + + #[inline] + fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E> + where + E: serde::de::Error, + { + if usize::BITS >= 64 { + Ok(TracePtr(v as usize as *mut c_void)) + } else { + Err(E::invalid_type( + serde::de::Unexpected::Unsigned(v as _), + &self, + )) + } + } + } + + deserializer.deserialize_u64(PrimitiveVisitor) + } +} + +#[cfg(feature = "serde")] +impl Serialize for TracePtr { + #[inline] + fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> + where + S: serde::ser::Serializer, + { + serializer.serialize_u64(self.0 as usize as u64) + } +} + +fn _assert_send_sync() { + fn _assert<T: Send + Sync>() {} + _assert::<Backtrace>(); +} 
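// A minimal sketch (not part of the vendored sources; assumes the crate's default
// `std` feature) of how the capture API in this module is typically driven: capture
// cheaply with `new_unresolved`, resolve later, then walk frames and symbols
// programmatically instead of relying on the `Debug` output. The helper name
// `capture_flow_sketch` is hypothetical; gating it on `cfg(test)` keeps it off the
// public API surface.
#[cfg(test)]
#[allow(dead_code)]
fn capture_flow_sketch() {
    // Recording raw frame addresses is cheap; symbolization is the expensive part.
    let mut bt = Backtrace::new_unresolved();
    // Resolve only once the trace actually needs to be inspected or printed.
    bt.resolve();
    for frame in bt.frames() {
        for symbol in frame.symbols() {
            // Every field is optional: debug info may be missing for any given frame.
            let _ = (symbol.name(), symbol.filename(), symbol.lineno(), symbol.colno());
        }
    }
}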
+ +/// Captured version of a frame in a backtrace. +/// +/// This type is returned as a list from `Backtrace::frames` and represents one +/// stack frame in a captured backtrace. +/// +/// # Required features +/// +/// This function requires the `std` feature of the `backtrace` crate to be +/// enabled, and the `std` feature is enabled by default. +#[derive(Clone)] +pub struct BacktraceFrame { + frame: Frame, + symbols: Option<Box<[BacktraceSymbol]>>, +} + +#[derive(Clone)] +enum Frame { + Raw(crate::Frame), + #[cfg(feature = "serde")] + Deserialized { + ip: TracePtr, + symbol_address: TracePtr, + module_base_address: Option<TracePtr>, + }, +} + +impl Frame { + fn ip(&self) -> *mut c_void { + match *self { + Frame::Raw(ref f) => f.ip(), + #[cfg(feature = "serde")] + Frame::Deserialized { ip, .. } => ip.into_void(), + } + } + + fn symbol_address(&self) -> *mut c_void { + match *self { + Frame::Raw(ref f) => f.symbol_address(), + #[cfg(feature = "serde")] + Frame::Deserialized { symbol_address, .. } => symbol_address.into_void(), + } + } + + fn module_base_address(&self) -> Option<*mut c_void> { + match *self { + Frame::Raw(ref f) => f.module_base_address(), + #[cfg(feature = "serde")] + Frame::Deserialized { + module_base_address, + .. + } => module_base_address.map(|addr| addr.into_void()), + } + } + + /// Resolve all addresses in the frame to their symbolic names. + fn resolve_symbols(&self) -> Box<[BacktraceSymbol]> { + let mut symbols = Vec::new(); + let sym = |symbol: &Symbol| { + symbols.push(BacktraceSymbol { + name: symbol.name().map(|m| m.as_bytes().into()), + addr: symbol.addr().map(TracePtr), + filename: symbol.filename().map(|m| m.to_owned()), + lineno: symbol.lineno(), + colno: symbol.colno(), + }); + }; + match *self { + Frame::Raw(ref f) => resolve_frame(f, sym), + #[cfg(feature = "serde")] + Frame::Deserialized { ip, .. } => { + resolve(ip.into_void(), sym); + } + } + symbols.into_boxed_slice() + } +} + +/// Captured version of a symbol in a backtrace. +/// +/// This type is returned as a list from `BacktraceFrame::symbols` and +/// represents the metadata for a symbol in a backtrace. +/// +/// # Required features +/// +/// This function requires the `std` feature of the `backtrace` crate to be +/// enabled, and the `std` feature is enabled by default. +#[derive(Clone)] +#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))] +pub struct BacktraceSymbol { + name: Option<Box<[u8]>>, + addr: Option<TracePtr>, + filename: Option<PathBuf>, + lineno: Option<u32>, + colno: Option<u32>, +} + +impl Backtrace { + /// Captures a backtrace at the callsite of this function, returning an + /// owned representation. + /// + /// This function is useful for representing a backtrace as an object in + /// Rust. This returned value can be sent across threads and printed + /// elsewhere, and the purpose of this value is to be entirely self + /// contained. + /// + /// Note that on some platforms acquiring a full backtrace and resolving it + /// can be extremely expensive. If the cost is too much for your application + /// it's recommended to instead use `Backtrace::new_unresolved()` which + /// avoids the symbol resolution step (which typically takes the longest) + /// and allows deferring that to a later date. 
+ /// + /// # Examples + /// + /// ``` + /// use backtrace::Backtrace; + /// + /// let current_backtrace = Backtrace::new(); + /// ``` + /// + /// # Required features + /// + /// This function requires the `std` feature of the `backtrace` crate to be + /// enabled, and the `std` feature is enabled by default. + #[inline(never)] // want to make sure there's a frame here to remove + pub fn new() -> Backtrace { + let mut bt = Self::create(Self::new as usize); + bt.resolve(); + bt + } + + /// Similar to `new` except that this does not resolve any symbols, this + /// simply captures the backtrace as a list of addresses. + /// + /// At a later time the `resolve` function can be called to resolve this + /// backtrace's symbols into readable names. This function exists because + /// the resolution process can sometimes take a significant amount of time + /// whereas any one backtrace may only be rarely printed. + /// + /// # Examples + /// + /// ``` + /// use backtrace::Backtrace; + /// + /// let mut current_backtrace = Backtrace::new_unresolved(); + /// println!("{current_backtrace:?}"); // no symbol names + /// current_backtrace.resolve(); + /// println!("{current_backtrace:?}"); // symbol names now present + /// ``` + /// + /// # Required features + /// + /// This function requires the `std` feature of the `backtrace` crate to be + /// enabled, and the `std` feature is enabled by default. + #[inline(never)] // want to make sure there's a frame here to remove + pub fn new_unresolved() -> Backtrace { + Self::create(Self::new_unresolved as usize) + } + + fn create(ip: usize) -> Backtrace { + let mut frames = Vec::new(); + trace(|frame| { + frames.push(BacktraceFrame { + frame: Frame::Raw(frame.clone()), + symbols: None, + }); + + // clear inner frames, and start with call site. + if frame.symbol_address() as usize == ip { + frames.clear(); + } + + true + }); + frames.shrink_to_fit(); + + Backtrace { + frames: frames.into_boxed_slice(), + } + } + + /// Returns the frames from when this backtrace was captured. + /// + /// The first entry of this slice is likely the function `Backtrace::new`, + /// and the last frame is likely something about how this thread or the main + /// function started. + /// + /// # Required features + /// + /// This function requires the `std` feature of the `backtrace` crate to be + /// enabled, and the `std` feature is enabled by default. + pub fn frames(&self) -> &[BacktraceFrame] { + self.frames.as_ref() + } + + /// If this backtrace was created from `new_unresolved` then this function + /// will resolve all addresses in the backtrace to their symbolic names. + /// + /// If this backtrace has been previously resolved or was created through + /// `new`, this function does nothing. + /// + /// # Required features + /// + /// This function requires the `std` feature of the `backtrace` crate to be + /// enabled, and the `std` feature is enabled by default. + pub fn resolve(&mut self) { + self.frames.iter_mut().for_each(BacktraceFrame::resolve); + } +} + +impl From<Vec<BacktraceFrame>> for Backtrace { + fn from(frames: Vec<BacktraceFrame>) -> Self { + Backtrace { + frames: frames.into_boxed_slice(), + } + } +} + +impl From<crate::Frame> for BacktraceFrame { + fn from(frame: crate::Frame) -> Self { + BacktraceFrame { + frame: Frame::Raw(frame), + symbols: None, + } + } +} + +// we don't want to implement `impl From<Backtrace> for Vec<BacktraceFrame>` on purpose, +// because "... additional directions for Vec<T> can weaken type inference ..." 
+// more information on https://github.com/rust-lang/backtrace-rs/pull/526 +impl Into<Vec<BacktraceFrame>> for Backtrace { + fn into(self) -> Vec<BacktraceFrame> { + self.frames.into_vec() + } +} + +impl BacktraceFrame { + /// Same as `Frame::ip` + /// + /// # Required features + /// + /// This function requires the `std` feature of the `backtrace` crate to be + /// enabled, and the `std` feature is enabled by default. + pub fn ip(&self) -> *mut c_void { + self.frame.ip() + } + + /// Same as `Frame::symbol_address` + /// + /// # Required features + /// + /// This function requires the `std` feature of the `backtrace` crate to be + /// enabled, and the `std` feature is enabled by default. + pub fn symbol_address(&self) -> *mut c_void { + self.frame.symbol_address() + } + + /// Same as `Frame::module_base_address` + /// + /// # Required features + /// + /// This function requires the `std` feature of the `backtrace` crate to be + /// enabled, and the `std` feature is enabled by default. + pub fn module_base_address(&self) -> Option<*mut c_void> { + self.frame.module_base_address() + } + + /// Returns the list of symbols that this frame corresponds to. + /// + /// Normally there is only one symbol per frame, but sometimes if a number + /// of functions are inlined into one frame then multiple symbols will be + /// returned. The first symbol listed is the "innermost function", whereas + /// the last symbol is the outermost (last caller). + /// + /// Note that if this frame came from an unresolved backtrace then this will + /// return an empty list. + /// + /// # Required features + /// + /// This function requires the `std` feature of the `backtrace` crate to be + /// enabled, and the `std` feature is enabled by default. + pub fn symbols(&self) -> &[BacktraceSymbol] { + self.symbols.as_ref().map(|s| &s[..]).unwrap_or(&[]) + } + + /// Resolve all addresses in this frame to their symbolic names. + /// + /// If this frame has been previously resolved, this function does nothing. + /// + /// # Required features + /// + /// This function requires the `std` feature of the `backtrace` crate to be + /// enabled, and the `std` feature is enabled by default. + pub fn resolve(&mut self) { + if self.symbols.is_none() { + self.symbols = Some(self.frame.resolve_symbols()); + } + } +} + +impl BacktraceSymbol { + /// Same as `Symbol::name` + /// + /// # Required features + /// + /// This function requires the `std` feature of the `backtrace` crate to be + /// enabled, and the `std` feature is enabled by default. + pub fn name(&self) -> Option<SymbolName<'_>> { + self.name.as_ref().map(|s| SymbolName::new(s)) + } + + /// Same as `Symbol::addr` + /// + /// # Required features + /// + /// This function requires the `std` feature of the `backtrace` crate to be + /// enabled, and the `std` feature is enabled by default. + pub fn addr(&self) -> Option<*mut c_void> { + self.addr.map(|s| s.into_void()) + } + + /// Same as `Symbol::filename` + /// + /// # Required features + /// + /// This function requires the `std` feature of the `backtrace` crate to be + /// enabled, and the `std` feature is enabled by default. + pub fn filename(&self) -> Option<&Path> { + self.filename.as_deref() + } + + /// Same as `Symbol::lineno` + /// + /// # Required features + /// + /// This function requires the `std` feature of the `backtrace` crate to be + /// enabled, and the `std` feature is enabled by default. 
+ pub fn lineno(&self) -> Option<u32> { + self.lineno + } + + /// Same as `Symbol::colno` + /// + /// # Required features + /// + /// This function requires the `std` feature of the `backtrace` crate to be + /// enabled, and the `std` feature is enabled by default. + pub fn colno(&self) -> Option<u32> { + self.colno + } +} + +impl fmt::Debug for Backtrace { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + let style = if fmt.alternate() { + PrintFmt::Full + } else { + PrintFmt::Short + }; + + // When printing paths we try to strip the cwd if it exists, otherwise + // we just print the path as-is. Note that we also only do this for the + // short format, because if it's full we presumably want to print + // everything. + let cwd = std::env::current_dir(); + let mut print_path = + move |fmt: &mut fmt::Formatter<'_>, path: crate::BytesOrWideString<'_>| { + let path = path.into_path_buf(); + if style == PrintFmt::Full { + if let Ok(cwd) = &cwd { + if let Ok(suffix) = path.strip_prefix(cwd) { + return fmt::Display::fmt(&suffix.display(), fmt); + } + } + } + fmt::Display::fmt(&path.display(), fmt) + }; + + let mut f = BacktraceFmt::new(fmt, style, &mut print_path); + f.add_context()?; + for frame in &self.frames { + f.frame().backtrace_frame(frame)?; + } + f.finish()?; + Ok(()) + } +} + +impl Default for Backtrace { + fn default() -> Backtrace { + Backtrace::new() + } +} + +impl fmt::Debug for BacktraceFrame { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("BacktraceFrame") + .field("ip", &self.ip()) + .field("symbol_address", &self.symbol_address()) + .finish() + } +} + +impl fmt::Debug for BacktraceSymbol { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("BacktraceSymbol") + .field("name", &self.name()) + .field("addr", &self.addr()) + .field("filename", &self.filename()) + .field("lineno", &self.lineno()) + .field("colno", &self.colno()) + .finish() + } +} + +#[cfg(feature = "serde")] +mod serde_impls { + use super::*; + use serde::de::Deserializer; + use serde::ser::Serializer; + use serde::{Deserialize, Serialize}; + + #[derive(Serialize, Deserialize)] + struct SerializedFrame { + ip: usize, + symbol_address: usize, + module_base_address: Option<usize>, + symbols: Option<Box<[BacktraceSymbol]>>, + } + + impl Serialize for BacktraceFrame { + fn serialize<S>(&self, s: S) -> Result<S::Ok, S::Error> + where + S: Serializer, + { + let BacktraceFrame { frame, symbols } = self; + SerializedFrame { + ip: frame.ip() as usize, + symbol_address: frame.symbol_address() as usize, + module_base_address: frame.module_base_address().map(|sym_a| sym_a as usize), + symbols: symbols.clone(), + } + .serialize(s) + } + } + + impl<'a> Deserialize<'a> for BacktraceFrame { + fn deserialize<D>(d: D) -> Result<Self, D::Error> + where + D: Deserializer<'a>, + { + let frame: SerializedFrame = SerializedFrame::deserialize(d)?; + Ok(BacktraceFrame { + frame: Frame::Deserialized { + ip: TracePtr::from_addr(frame.ip), + symbol_address: TracePtr::from_addr(frame.symbol_address), + module_base_address: frame.module_base_address.map(TracePtr::from_addr), + }, + symbols: frame.symbols, + }) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_frame_conversion() { + let mut frames = vec![]; + crate::trace(|frame| { + let converted = BacktraceFrame::from(frame.clone()); + frames.push(converted); + true + }); + + let mut manual = Backtrace::from(frames); + manual.resolve(); + let frames = manual.frames(); + + for frame in 
frames { + println!("{:?}", frame.ip()); + println!("{:?}", frame.symbol_address()); + println!("{:?}", frame.module_base_address()); + println!("{:?}", frame.symbols()); + } + } +} diff --git a/vendor/backtrace/src/dbghelp.rs b/vendor/backtrace/src/dbghelp.rs new file mode 100644 index 00000000..df31fefb --- /dev/null +++ b/vendor/backtrace/src/dbghelp.rs @@ -0,0 +1,487 @@ +//! A module to assist in managing dbghelp bindings on Windows +//! +//! Backtraces on Windows (at least for MSVC) are largely powered through +//! `dbghelp.dll` and the various functions that it contains. These functions +//! are currently loaded *dynamically* rather than linking to `dbghelp.dll` +//! statically. This is currently done by the standard library (and is in theory +//! required there), but is an effort to help reduce the static dll dependencies +//! of a library since backtraces are typically pretty optional. That being +//! said, `dbghelp.dll` almost always successfully loads on Windows. +//! +//! Note though that since we're loading all this support dynamically we can't +//! actually use the raw definitions in `windows_sys`, but rather we need to define +//! the function pointer types ourselves and use that. We don't really want to +//! be in the business of duplicating auto-generated bindings, so we assert that all bindings match +//! those in `windows_sys.rs`. +//! +//! Finally, you'll note here that the dll for `dbghelp.dll` is never unloaded, +//! and that's currently intentional. The thinking is that we can globally cache +//! it and use it between calls to the API, avoiding expensive loads/unloads. If +//! this is a problem for leak detectors or something like that we can cross the +//! bridge when we get there. + +#![allow(non_snake_case)] + +use alloc::vec::Vec; + +use super::windows_sys::*; +use core::ffi::c_void; +use core::mem; +use core::ptr; +use core::slice; + +// This macro is used to define a `Dbghelp` structure which internally contains +// all the function pointers that we might load. +macro_rules! dbghelp { + (extern "system" { + $(fn $name:ident($($arg:ident: $argty:ty),*) -> $ret: ty;)* + }) => ( + pub struct Dbghelp { + /// The loaded DLL for `dbghelp.dll` + dll: HINSTANCE, + + // Each function pointer for each function we might use + $($name: usize,)* + } + + static mut DBGHELP: Dbghelp = Dbghelp { + // Initially we haven't loaded the DLL + dll: ptr::null_mut(), + // Initially all functions are set to zero to say they need to be + // dynamically loaded. + $($name: 0,)* + }; + + // Convenience typedef for each function type. + $(pub type $name = unsafe extern "system" fn($($argty),*) -> $ret;)* + + impl Dbghelp { + /// Attempts to open `dbghelp.dll`. Returns success if it works or + /// error if `LoadLibraryW` fails. + fn ensure_open(&mut self) -> Result<(), ()> { + if !self.dll.is_null() { + return Ok(()) + } + let lib = b"dbghelp.dll\0"; + unsafe { + self.dll = LoadLibraryA(lib.as_ptr()); + if self.dll.is_null() { + Err(()) + } else { + Ok(()) + } + } + } + + // Function for each method we'd like to use. When called it will + // either read the cached function pointer or load it and return the + // loaded value. Loads are asserted to succeed. + $(pub fn $name(&mut self) -> Option<$name> { + // Assert that windows_sys::$name is declared to have the same + // argument types and return type as our declaration, although + // it might be either extern "C" or extern "system". + cfg_if::cfg_if! 
{ + if #[cfg(any(target_arch = "x86", not(windows_raw_dylib)))] { + let _: unsafe extern "system" fn($($argty),*) -> $ret = super::windows_sys::$name; + } else { + let _: unsafe extern "C" fn($($argty),*) -> $ret = super::windows_sys::$name; + } + } + + unsafe { + if self.$name == 0 { + let name = concat!(stringify!($name), "\0"); + self.$name = self.symbol(name.as_bytes())?; + } + Some(mem::transmute::<usize, $name>(self.$name)) + } + })* + + fn symbol(&self, symbol: &[u8]) -> Option<usize> { + unsafe { + GetProcAddress(self.dll, symbol.as_ptr()).map(|address|address as usize) + } + } + } + + // Convenience proxy to use the cleanup locks to reference dbghelp + // functions. + #[allow(dead_code)] + impl Init { + $(pub fn $name(&self) -> $name { + // FIXME: https://github.com/rust-lang/backtrace-rs/issues/678 + #[allow(static_mut_refs)] + unsafe { + DBGHELP.$name().unwrap() + } + })* + + pub fn dbghelp(&self) -> *mut Dbghelp { + #[allow(unused_unsafe)] + unsafe { ptr::addr_of_mut!(DBGHELP) } + } + } + ) + +} + +dbghelp! { + extern "system" { + fn SymGetOptions() -> u32; + fn SymSetOptions(options: u32) -> u32; + fn SymInitializeW( + handle: HANDLE, + path: PCWSTR, + invade: BOOL + ) -> BOOL; + fn SymGetSearchPathW( + hprocess: HANDLE, + searchpatha: PWSTR, + searchpathlength: u32 + ) -> BOOL; + fn SymSetSearchPathW( + hprocess: HANDLE, + searchpatha: PCWSTR + ) -> BOOL; + fn EnumerateLoadedModulesW64( + hprocess: HANDLE, + enumloadedmodulescallback: PENUMLOADED_MODULES_CALLBACKW64, + usercontext: *const c_void + ) -> BOOL; + fn StackWalk64( + MachineType: u32, + hProcess: HANDLE, + hThread: HANDLE, + StackFrame: *mut STACKFRAME64, + ContextRecord: *mut c_void, + ReadMemoryRoutine: PREAD_PROCESS_MEMORY_ROUTINE64, + FunctionTableAccessRoutine: PFUNCTION_TABLE_ACCESS_ROUTINE64, + GetModuleBaseRoutine: PGET_MODULE_BASE_ROUTINE64, + TranslateAddress: PTRANSLATE_ADDRESS_ROUTINE64 + ) -> BOOL; + fn SymFunctionTableAccess64( + hProcess: HANDLE, + AddrBase: u64 + ) -> *mut c_void; + fn SymGetModuleBase64( + hProcess: HANDLE, + AddrBase: u64 + ) -> u64; + fn SymFromAddrW( + hProcess: HANDLE, + Address: u64, + Displacement: *mut u64, + Symbol: *mut SYMBOL_INFOW + ) -> BOOL; + fn SymGetLineFromAddrW64( + hProcess: HANDLE, + dwAddr: u64, + pdwDisplacement: *mut u32, + Line: *mut IMAGEHLP_LINEW64 + ) -> BOOL; + fn StackWalkEx( + MachineType: u32, + hProcess: HANDLE, + hThread: HANDLE, + StackFrame: *mut STACKFRAME_EX, + ContextRecord: *mut c_void, + ReadMemoryRoutine: PREAD_PROCESS_MEMORY_ROUTINE64, + FunctionTableAccessRoutine: PFUNCTION_TABLE_ACCESS_ROUTINE64, + GetModuleBaseRoutine: PGET_MODULE_BASE_ROUTINE64, + TranslateAddress: PTRANSLATE_ADDRESS_ROUTINE64, + Flags: u32 + ) -> BOOL; + fn SymFromInlineContextW( + hProcess: HANDLE, + Address: u64, + InlineContext: u32, + Displacement: *mut u64, + Symbol: *mut SYMBOL_INFOW + ) -> BOOL; + fn SymGetLineFromInlineContextW( + hProcess: HANDLE, + dwAddr: u64, + InlineContext: u32, + qwModuleBaseAddress: u64, + pdwDisplacement: *mut u32, + Line: *mut IMAGEHLP_LINEW64 + ) -> BOOL; + fn SymAddrIncludeInlineTrace( + hProcess: HANDLE, + Address: u64 + ) -> u32; + fn SymQueryInlineTrace( + hProcess: HANDLE, + StartAddress: u64, + StartContext: u32, + StartRetAddress: u64, + CurAddress: u64, + CurContext: *mut u32, + CurFrameIndex: *mut u32 + ) -> BOOL; + } +} + +pub struct Init { + lock: HANDLE, +} + +/// Initialize all support necessary to access `dbghelp` API functions from this +/// crate. 
+/// +/// Note that this function is **safe**, it internally has its own +/// synchronization. Also note that it is safe to call this function multiple +/// times recursively. +pub fn init() -> Result<Init, ()> { + use core::sync::atomic::{AtomicPtr, Ordering::SeqCst}; + + // Helper function for generating a name that's unique to the process. + fn mutex_name() -> [u8; 33] { + let mut name: [u8; 33] = *b"Local\\RustBacktraceMutex00000000\0"; + let mut id = unsafe { GetCurrentProcessId() }; + // Quick and dirty no alloc u32 to hex. + let mut index = name.len() - 1; + while id > 0 { + name[index - 1] = match (id & 0xF) as u8 { + h @ 0..=9 => b'0' + h, + h => b'A' + (h - 10), + }; + id >>= 4; + index -= 1; + } + name + } + + unsafe { + // First thing we need to do is to synchronize this function. This can + // be called concurrently from other threads or recursively within one + // thread. Note that it's trickier than that though because what we're + // using here, `dbghelp`, *also* needs to be synchronized with all other + // callers to `dbghelp` in this process. + // + // Typically there aren't really that many calls to `dbghelp` within the + // same process and we can probably safely assume that we're the only + // ones accessing it. There is, however, one primary other user we have + // to worry about which is ironically ourselves, but in the standard + // library. The Rust standard library depends on this crate for + // backtrace support, and this crate also exists on crates.io. This + // means that if the standard library is printing a panic backtrace it + // may race with this crate coming from crates.io, causing segfaults. + // + // To help solve this synchronization problem we employ a + // Windows-specific trick here (it is, after all, a Windows-specific + // restriction about synchronization). We create a *session-local* named + // mutex to protect this call. The intention here is that the standard + // library and this crate don't have to share Rust-level APIs to + // synchronize here but can instead work behind the scenes to make sure + // they're synchronizing with one another. That way when this function + // is called through the standard library or through crates.io we can be + // sure that the same mutex is being acquired. + // + // So all of that is to say that the first thing we do here is we + // atomically create a `HANDLE` which is a named mutex on Windows. We + // synchronize a bit with other threads sharing this function + // specifically and ensure that only one handle is created per instance + // of this function. Note that the handle is never closed once it's + // stored in the global. + // + // After we've actually go the lock we simply acquire it, and our `Init` + // handle we hand out will be responsible for dropping it eventually. + static LOCK: AtomicPtr<c_void> = AtomicPtr::new(ptr::null_mut()); + let mut lock = LOCK.load(SeqCst); + if lock.is_null() { + let name = mutex_name(); + lock = CreateMutexA(ptr::null_mut(), FALSE, name.as_ptr()); + if lock.is_null() { + return Err(()); + } + if let Err(other) = LOCK.compare_exchange(ptr::null_mut(), lock, SeqCst, SeqCst) { + debug_assert!(!other.is_null()); + CloseHandle(lock); + lock = other; + } + } + debug_assert!(!lock.is_null()); + let r = WaitForSingleObjectEx(lock, INFINITE, FALSE); + debug_assert_eq!(r, 0); + let ret = Init { lock }; + + // Ok, phew! Now that we're all safely synchronized, let's actually + // start processing everything. 
First up we need to ensure that + // `dbghelp.dll` is actually loaded in this process. We do this + // dynamically to avoid a static dependency. This has historically been + // done to work around weird linking issues and is intended at making + // binaries a bit more portable since this is largely just a debugging + // utility. + // + // Once we've opened `dbghelp.dll` we need to call some initialization + // functions in it, and that's detailed more below. We only do this + // once, though, so we've got a global boolean indicating whether we're + // done yet or not. + // FIXME: https://github.com/rust-lang/backtrace-rs/issues/678 + #[allow(static_mut_refs)] + DBGHELP.ensure_open()?; + + static mut INITIALIZED: bool = false; + if !INITIALIZED { + set_optional_options(ret.dbghelp()); + INITIALIZED = true; + } + Ok(ret) + } +} +unsafe fn set_optional_options(dbghelp: *mut Dbghelp) -> Option<()> { + unsafe { + let orig = (*dbghelp).SymGetOptions()?(); + + // Ensure that the `SYMOPT_DEFERRED_LOADS` flag is set, because + // according to MSVC's own docs about this: "This is the fastest, most + // efficient way to use the symbol handler.", so let's do that! + (*dbghelp).SymSetOptions()?(orig | SYMOPT_DEFERRED_LOADS); + + // Actually initialize symbols with MSVC. Note that this can fail, but we + // ignore it. There's not a ton of prior art for this per se, but LLVM + // internally seems to ignore the return value here and one of the + // sanitizer libraries in LLVM prints a scary warning if this fails but + // basically ignores it in the long run. + // + // One case this comes up a lot for Rust is that the standard library and + // this crate on crates.io both want to compete for `SymInitializeW`. The + // standard library historically wanted to initialize then cleanup most of + // the time, but now that it's using this crate it means that someone will + // get to initialization first and the other will pick up that + // initialization. + (*dbghelp).SymInitializeW()?(GetCurrentProcess(), ptr::null_mut(), TRUE); + + // The default search path for dbghelp will only look in the current working + // directory and (possibly) `_NT_SYMBOL_PATH` and `_NT_ALT_SYMBOL_PATH`. + // However, we also want to look in the directory of the executable + // and each DLL that is loaded. To do this, we need to update the search path + // to include these directories. + // + // See https://learn.microsoft.com/cpp/build/reference/pdbpath for an + // example of where symbols are usually searched for. + let mut search_path_buf = Vec::new(); + search_path_buf.resize(1024, 0); + + // Prefill the buffer with the current search path. + if (*dbghelp).SymGetSearchPathW()?( + GetCurrentProcess(), + search_path_buf.as_mut_ptr(), + search_path_buf.len() as _, + ) == TRUE + { + // Trim the buffer to the actual length of the string. + let len = lstrlenW(search_path_buf.as_mut_ptr()); + assert!(len >= 0); + search_path_buf.truncate(len as usize); + } else { + // If getting the search path fails, at least include the current directory. + search_path_buf.clear(); + search_path_buf.push(utf16_char('.')); + search_path_buf.push(utf16_char(';')); + } + + let mut search_path = SearchPath::new(search_path_buf); + + // Update the search path to include the directory of the executable and each DLL. 
+ (*dbghelp).EnumerateLoadedModulesW64()?( + GetCurrentProcess(), + Some(enum_loaded_modules_callback), + ((&mut search_path) as *mut SearchPath) as *mut c_void, + ); + + let new_search_path = search_path.finalize(); + + // Set the new search path. + (*dbghelp).SymSetSearchPathW()?(GetCurrentProcess(), new_search_path.as_ptr()); + } + Some(()) +} + +struct SearchPath { + search_path_utf16: Vec<u16>, +} + +fn utf16_char(c: char) -> u16 { + let buf = &mut [0u16; 2]; + let buf = c.encode_utf16(buf); + assert!(buf.len() == 1); + buf[0] +} + +impl SearchPath { + fn new(initial_search_path: Vec<u16>) -> Self { + Self { + search_path_utf16: initial_search_path, + } + } + + /// Add a path to the search path if it is not already present. + fn add(&mut self, path: &[u16]) { + let sep = utf16_char(';'); + + // We could deduplicate in a case-insensitive way, but case-sensitivity + // can be configured by directory on Windows, so let's not do that. + // https://learn.microsoft.com/windows/wsl/case-sensitivity + if !self + .search_path_utf16 + .split(|&c| c == sep) + .any(|p| p == path) + { + if self.search_path_utf16.last() != Some(&sep) { + self.search_path_utf16.push(sep); + } + self.search_path_utf16.extend_from_slice(path); + } + } + + fn finalize(mut self) -> Vec<u16> { + // Add a null terminator. + self.search_path_utf16.push(0); + self.search_path_utf16 + } +} + +extern "system" fn enum_loaded_modules_callback( + module_name: PCWSTR, + _: u64, + _: u32, + user_context: *const c_void, +) -> BOOL { + // `module_name` is an absolute path like `C:\path\to\module.dll` + // or `C:\path\to\module.exe` + let len: usize = unsafe { lstrlenW(module_name).try_into().unwrap() }; + + if len == 0 { + // This should not happen, but if it does, we can just ignore it. + return TRUE; + } + + let module_name = unsafe { slice::from_raw_parts(module_name, len) }; + let path_sep = utf16_char('\\'); + let alt_path_sep = utf16_char('/'); + + let Some(end_of_directory) = module_name + .iter() + .rposition(|&c| c == path_sep || c == alt_path_sep) + else { + // `module_name` being an absolute path, it should always contain at least one + // path separator. If not, there is nothing we can do. + return TRUE; + }; + + let search_path = unsafe { &mut *(user_context as *mut SearchPath) }; + search_path.add(&module_name[..end_of_directory]); + + TRUE +} + +impl Drop for Init { + fn drop(&mut self) { + unsafe { + let r = ReleaseMutex(self.lock); + debug_assert!(r != 0); + } + } +} diff --git a/vendor/backtrace/src/lib.rs b/vendor/backtrace/src/lib.rs new file mode 100644 index 00000000..13ca6b58 --- /dev/null +++ b/vendor/backtrace/src/lib.rs @@ -0,0 +1,251 @@ +//! A library for acquiring a backtrace at runtime +//! +//! This library is meant to supplement the `RUST_BACKTRACE=1` support of the +//! standard library by allowing an acquisition of a backtrace at runtime +//! programmatically. The backtraces generated by this library do not need to be +//! parsed, for example, and expose the functionality of multiple backend +//! implementations. +//! +//! # Usage +//! +//! First, add this to your Cargo.toml +//! +//! ```toml +//! [dependencies] +//! backtrace = "0.3" +//! ``` +//! +//! Next: +//! +//! ``` +//! # // Unsafe here so test passes on no_std. +//! # #[cfg(feature = "std")] { +//! backtrace::trace(|frame| { +//! let ip = frame.ip(); +//! let symbol_address = frame.symbol_address(); +//! +//! // Resolve this instruction pointer to a symbol name +//! backtrace::resolve_frame(frame, |symbol| { +//! 
if let Some(name) = symbol.name() { +//! // ... +//! } +//! if let Some(filename) = symbol.filename() { +//! // ... +//! } +//! }); +//! +//! true // keep going to the next frame +//! }); +//! # } +//! ``` +//! +//! # Backtrace accuracy +//! +//! This crate implements best-effort attempts to get the native backtrace. This +//! is not always guaranteed to work, and some platforms don't return any +//! backtrace at all. If your application requires accurate backtraces then it's +//! recommended to closely evaluate this crate to see whether it's suitable +//! for your use case on your target platforms. +//! +//! Even on supported platforms, there's a number of reasons that backtraces may +//! be less-than-accurate, including but not limited to: +//! +//! * Unwind information may not be available. This crate primarily implements +//! backtraces by unwinding the stack, but not all functions may have +//! unwinding information (e.g. DWARF unwinding information). +//! +//! * Rust code may be compiled without unwinding information for some +//! functions. This can also happen for Rust code compiled with +//! `-Cpanic=abort`. You can remedy this, however, with +//! `-Cforce-unwind-tables` as a compiler option. +//! +//! * Unwind information may be inaccurate or corrupt. In the worst case +//! inaccurate unwind information can lead this library to segfault. In the +//! best case inaccurate information will result in a truncated stack trace. +//! +//! * Backtraces may not report filenames/line numbers correctly due to missing +//! or corrupt debug information. This won't lead to segfaults unlike corrupt +//! unwinding information, but missing or malformed debug information will +//! mean that filenames and line numbers will not be available. This may be +//! because debug information wasn't generated by the compiler, or it's just +//! missing on the filesystem. +//! +//! * Not all platforms are supported. For example there's no way to get a +//! backtrace on WebAssembly at the moment. +//! +//! * Crate features may be disabled. Currently this crate supports using Gimli +//! libbacktrace on non-Windows platforms for reading debuginfo for +//! backtraces. If both crate features are disabled, however, then these +//! platforms will generate a backtrace but be unable to generate symbols for +//! it. +//! +//! In most standard workflows for most standard platforms you generally don't +//! need to worry about these caveats. We'll try to fix ones where we can over +//! time, but otherwise it's important to be aware of the limitations of +//! unwinding-based backtraces! + +#![deny(missing_docs)] +#![no_std] +#![cfg_attr( + all(feature = "std", target_env = "sgx", target_vendor = "fortanix"), + feature(sgx_platform) +)] +#![warn(rust_2018_idioms)] +// When we're building as part of libstd, silence all warnings since they're +// irrelevant as this crate is developed out-of-tree. +#![cfg_attr(backtrace_in_libstd, allow(warnings))] +#![cfg_attr(not(feature = "std"), allow(dead_code))] + +#[cfg(feature = "std")] +#[macro_use] +extern crate std; + +// This is only used for gimli right now, which is only used on some platforms, and miri +// so don't worry if it's unused in other configurations. 
+#[allow(unused_extern_crates)] +extern crate alloc; + +pub use self::backtrace::{trace_unsynchronized, Frame}; +mod backtrace; + +pub use self::symbolize::resolve_frame_unsynchronized; +pub use self::symbolize::{resolve_unsynchronized, Symbol, SymbolName}; +mod symbolize; + +pub use self::types::BytesOrWideString; +mod types; + +#[cfg(feature = "std")] +pub use self::symbolize::clear_symbol_cache; + +mod print; +pub use print::{BacktraceFmt, BacktraceFrameFmt, PrintFmt}; + +cfg_if::cfg_if! { + if #[cfg(feature = "std")] { + pub use self::backtrace::trace; + pub use self::symbolize::{resolve, resolve_frame}; + pub use self::capture::{Backtrace, BacktraceFrame, BacktraceSymbol}; + mod capture; + } +} + +cfg_if::cfg_if! { + if #[cfg(all(target_env = "sgx", target_vendor = "fortanix", not(feature = "std")))] { + pub use self::backtrace::set_image_base; + } +} + +#[cfg(feature = "std")] +mod lock { + use std::boxed::Box; + use std::cell::Cell; + use std::ptr; + use std::sync::{Mutex, MutexGuard, Once}; + + /// A "Maybe" LockGuard + pub struct LockGuard(Option<MutexGuard<'static, ()>>); + + /// The global lock, lazily allocated on first use + static mut LOCK: *mut Mutex<()> = ptr::null_mut(); + static INIT: Once = Once::new(); + // Whether this thread is the one that holds the lock + thread_local!(static LOCK_HELD: Cell<bool> = const { Cell::new(false) }); + + impl Drop for LockGuard { + fn drop(&mut self) { + // Don't do anything if we're a LockGuard(None) + if self.0.is_some() { + LOCK_HELD.with(|slot| { + // Immediately crash if we somehow aren't the thread holding this lock + assert!(slot.get()); + // We are no longer the thread holding this lock + slot.set(false); + }); + } + // lock implicitly released here, if we're a LockGuard(Some(..)) + } + } + + /// Acquire a partially unsound(!!!) global re-entrant lock over + /// backtrace's internals. + /// + /// That is, this lock can be acquired as many times as you want + /// on a single thread without deadlocking, allowing one thread + /// to acquire exclusive access to the ability to make backtraces. + /// Calls to this locking function are freely sprinkled in every place + /// where that needs to be enforced. + /// + /// + /// # Why + /// + /// This was first introduced to guard uses of Windows' dbghelp API, + /// which isn't threadsafe. It's unclear if other things now rely on + /// this locking. + /// + /// + /// # How + /// + /// The basic idea is to have a single global mutex, and a thread_local + /// boolean saying "yep this is the thread that acquired the mutex". + /// + /// The first time a thread acquires the lock, it is handed a + /// `LockGuard(Some(..))` that will actually release the lock on Drop. + /// All subsequence attempts to lock on the same thread will see + /// that their thread acquired the lock, and get `LockGuard(None)` + /// which will do nothing when dropped. + /// + /// + /// # Safety + /// + /// As long as you only ever assign the returned LockGuard to a freshly + /// declared local variable, it will do its job correctly, as the "first" + /// LockGuard will strictly outlive all subsequent LockGuards and + /// properly release the lock when the thread is done with backtracing. + /// + /// However if you ever attempt to store a LockGuard beyond the scope + /// it was acquired in, it might actually be a `LockGuard(None)` that + /// doesn't actually hold the lock! In this case another thread might + /// acquire the lock and you'll get races this system was intended to + /// avoid! 
+ /// + /// This is why this is "partially unsound". As a public API this would + /// be unacceptable, but this is crate-private, and if you use this in + /// the most obvious and simplistic way it Just Worksâ„¢. + /// + /// Note however that std specifically bypasses this lock, and uses + /// the `*_unsynchronized` backtrace APIs. This is "fine" because + /// it wraps its own calls to backtrace in a non-reentrant Mutex + /// that prevents two backtraces from getting interleaved during printing. + pub fn lock() -> LockGuard { + // If we're the thread holding this lock, pretend to acquire the lock + // again by returning a LockGuard(None) + if LOCK_HELD.with(|l| l.get()) { + return LockGuard(None); + } + // Insist that we totally are the thread holding the lock + // (our thread will block until we are) + LOCK_HELD.with(|s| s.set(true)); + unsafe { + // lazily allocate the lock if necessary + INIT.call_once(|| { + LOCK = Box::into_raw(Box::new(Mutex::new(()))); + }); + // ok *actually* try to acquire the lock, blocking as necessary + LockGuard(Some((*LOCK).lock().unwrap())) + } + } +} + +#[cfg(all( + windows, + any( + target_env = "msvc", + all(target_env = "gnu", any(target_arch = "x86", target_arch = "arm")) + ), + not(target_vendor = "uwp") +))] +mod dbghelp; +// Auto-generated by windows-bindgen/riddle +#[cfg(any(windows, target_os = "cygwin"))] +mod windows_sys; diff --git a/vendor/backtrace/src/print.rs b/vendor/backtrace/src/print.rs new file mode 100644 index 00000000..888840ee --- /dev/null +++ b/vendor/backtrace/src/print.rs @@ -0,0 +1,310 @@ +#[cfg(feature = "std")] +use super::{BacktraceFrame, BacktraceSymbol}; +use super::{BytesOrWideString, Frame, SymbolName}; +use core::ffi::c_void; +use core::fmt; + +const HEX_WIDTH: usize = 2 + 2 * core::mem::size_of::<usize>(); + +#[cfg(target_os = "fuchsia")] +mod fuchsia; + +/// A formatter for backtraces. +/// +/// This type can be used to print a backtrace regardless of where the backtrace +/// itself comes from. If you have a `Backtrace` type then its `Debug` +/// implementation already uses this printing format. +pub struct BacktraceFmt<'a, 'b> { + fmt: &'a mut fmt::Formatter<'b>, + frame_index: usize, + format: PrintFmt, + print_path: + &'a mut (dyn FnMut(&mut fmt::Formatter<'_>, BytesOrWideString<'_>) -> fmt::Result + 'b), +} + +/// The styles of printing that we can print +#[derive(Copy, Clone, Eq, PartialEq)] +#[non_exhaustive] +pub enum PrintFmt { + /// Prints a terser backtrace which ideally only contains relevant information + Short, + /// Prints a backtrace that contains all possible information + Full, +} + +impl<'a, 'b> BacktraceFmt<'a, 'b> { + /// Create a new `BacktraceFmt` which will write output to the provided + /// `fmt`. + /// + /// The `format` argument will control the style in which the backtrace is + /// printed, and the `print_path` argument will be used to print the + /// `BytesOrWideString` instances of filenames. This type itself doesn't do + /// any printing of filenames, but this callback is required to do so. + pub fn new( + fmt: &'a mut fmt::Formatter<'b>, + format: PrintFmt, + print_path: &'a mut (dyn FnMut(&mut fmt::Formatter<'_>, BytesOrWideString<'_>) -> fmt::Result + + 'b), + ) -> Self { + BacktraceFmt { + fmt, + frame_index: 0, + format, + print_path, + } + } + + /// Prints a preamble for the backtrace about to be printed. 
+ /// + /// This is required on some platforms for backtraces to be fully + /// symbolicated later, and otherwise this should just be the first method + /// you call after creating a `BacktraceFmt`. + pub fn add_context(&mut self) -> fmt::Result { + #[cfg(target_os = "fuchsia")] + fuchsia::print_dso_context(self.fmt)?; + Ok(()) + } + + /// Adds a frame to the backtrace output. + /// + /// This commit returns an RAII instance of a `BacktraceFrameFmt` which can be used + /// to actually print a frame, and on destruction it will increment the + /// frame counter. + pub fn frame(&mut self) -> BacktraceFrameFmt<'_, 'a, 'b> { + BacktraceFrameFmt { + fmt: self, + symbol_index: 0, + } + } + + /// Completes the backtrace output. + /// + /// This is currently a no-op but is added for future compatibility with + /// backtrace formats. + pub fn finish(&mut self) -> fmt::Result { + #[cfg(target_os = "fuchsia")] + fuchsia::finish_context(self.fmt)?; + Ok(()) + } + + /// Inserts a message in the backtrace output. + /// + /// This allows information to be inserted between frames, + /// and won't increment the `frame_index` unlike the `frame` + /// method. + pub fn message(&mut self, msg: &str) -> fmt::Result { + self.fmt.write_str(msg) + } + + /// Return the inner formatter. + /// + /// This is used for writing custom information between frames with `write!` and `writeln!`, + /// and won't increment the `frame_index` unlike the `frame` method. + pub fn formatter(&mut self) -> &mut fmt::Formatter<'b> { + self.fmt + } +} + +/// A formatter for just one frame of a backtrace. +/// +/// This type is created by the `BacktraceFmt::frame` function. +pub struct BacktraceFrameFmt<'fmt, 'a, 'b> { + fmt: &'fmt mut BacktraceFmt<'a, 'b>, + symbol_index: usize, +} + +impl BacktraceFrameFmt<'_, '_, '_> { + /// Prints a `BacktraceFrame` with this frame formatter. + /// + /// This will recursively print all `BacktraceSymbol` instances within the + /// `BacktraceFrame`. + /// + /// # Required features + /// + /// This function requires the `std` feature of the `backtrace` crate to be + /// enabled, and the `std` feature is enabled by default. + #[cfg(feature = "std")] + pub fn backtrace_frame(&mut self, frame: &BacktraceFrame) -> fmt::Result { + let symbols = frame.symbols(); + for symbol in symbols { + self.backtrace_symbol(frame, symbol)?; + } + if symbols.is_empty() { + self.print_raw(frame.ip(), None, None, None)?; + } + Ok(()) + } + + /// Prints a `BacktraceSymbol` within a `BacktraceFrame`. + /// + /// # Required features + /// + /// This function requires the `std` feature of the `backtrace` crate to be + /// enabled, and the `std` feature is enabled by default. + #[cfg(feature = "std")] + pub fn backtrace_symbol( + &mut self, + frame: &BacktraceFrame, + symbol: &BacktraceSymbol, + ) -> fmt::Result { + self.print_raw_with_column( + frame.ip(), + symbol.name(), + // TODO: this isn't great that we don't end up printing anything + // with non-utf8 filenames. Thankfully almost everything is utf8 so + // this shouldn't be too bad. + symbol + .filename() + .and_then(|p| Some(BytesOrWideString::Bytes(p.to_str()?.as_bytes()))), + symbol.lineno(), + symbol.colno(), + )?; + Ok(()) + } + + /// Prints a raw traced `Frame` and `Symbol`, typically from within the raw + /// callbacks of this crate. 
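    // A hedged illustration of the calling pattern this method exists for, as written in
    // a downstream crate (sketch only: `f` is assumed to be the `fmt::Formatter` of the
    // caller's `Display`/`Debug` impl, and `use backtrace::{BacktraceFmt, PrintFmt,
    // BytesOrWideString};` is assumed to be in scope):
    //
    //     let mut print_path = |fmt: &mut fmt::Formatter<'_>, path: BytesOrWideString<'_>| {
    //         fmt::Display::fmt(&path, fmt)
    //     };
    //     let mut bt_fmt = BacktraceFmt::new(f, PrintFmt::Short, &mut print_path);
    //     bt_fmt.add_context()?;
    //     backtrace::trace(|frame| {
    //         backtrace::resolve_frame(frame, |symbol| {
    //             let _ = bt_fmt.frame().symbol(frame, symbol);
    //         });
    //         true
    //     });
    //     bt_fmt.finish()?;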
+    pub fn symbol(&mut self, frame: &Frame, symbol: &super::Symbol) -> fmt::Result {
+        self.print_raw_with_column(
+            frame.ip(),
+            symbol.name(),
+            symbol.filename_raw(),
+            symbol.lineno(),
+            symbol.colno(),
+        )?;
+        Ok(())
+    }
+
+    /// Adds a raw frame to the backtrace output.
+    ///
+    /// This method, unlike the previous, takes the raw arguments in case
+    /// they're being sourced from different locations. Note that this may be
+    /// called multiple times for one frame.
+    pub fn print_raw(
+        &mut self,
+        frame_ip: *mut c_void,
+        symbol_name: Option<SymbolName<'_>>,
+        filename: Option<BytesOrWideString<'_>>,
+        lineno: Option<u32>,
+    ) -> fmt::Result {
+        self.print_raw_with_column(frame_ip, symbol_name, filename, lineno, None)
+    }
+
+    /// Adds a raw frame to the backtrace output, including column information.
+    ///
+    /// This method, like the previous, takes the raw arguments in case
+    /// they're being sourced from different locations. Note that this may be
+    /// called multiple times for one frame.
+    pub fn print_raw_with_column(
+        &mut self,
+        frame_ip: *mut c_void,
+        symbol_name: Option<SymbolName<'_>>,
+        filename: Option<BytesOrWideString<'_>>,
+        lineno: Option<u32>,
+        colno: Option<u32>,
+    ) -> fmt::Result {
+        // Fuchsia is unable to symbolize within a process so it has a special
+        // format which can be used to symbolize later. Print that instead of
+        // printing addresses in our own format here.
+        if cfg!(target_os = "fuchsia") {
+            self.print_raw_fuchsia(frame_ip)?;
+        } else {
+            self.print_raw_generic(frame_ip, symbol_name, filename, lineno, colno)?;
+        }
+        self.symbol_index += 1;
+        Ok(())
+    }
+
+    #[allow(unused_mut)]
+    fn print_raw_generic(
+        &mut self,
+        frame_ip: *mut c_void,
+        symbol_name: Option<SymbolName<'_>>,
+        filename: Option<BytesOrWideString<'_>>,
+        lineno: Option<u32>,
+        colno: Option<u32>,
+    ) -> fmt::Result {
+        // No need to print "null" frames, it basically just means that the
+        // system backtrace was a bit eager to trace back super far.
+        if let PrintFmt::Short = self.fmt.format {
+            if frame_ip.is_null() {
+                return Ok(());
+            }
+        }
+
+        // Print the index of the frame as well as the optional instruction
+        // pointer of the frame. If we're beyond the first symbol of this frame
+        // though we just print appropriate whitespace.
+        if self.symbol_index == 0 {
+            write!(self.fmt.fmt, "{:4}: ", self.fmt.frame_index)?;
+            if let PrintFmt::Full = self.fmt.format {
+                write!(self.fmt.fmt, "{frame_ip:HEX_WIDTH$?} - ")?;
+            }
+        } else {
+            write!(self.fmt.fmt, "      ")?;
+            if let PrintFmt::Full = self.fmt.format {
+                write!(self.fmt.fmt, "{:1$}", "", HEX_WIDTH + 3)?;
+            }
+        }
+
+        // Next up write out the symbol name, using the alternate formatting for
+        // more information if we're a full backtrace. Here we also handle
+        // symbols which don't have a name,
+        match (symbol_name, &self.fmt.format) {
+            (Some(name), PrintFmt::Short) => write!(self.fmt.fmt, "{name:#}")?,
+            (Some(name), PrintFmt::Full) => write!(self.fmt.fmt, "{name}")?,
+            (None, _) => write!(self.fmt.fmt, "<unknown>")?,
+        }
+        self.fmt.fmt.write_str("\n")?;
+
+        // And last up, print out the filename/line number if they're available.
+        if let (Some(file), Some(line)) = (filename, lineno) {
+            self.print_fileline(file, line, colno)?;
+        }
+
+        Ok(())
+    }
+
+    fn print_fileline(
+        &mut self,
+        file: BytesOrWideString<'_>,
+        line: u32,
+        colno: Option<u32>,
+    ) -> fmt::Result {
+        // Filename/line are printed on lines under the symbol name, so print
+        // some appropriate whitespace to sort of right-align ourselves.
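+        // For illustration (spacing is approximate, the exact widths come from
+        // the writes in `print_raw_generic` above and below): a Short-format
+        // frame ends up printed roughly as
+        //    3: std::panicking::begin_panic
+        //           at src/libstd/panicking.rs:311:5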
+ if let PrintFmt::Full = self.fmt.format { + write!(self.fmt.fmt, "{:1$}", "", HEX_WIDTH)?; + } + write!(self.fmt.fmt, " at ")?; + + // Delegate to our internal callback to print the filename and then + // print out the line number. + (self.fmt.print_path)(self.fmt.fmt, file)?; + write!(self.fmt.fmt, ":{line}")?; + + // Add column number, if available. + if let Some(colno) = colno { + write!(self.fmt.fmt, ":{colno}")?; + } + + writeln!(self.fmt.fmt)?; + Ok(()) + } + + fn print_raw_fuchsia(&mut self, frame_ip: *mut c_void) -> fmt::Result { + // We only care about the first symbol of a frame + if self.symbol_index == 0 { + self.fmt.fmt.write_str("{{{bt:")?; + write!(self.fmt.fmt, "{}:{:?}", self.fmt.frame_index, frame_ip)?; + self.fmt.fmt.write_str("}}}\n")?; + } + Ok(()) + } +} + +impl Drop for BacktraceFrameFmt<'_, '_, '_> { + fn drop(&mut self) { + self.fmt.frame_index += 1; + } +} diff --git a/vendor/backtrace/src/print/fuchsia.rs b/vendor/backtrace/src/print/fuchsia.rs new file mode 100644 index 00000000..01821fd9 --- /dev/null +++ b/vendor/backtrace/src/print/fuchsia.rs @@ -0,0 +1,441 @@ +use core::fmt::{self, Write}; +use core::mem::{size_of, transmute}; +use core::slice::from_raw_parts; +use libc::c_char; + +unsafe extern "C" { + // dl_iterate_phdr takes a callback that will receive a dl_phdr_info pointer + // for every DSO that has been linked into the process. dl_iterate_phdr also + // ensures that the dynamic linker is locked from start to finish of the + // iteration. If the callback returns a non-zero value the iteration is + // terminated early. 'data' will be passed as the third argument to the + // callback on each call. 'size' gives the size of the dl_phdr_info. + #[allow(improper_ctypes)] + fn dl_iterate_phdr( + f: extern "C" fn(info: &dl_phdr_info, size: usize, data: &mut DsoPrinter<'_, '_>) -> i32, + data: &mut DsoPrinter<'_, '_>, + ) -> i32; +} + +// We need to parse out the build ID and some basic program header data +// which means that we need a bit of stuff from the ELF spec as well. + +const PT_LOAD: u32 = 1; +const PT_NOTE: u32 = 4; + +// Now we have to replicate, bit for bit, the structure of the dl_phdr_info +// type used by fuchsia's current dynamic linker. Chromium also has this ABI +// boundary as well as crashpad. Eventually we'd like to move these cases to +// use elf-search but we'd need to provide that in the SDK and that has not +// yet been done. Thus we (and they) are stuck having to use this method +// which incurs a tight coupling with the fuchsia libc. + +#[allow(non_camel_case_types)] +#[repr(C)] +struct dl_phdr_info { + addr: *const u8, + name: *const c_char, + phdr: *const Elf_Phdr, + phnum: u16, + adds: u64, + subs: u64, + tls_modid: usize, + tls_data: *const u8, +} + +impl dl_phdr_info { + fn program_headers(&self) -> PhdrIter<'_> { + PhdrIter { + phdrs: self.phdr_slice(), + base: self.addr, + } + } + // We have no way of knowing of checking if e_phoff and e_phnum are valid. + // libc should ensure this for us however so it's safe to form a slice here. 
+    fn phdr_slice(&self) -> &[Elf_Phdr] {
+        unsafe { from_raw_parts(self.phdr, self.phnum as usize) }
+    }
+}
+
+struct PhdrIter<'a> {
+    phdrs: &'a [Elf_Phdr],
+    base: *const u8,
+}
+
+impl<'a> Iterator for PhdrIter<'a> {
+    type Item = Phdr<'a>;
+    fn next(&mut self) -> Option<Self::Item> {
+        self.phdrs.split_first().map(|(phdr, new_phdrs)| {
+            self.phdrs = new_phdrs;
+            Phdr {
+                phdr,
+                base: self.base,
+            }
+        })
+    }
+}
+
+// Elf_Phdr represents a 64-bit ELF program header in the endianness of the target
+// architecture.
+#[allow(non_camel_case_types)]
+#[derive(Clone, Debug)]
+#[repr(C)]
+struct Elf_Phdr {
+    p_type: u32,
+    p_flags: u32,
+    p_offset: u64,
+    p_vaddr: u64,
+    p_paddr: u64,
+    p_filesz: u64,
+    p_memsz: u64,
+    p_align: u64,
+}
+
+// Phdr represents a valid ELF program header and its contents.
+struct Phdr<'a> {
+    phdr: &'a Elf_Phdr,
+    base: *const u8,
+}
+
+impl<'a> Phdr<'a> {
+    // We have no way of checking if p_addr or p_memsz are valid. Fuchsia's libc
+    // parses the notes first however so by virtue of being here these headers
+    // must be valid. NoteIter does not require the underlying data to be valid
+    // but it does require the bounds to be valid. We trust that libc has ensured
+    // that this is the case for us here.
+    fn notes(&self) -> NoteIter<'a> {
+        unsafe {
+            NoteIter::new(
+                self.base.add(self.phdr.p_offset as usize),
+                self.phdr.p_memsz as usize,
+            )
+        }
+    }
+}
+
+// The note type for build IDs.
+const NT_GNU_BUILD_ID: u32 = 3;
+
+// Elf_Nhdr represents an ELF note header in the endianness of the target.
+#[allow(non_camel_case_types)]
+#[repr(C)]
+struct Elf_Nhdr {
+    n_namesz: u32,
+    n_descsz: u32,
+    n_type: u32,
+}
+
+// Note represents an ELF note (header + contents). The name is left as a u8
+// slice because it is not always null terminated and rust makes it easy enough
+// to check that the bytes match either way.
+struct Note<'a> {
+    name: &'a [u8],
+    desc: &'a [u8],
+    tipe: u32,
+}
+
+// NoteIter lets you safely iterate over a note segment. It terminates as soon
+// as an error occurs or there are no more notes. If you iterate over invalid
+// data it will function as though no notes were found.
+struct NoteIter<'a> {
+    base: &'a [u8],
+    error: bool,
+}
+
+impl<'a> NoteIter<'a> {
+    // It is an invariant of this function that the pointer and size given
+    // denote a valid range of bytes that can all be read. The contents of these
+    // bytes can be anything but the range must be valid for this to be safe.
+    unsafe fn new(base: *const u8, size: usize) -> Self {
+        NoteIter {
+            base: unsafe { from_raw_parts(base, size) },
+            error: false,
+        }
+    }
+}
+
+// align_to aligns 'x' to 'to'-byte alignment assuming 'to' is a power of 2.
+// This follows a standard pattern in C/C++ ELF parsing code where
+// (x + to - 1) & -to is used. Rust does not let you negate usize so I use
+// 2's-complement conversion to recreate that.
+fn align_to(x: usize, to: usize) -> usize {
+    (x + to - 1) & (!to + 1)
+}
+
+// take_bytes_align4 consumes num bytes from the slice (if present) and
+// additionally ensures that the final slice is properly aligned. If either
+// the number of bytes requested is too large or the slice can't be
+// realigned afterwards due to not enough remaining bytes existing, None is
+// returned and the slice is not modified.
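+// For illustration (example values only): with 4-byte alignment,
+// align_to(5, 4) == 8 and align_to(8, 4) == 8, so taking 5 bytes hands back
+// the first 5 bytes and leaves the slice advanced by 8 bytes in total.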
+fn take_bytes_align4<'a>(num: usize, bytes: &mut &'a [u8]) -> Option<&'a [u8]> { + if bytes.len() < align_to(num, 4) { + return None; + } + let (out, bytes_new) = bytes.split_at(num); + *bytes = &bytes_new[align_to(num, 4) - num..]; + Some(out) +} + +// This function has no real invariants the caller must uphold other than +// perhaps that 'bytes' should be aligned for performance (and on some +// architectures correctness). The values in the Elf_Nhdr fields might +// be nonsense but this function ensures no such thing. +fn take_nhdr<'a>(bytes: &mut &'a [u8]) -> Option<&'a Elf_Nhdr> { + if size_of::<Elf_Nhdr>() > bytes.len() { + return None; + } + // This is safe as long as there is enough space and we just confirmed that + // in the if statement above so this should not be unsafe. + let out = unsafe { transmute::<*const u8, &'a Elf_Nhdr>(bytes.as_ptr()) }; + // Note that sice_of::<Elf_Nhdr>() is always 4-byte aligned. + *bytes = &bytes[size_of::<Elf_Nhdr>()..]; + Some(out) +} + +impl<'a> Iterator for NoteIter<'a> { + type Item = Note<'a>; + fn next(&mut self) -> Option<Self::Item> { + // Check if we've reached the end. + if self.base.is_empty() || self.error { + return None; + } + // We transmute out an nhdr but we carefully consider the resulting + // struct. We don't trust the namesz or descsz and we make no unsafe + // decisions based on the type. So even if we get out complete garbage + // we should still be safe. + let nhdr = take_nhdr(&mut self.base)?; + let name = take_bytes_align4(nhdr.n_namesz as usize, &mut self.base)?; + let desc = take_bytes_align4(nhdr.n_descsz as usize, &mut self.base)?; + Some(Note { + name: name, + desc: desc, + tipe: nhdr.n_type, + }) + } +} + +struct Perm(u32); + +/// Indicates that a segment is executable. +const PERM_X: u32 = 0b00000001; +/// Indicates that a segment is writable. +const PERM_W: u32 = 0b00000010; +/// Indicates that a segment is readable. +const PERM_R: u32 = 0b00000100; + +impl core::fmt::Display for Perm { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let v = self.0; + if v & PERM_R != 0 { + f.write_char('r')? + } + if v & PERM_W != 0 { + f.write_char('w')? + } + if v & PERM_X != 0 { + f.write_char('x')? + } + Ok(()) + } +} + +/// Represents an ELF segment at runtime. +struct Segment { + /// Gives the runtime virtual address of this segment's contents. + addr: usize, + /// Gives the memory size of this segment's contents. + size: usize, + /// Gives the module virtual address of this segment with the ELF file. + mod_rel_addr: usize, + /// Gives the permissions found in the ELF file. These permissions are not + /// necessarily the permissions present at runtime however. + flags: Perm, +} + +/// Lets one iterate over Segments from a DSO. +struct SegmentIter<'a> { + phdrs: &'a [Elf_Phdr], + base: usize, +} + +impl Iterator for SegmentIter<'_> { + type Item = Segment; + + fn next(&mut self) -> Option<Self::Item> { + self.phdrs.split_first().and_then(|(phdr, new_phdrs)| { + self.phdrs = new_phdrs; + if phdr.p_type != PT_LOAD { + self.next() + } else { + Some(Segment { + addr: phdr.p_vaddr as usize + self.base, + size: phdr.p_memsz as usize, + mod_rel_addr: phdr.p_vaddr as usize, + flags: Perm(phdr.p_flags), + }) + } + }) + } +} + +/// Represents an ELF DSO (Dynamic Shared Object). This type references +/// the data stored in the actual DSO rather than making its own copy. +struct Dso<'a> { + /// The dynamic linker always gives us a name, even if the name is empty. 
+    /// In the case of the main executable this name will be empty. In the case
+    /// of a shared object it will be the soname (see DT_SONAME).
+    name: &'a str,
+    /// On Fuchsia virtually all binaries have build IDs but this is not a strict
+    /// requirement. There's no way to match up DSO information with a real ELF
+    /// file afterwards if there is no build_id so we require that every DSO
+    /// have one here. DSOs without a build_id are ignored.
+    build_id: &'a [u8],
+
+    base: usize,
+    phdrs: &'a [Elf_Phdr],
+}
+
+impl Dso<'_> {
+    /// Returns an iterator over Segments in this DSO.
+    fn segments(&self) -> SegmentIter<'_> {
+        SegmentIter {
+            phdrs: self.phdrs.as_ref(),
+            base: self.base,
+        }
+    }
+}
+
+struct HexSlice<'a> {
+    bytes: &'a [u8],
+}
+
+impl fmt::Display for HexSlice<'_> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        for byte in self.bytes {
+            write!(f, "{byte:02x}")?;
+        }
+        Ok(())
+    }
+}
+
+fn get_build_id<'a>(info: &'a dl_phdr_info) -> Option<&'a [u8]> {
+    for phdr in info.program_headers() {
+        if phdr.phdr.p_type == PT_NOTE {
+            for note in phdr.notes() {
+                if note.tipe == NT_GNU_BUILD_ID && (note.name == b"GNU\0" || note.name == b"GNU") {
+                    return Some(note.desc);
+                }
+            }
+        }
+    }
+    None
+}
+
+/// These errors encode issues that arise while parsing information about
+/// each DSO.
+enum Error {
+    /// NameError means that an error occurred while converting a C style string
+    /// into a rust string.
+    NameError,
+    /// BuildIDError means that we didn't find a build ID. This could either be
+    /// because the DSO had no build ID or because the segment containing the
+    /// build ID was malformed.
+    BuildIDError,
+}
+
+/// Calls either 'dso' or 'error' for each DSO linked into the process by the
+/// dynamic linker.
+///
+/// # Arguments
+///
+/// * `visitor` - A DsoPrinter that will have one of its methods called for each DSO.
+fn for_each_dso(mut visitor: &mut DsoPrinter<'_, '_>) {
+    extern "C" fn callback(
+        info: &dl_phdr_info,
+        _size: usize,
+        visitor: &mut DsoPrinter<'_, '_>,
+    ) -> i32 {
+        // dl_iterate_phdr ensures that info.name will point to a valid
+        // location.
+ let name_len = unsafe { libc::strlen(info.name) }; + let name_slice: &[u8] = + unsafe { core::slice::from_raw_parts(info.name.cast::<u8>(), name_len) }; + let name = match core::str::from_utf8(name_slice) { + Ok(name) => name, + Err(_) => { + return visitor.error(Error::NameError) as i32; + } + }; + let build_id = match get_build_id(info) { + Some(build_id) => build_id, + None => { + return visitor.error(Error::BuildIDError) as i32; + } + }; + visitor.dso(Dso { + name: name, + build_id: build_id, + phdrs: info.phdr_slice(), + base: info.addr as usize, + }) as i32 + } + unsafe { dl_iterate_phdr(callback, &mut visitor) }; +} + +struct DsoPrinter<'a, 'b> { + writer: &'a mut core::fmt::Formatter<'b>, + module_count: usize, + error: core::fmt::Result, +} + +impl DsoPrinter<'_, '_> { + fn dso(&mut self, dso: Dso<'_>) -> bool { + let mut write = || { + write!( + self.writer, + "{{{{{{module:{:#x}:{}:elf:{}}}}}}}\n", + self.module_count, + dso.name, + HexSlice { + bytes: dso.build_id.as_ref() + } + )?; + for seg in dso.segments() { + write!( + self.writer, + "{{{{{{mmap:{:#x}:{:#x}:load:{:#x}:{}:{:#x}}}}}}}\n", + seg.addr, seg.size, self.module_count, seg.flags, seg.mod_rel_addr + )?; + } + self.module_count += 1; + Ok(()) + }; + match write() { + Ok(()) => false, + Err(err) => { + self.error = Err(err); + true + } + } + } + fn error(&mut self, _error: Error) -> bool { + false + } +} + +/// This function prints the Fuchsia symbolizer markup for all information contained in a DSO. +pub fn print_dso_context(out: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + out.write_str("{{{reset:begin}}}\n")?; + let mut visitor = DsoPrinter { + writer: out, + module_count: 0, + error: Ok(()), + }; + for_each_dso(&mut visitor); + visitor.error +} + +/// This function prints the Fuchsia symbolizer markup to end the backtrace. +pub fn finish_context(out: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + out.write_str("{{{reset:end}}}\n") +} diff --git a/vendor/backtrace/src/symbolize/dbghelp.rs b/vendor/backtrace/src/symbolize/dbghelp.rs new file mode 100644 index 00000000..d3b688f8 --- /dev/null +++ b/vendor/backtrace/src/symbolize/dbghelp.rs @@ -0,0 +1,317 @@ +//! Symbolication strategy using `dbghelp.dll` on Windows, only used for MSVC +//! +//! This symbolication strategy, like with backtraces, uses dynamically loaded +//! information from `dbghelp.dll`. (see `src/dbghelp.rs` for info about why +//! it's dynamically loaded). +//! +//! This API selects its resolution strategy based on the frame provided or the +//! information we have at hand. If a frame from `StackWalkEx` is given to us +//! then we use similar APIs to generate correct information about inlined +//! functions. Otherwise if all we have is an address or an older stack frame +//! from `StackWalk64` we use the older APIs for symbolication. +//! +//! There's a good deal of support in this module, but a good chunk of it is +//! converting back and forth between Windows types and Rust types. For example +//! symbols come to us as wide strings which we then convert to utf-8 strings if +//! we can. 
+ +#![allow(bad_style)] + +use super::super::{dbghelp, windows_sys::*}; +use super::{BytesOrWideString, ResolveWhat, SymbolName}; +use core::ffi::c_void; +use core::marker; +use core::mem; +use core::ptr; +use core::slice; + +// FIXME: replace with ptr::from_ref once MSRV is high enough +#[inline(always)] +#[must_use] +const fn ptr_from_ref<T: ?Sized>(r: &T) -> *const T { + r +} + +// Store an OsString on std so we can provide the symbol name and filename. +pub struct Symbol<'a> { + name: *const [u8], + addr: *mut c_void, + line: Option<u32>, + filename: Option<*const [u16]>, + #[cfg(feature = "std")] + _filename_cache: Option<::std::ffi::OsString>, + #[cfg(not(feature = "std"))] + _filename_cache: (), + _marker: marker::PhantomData<&'a i32>, +} + +impl Symbol<'_> { + pub fn name(&self) -> Option<SymbolName<'_>> { + Some(SymbolName::new(unsafe { &*self.name })) + } + + pub fn addr(&self) -> Option<*mut c_void> { + Some(self.addr) + } + + pub fn filename_raw(&self) -> Option<BytesOrWideString<'_>> { + self.filename + .map(|slice| unsafe { BytesOrWideString::Wide(&*slice) }) + } + + pub fn colno(&self) -> Option<u32> { + None + } + + pub fn lineno(&self) -> Option<u32> { + self.line + } + + #[cfg(feature = "std")] + pub fn filename(&self) -> Option<&::std::path::Path> { + use std::path::Path; + + self._filename_cache.as_ref().map(Path::new) + } +} + +#[repr(C, align(8))] +struct Aligned8<T>(T); + +#[cfg(not(target_vendor = "win7"))] +pub unsafe fn resolve(what: ResolveWhat<'_>, cb: &mut dyn FnMut(&super::Symbol)) { + // Ensure this process's symbols are initialized + let dbghelp = match dbghelp::init() { + Ok(dbghelp) => dbghelp, + Err(()) => return, // oh well... + }; + unsafe { + match what { + ResolveWhat::Address(_) => { + resolve_with_inline(&dbghelp, what.address_or_ip(), None, cb) + } + ResolveWhat::Frame(frame) => { + resolve_with_inline(&dbghelp, frame.ip(), frame.inner.inline_context(), cb) + } + }; + } +} + +#[cfg(target_vendor = "win7")] +pub unsafe fn resolve(what: ResolveWhat<'_>, cb: &mut dyn FnMut(&super::Symbol)) { + // Ensure this process's symbols are initialized + let dbghelp = match dbghelp::init() { + Ok(dbghelp) => dbghelp, + Err(()) => return, // oh well... + }; + + unsafe { + let resolve_inner = if (*dbghelp.dbghelp()).SymAddrIncludeInlineTrace().is_some() { + // We are on a version of dbghelp 6.2+, which contains the more modern + // Inline APIs. + resolve_with_inline + } else { + // We are on an older version of dbghelp which doesn't contain the Inline + // APIs. + resolve_legacy + }; + match what { + ResolveWhat::Address(_) => resolve_inner(&dbghelp, what.address_or_ip(), None, cb), + ResolveWhat::Frame(frame) => { + resolve_inner(&dbghelp, frame.ip(), frame.inner.inline_context(), cb) + } + }; + } +} + +/// Resolve the address using the legacy dbghelp API. +/// +/// This should work all the way down to Windows XP. The inline context is +/// ignored, since this concept was only introduced in dbghelp 6.2+. +#[cfg(target_vendor = "win7")] +unsafe fn resolve_legacy( + dbghelp: &dbghelp::Init, + addr: *mut c_void, + _inline_context: Option<u32>, + cb: &mut dyn FnMut(&super::Symbol), +) -> Option<()> { + let addr = super::adjust_ip(addr) as u64; + unsafe { + do_resolve( + |info| dbghelp.SymFromAddrW()(GetCurrentProcess(), addr, &mut 0, info), + |line| dbghelp.SymGetLineFromAddrW64()(GetCurrentProcess(), addr, &mut 0, line), + cb, + ); + } + Some(()) +} + +/// Resolve the address using the modern dbghelp APIs. 
+///
+/// Note that calling this function requires having dbghelp 6.2+ loaded - and
+/// will panic otherwise.
+unsafe fn resolve_with_inline(
+    dbghelp: &dbghelp::Init,
+    addr: *mut c_void,
+    inline_context: Option<u32>,
+    cb: &mut dyn FnMut(&super::Symbol),
+) -> Option<()> {
+    unsafe {
+        let current_process = GetCurrentProcess();
+        // Ensure we have the functions we need. Return if any aren't found.
+        let SymFromInlineContextW = (*dbghelp.dbghelp()).SymFromInlineContextW()?;
+        let SymGetLineFromInlineContextW = (*dbghelp.dbghelp()).SymGetLineFromInlineContextW()?;
+
+        let addr = super::adjust_ip(addr) as u64;
+
+        let (inlined_frame_count, inline_context) = if let Some(ic) = inline_context {
+            (0, ic)
+        } else {
+            let SymAddrIncludeInlineTrace = (*dbghelp.dbghelp()).SymAddrIncludeInlineTrace()?;
+            let SymQueryInlineTrace = (*dbghelp.dbghelp()).SymQueryInlineTrace()?;
+
+            let mut inlined_frame_count = SymAddrIncludeInlineTrace(current_process, addr);
+
+            let mut inline_context = 0;
+
+            // If there are inlined frames but we can't load them for some reason OR if there are no
+            // inlined frames, then we disregard inlined_frame_count and inline_context.
+            if (inlined_frame_count > 0
+                && SymQueryInlineTrace(
+                    current_process,
+                    addr,
+                    0,
+                    addr,
+                    addr,
+                    &mut inline_context,
+                    &mut 0,
+                ) != TRUE)
+                || inlined_frame_count == 0
+            {
+                inlined_frame_count = 0;
+                inline_context = 0;
+            }
+
+            (inlined_frame_count, inline_context)
+        };
+
+        let last_inline_context = inline_context + 1 + inlined_frame_count;
+
+        for inline_context in inline_context..last_inline_context {
+            do_resolve(
+                |info| SymFromInlineContextW(current_process, addr, inline_context, &mut 0, info),
+                |line| {
+                    SymGetLineFromInlineContextW(
+                        current_process,
+                        addr,
+                        inline_context,
+                        0,
+                        &mut 0,
+                        line,
+                    )
+                },
+                cb,
+            );
+        }
+    }
+    Some(())
+}
+
+unsafe fn do_resolve(
+    sym_from_addr: impl FnOnce(*mut SYMBOL_INFOW) -> BOOL,
+    get_line_from_addr: impl FnOnce(&mut IMAGEHLP_LINEW64) -> BOOL,
+    cb: &mut dyn FnMut(&super::Symbol),
+) {
+    const SIZE: usize = 2 * MAX_SYM_NAME as usize + mem::size_of::<SYMBOL_INFOW>();
+    let mut data = Aligned8([0u8; SIZE]);
+    let info = unsafe { &mut *data.0.as_mut_ptr().cast::<SYMBOL_INFOW>() };
+    info.MaxNameLen = MAX_SYM_NAME as u32;
+    // the struct size in C. the value is different to
+    // `size_of::<SYMBOL_INFOW>() - MAX_SYM_NAME + 1` (== 81)
+    // due to struct alignment.
+    info.SizeOfStruct = 88;
+
+    if sym_from_addr(info) != TRUE {
+        return;
+    }
+
+    // If the symbol name is greater than MaxNameLen, SymFromAddrW will
+    // give a buffer of (MaxNameLen - 1) characters and set NameLen to
+    // the real value.
+    let name_len = ::core::cmp::min(info.NameLen as usize, info.MaxNameLen as usize - 1);
+    let name_ptr = info.Name.as_ptr().cast::<u16>();
+
+    // Reencode the utf-16 symbol to utf-8 so we can use `SymbolName::new` like
+    // all other platforms
+    let mut name_buffer = [0_u8; 256];
+    let mut name_len = unsafe {
+        WideCharToMultiByte(
+            CP_UTF8,
+            0,
+            name_ptr,
+            name_len as i32,
+            name_buffer.as_mut_ptr(),
+            name_buffer.len() as i32,
+            core::ptr::null_mut(),
+            core::ptr::null_mut(),
+        ) as usize
+    };
+    if name_len == 0 {
+        // If the returned length is zero that means the buffer wasn't big enough.
+        // However, the buffer will be filled with as much as will fit.
+        name_len = name_buffer.len();
+    } else if name_len > name_buffer.len() {
+        // This can't happen.
+ return; + } + let name = ptr::addr_of!(name_buffer[..name_len]); + + let mut line = IMAGEHLP_LINEW64 { + SizeOfStruct: 0, + Key: core::ptr::null_mut(), + LineNumber: 0, + FileName: core::ptr::null_mut(), + Address: 0, + }; + line.SizeOfStruct = mem::size_of::<IMAGEHLP_LINEW64>() as u32; + + let mut filename = None; + let mut lineno = None; + if get_line_from_addr(&mut line) == TRUE { + lineno = Some(line.LineNumber); + + let base = line.FileName; + let mut len = 0; + while unsafe { *base.offset(len) != 0 } { + len += 1; + } + + let len = len as usize; + + unsafe { + filename = Some(ptr_from_ref(slice::from_raw_parts(base, len))); + } + } + + cb(&super::Symbol { + inner: Symbol { + name, + addr: info.Address as *mut _, + line: lineno, + filename, + _filename_cache: unsafe { cache(filename) }, + _marker: marker::PhantomData, + }, + }) +} + +#[cfg(feature = "std")] +unsafe fn cache(filename: Option<*const [u16]>) -> Option<::std::ffi::OsString> { + use std::os::windows::ffi::OsStringExt; + unsafe { filename.map(|f| ::std::ffi::OsString::from_wide(&*f)) } +} + +#[cfg(not(feature = "std"))] +unsafe fn cache(_filename: Option<*const [u16]>) {} + +pub unsafe fn clear_symbol_cache() {} diff --git a/vendor/backtrace/src/symbolize/gimli.rs b/vendor/backtrace/src/symbolize/gimli.rs new file mode 100644 index 00000000..19756c17 --- /dev/null +++ b/vendor/backtrace/src/symbolize/gimli.rs @@ -0,0 +1,563 @@ +//! Support for symbolication using the `gimli` crate on crates.io +//! +//! This is the default symbolication implementation for Rust. + +use self::gimli::read::EndianSlice; +use self::gimli::NativeEndian as Endian; +use self::mmap::Mmap; +use self::stash::Stash; +use super::BytesOrWideString; +use super::ResolveWhat; +use super::SymbolName; +use addr2line::gimli; +use core::convert::TryInto; +use core::mem; +use libc::c_void; +use mystd::ffi::OsString; +use mystd::fs::File; +use mystd::path::Path; +use mystd::prelude::v1::*; + +#[cfg(backtrace_in_libstd)] +mod mystd { + pub use crate::*; +} +#[cfg(not(backtrace_in_libstd))] +extern crate std as mystd; + +cfg_if::cfg_if! { + if #[cfg(windows)] { + #[path = "gimli/mmap_windows.rs"] + mod mmap; + } else if #[cfg(target_vendor = "apple")] { + #[path = "gimli/mmap_unix.rs"] + mod mmap; + } else if #[cfg(any( + target_os = "android", + target_os = "freebsd", + target_os = "fuchsia", + target_os = "haiku", + target_os = "hurd", + target_os = "linux", + target_os = "openbsd", + target_os = "solaris", + target_os = "illumos", + target_os = "aix", + target_os = "cygwin", + ))] { + #[path = "gimli/mmap_unix.rs"] + mod mmap; + } else { + #[path = "gimli/mmap_fake.rs"] + mod mmap; + } +} + +mod lru; +mod stash; + +use lru::Lru; + +const MAPPINGS_CACHE_SIZE: usize = 4; + +struct Mapping { + // 'static lifetime is a lie to hack around lack of support for self-referential structs. + cx: Context<'static>, + _map: Mmap, + stash: Stash, +} + +enum Either<A, B> { + #[allow(dead_code)] + A(A), + B(B), +} + +impl Mapping { + /// Creates a `Mapping` by ensuring that the `data` specified is used to + /// create a `Context` and it can only borrow from that or the `Stash` of + /// decompressed sections or auxiliary data. + fn mk<F>(data: Mmap, mk: F) -> Option<Mapping> + where + F: for<'a> FnOnce(&'a [u8], &'a Stash) -> Option<Context<'a>>, + { + Mapping::mk_or_other(data, move |data, stash| { + let cx = mk(data, stash)?; + Some(Either::B(cx)) + }) + } + + /// Creates a `Mapping` from `data`, or if the closure decides to, returns a + /// different mapping. 
+ fn mk_or_other<F>(data: Mmap, mk: F) -> Option<Mapping> + where + F: for<'a> FnOnce(&'a [u8], &'a Stash) -> Option<Either<Mapping, Context<'a>>>, + { + let stash = Stash::new(); + let cx = match mk(&data, &stash)? { + Either::A(mapping) => return Some(mapping), + Either::B(cx) => cx, + }; + Some(Mapping { + // Convert to 'static lifetimes since the symbols should + // only borrow `map` and `stash` and we're preserving them below. + cx: unsafe { core::mem::transmute::<Context<'_>, Context<'static>>(cx) }, + _map: data, + stash, + }) + } +} + +struct Context<'a> { + dwarf: addr2line::Context<EndianSlice<'a, Endian>>, + object: Object<'a>, + package: Option<gimli::DwarfPackage<EndianSlice<'a, Endian>>>, +} + +impl<'data> Context<'data> { + fn new( + stash: &'data Stash, + object: Object<'data>, + sup: Option<Object<'data>>, + dwp: Option<Object<'data>>, + ) -> Option<Context<'data>> { + let mut sections = gimli::Dwarf::load(|id| -> Result<_, ()> { + if cfg!(not(target_os = "aix")) { + let data = object.section(stash, id.name()).unwrap_or(&[]); + Ok(EndianSlice::new(data, Endian)) + } else if let Some(name) = id.xcoff_name() { + let data = object.section(stash, name).unwrap_or(&[]); + Ok(EndianSlice::new(data, Endian)) + } else { + Ok(EndianSlice::new(&[], Endian)) + } + }) + .ok()?; + + if let Some(sup) = sup { + sections + .load_sup(|id| -> Result<_, ()> { + let data = sup.section(stash, id.name()).unwrap_or(&[]); + Ok(EndianSlice::new(data, Endian)) + }) + .ok()?; + } + let dwarf = addr2line::Context::from_dwarf(sections).ok()?; + + let mut package = None; + if let Some(dwp) = dwp { + package = Some( + gimli::DwarfPackage::load( + |id| -> Result<_, gimli::Error> { + let data = id + .dwo_name() + .and_then(|name| dwp.section(stash, name)) + .unwrap_or(&[]); + Ok(EndianSlice::new(data, Endian)) + }, + EndianSlice::new(&[], Endian), + ) + .ok()?, + ); + } + + Some(Context { + dwarf, + object, + package, + }) + } + + fn find_frames( + &'_ self, + stash: &'data Stash, + probe: u64, + ) -> gimli::Result<addr2line::FrameIter<'_, EndianSlice<'data, Endian>>> { + use addr2line::{LookupContinuation, LookupResult}; + + let mut l = self.dwarf.find_frames(probe); + loop { + let (load, continuation) = match l { + LookupResult::Output(output) => break output, + LookupResult::Load { load, continuation } => (load, continuation), + }; + + l = continuation.resume(handle_split_dwarf(self.package.as_ref(), stash, load)); + } + } +} + +fn mmap(path: &Path) -> Option<Mmap> { + let file = File::open(path).ok()?; + let len = file.metadata().ok()?.len().try_into().ok()?; + unsafe { Mmap::map(&file, len, 0) } +} + +cfg_if::cfg_if! { + if #[cfg(any(windows, target_os = "cygwin"))] { + mod coff; + use self::coff::{handle_split_dwarf, Object}; + } else if #[cfg(any(target_vendor = "apple"))] { + mod macho; + use self::macho::{handle_split_dwarf, Object}; + } else if #[cfg(target_os = "aix")] { + mod xcoff; + use self::xcoff::{handle_split_dwarf, Object}; + } else { + mod elf; + use self::elf::{handle_split_dwarf, Object}; + } +} + +cfg_if::cfg_if! 
+{
+    if #[cfg(any(windows, target_os = "cygwin"))] {
+        mod libs_windows;
+        use libs_windows::native_libraries;
+    } else if #[cfg(target_vendor = "apple")] {
+        mod libs_macos;
+        use libs_macos::native_libraries;
+    } else if #[cfg(target_os = "illumos")] {
+        mod libs_illumos;
+        use libs_illumos::native_libraries;
+    } else if #[cfg(all(
+        any(
+            target_os = "linux",
+            target_os = "fuchsia",
+            target_os = "freebsd",
+            target_os = "hurd",
+            target_os = "openbsd",
+            target_os = "netbsd",
+            target_os = "nto",
+            target_os = "android",
+        ),
+        not(target_env = "uclibc"),
+    ))] {
+        mod libs_dl_iterate_phdr;
+        use libs_dl_iterate_phdr::native_libraries;
+        #[path = "gimli/parse_running_mmaps_unix.rs"]
+        mod parse_running_mmaps;
+    } else if #[cfg(target_env = "libnx")] {
+        mod libs_libnx;
+        use libs_libnx::native_libraries;
+    } else if #[cfg(target_os = "haiku")] {
+        mod libs_haiku;
+        use libs_haiku::native_libraries;
+    } else if #[cfg(target_os = "aix")] {
+        mod libs_aix;
+        use libs_aix::native_libraries;
+    } else {
+        // Everything else doesn't know how to load native libraries.
+        fn native_libraries() -> Vec<Library> {
+            Vec::new()
+        }
+    }
+}
+
+#[derive(Default)]
+struct Cache {
+    /// All known shared libraries that have been loaded.
+    libraries: Vec<Library>,
+
+    /// Mappings cache where we retain parsed dwarf information.
+    ///
+    /// This list has a fixed capacity for its entire lifetime which never
+    /// increases. The `usize` element of each pair is an index into `libraries`
+    /// above where `usize::max_value()` represents the current executable. The
+    /// `Mapping` is the corresponding parsed dwarf information.
+    ///
+    /// Note that this is basically an LRU cache and we'll be shifting things
+    /// around in here as we symbolize addresses.
+    mappings: Lru<(usize, Mapping), MAPPINGS_CACHE_SIZE>,
+}
+
+struct Library {
+    name: OsString,
+    #[cfg(target_os = "android")]
+    /// On Android, the dynamic linker [can map libraries directly from a
+    /// ZIP archive][ndk-linker-changes] (typically an `.apk`).
+    ///
+    /// The linker requires that these libraries are stored uncompressed
+    /// and page-aligned.
+    ///
+    /// These "embedded" libraries have filepaths of the form
+    /// `/path/to/my.apk!/lib/mylib.so` (where `/path/to/my.apk` is the archive
+    /// and `lib/mylib.so` is the name of the library within the archive).
+    ///
+    /// This mechanism is present on Android since API level 23.
+    ///
+    /// [ndk-linker-changes]: https://android.googlesource.com/platform/bionic/+/main/android-changes-for-ndk-developers.md#opening-shared-libraries-directly-from-an-apk
+    zip_offset: Option<u64>,
+    #[cfg(target_os = "aix")]
+    /// On AIX, the library mmapped can be a member of a big-archive file.
+    /// For example, with a big-archive named libfoo.a containing libbar.so,
+    /// one can use `dlopen("libfoo.a(libbar.so)", RTLD_MEMBER | RTLD_LAZY)`
+    /// to use the `libbar.so` library. In this case, only `libbar.so` is
+    /// mmapped, not the whole `libfoo.a`.
+    member_name: OsString,
+    /// Segments of this library loaded into memory, and where they're loaded.
+    segments: Vec<LibrarySegment>,
+    /// The "bias" of this library, typically where it's loaded into memory.
+    /// This value is added to each segment's stated address to get the actual
+    /// virtual memory address that the segment is loaded into. Additionally
+    /// this bias is subtracted from real virtual memory addresses to index into
+    /// debuginfo and the symbol table.
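+    ///
+    /// For example (illustrative numbers): a segment with a stated address of
+    /// 0x1000 in a library loaded with a bias of 0x7f00_0000_0000 ends up at
+    /// 0x7f00_0000_1000 at runtime.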
+ bias: usize, +} + +struct LibrarySegment { + /// The stated address of this segment in the object file. This is not + /// actually where the segment is loaded, but rather this address plus the + /// containing library's `bias` is where to find it. + stated_virtual_memory_address: usize, + /// The size of this segment in memory. + len: usize, +} + +fn create_mapping(lib: &Library) -> Option<Mapping> { + cfg_if::cfg_if! { + if #[cfg(target_os = "aix")] { + Mapping::new(lib.name.as_ref(), &lib.member_name) + } else if #[cfg(target_os = "android")] { + Mapping::new_android(lib.name.as_ref(), lib.zip_offset) + } else { + Mapping::new(lib.name.as_ref()) + } + } +} + +/// Try to extract the archive path from an "embedded" library path +/// (e.g. `/path/to/my.apk` from `/path/to/my.apk!/mylib.so`). +/// +/// Returns `None` if the path does not contain a `!/` separator. +#[cfg(target_os = "android")] +fn extract_zip_path_android(path: &mystd::ffi::OsStr) -> Option<&mystd::ffi::OsStr> { + use mystd::os::unix::ffi::OsStrExt; + + path.as_bytes() + .windows(2) + .enumerate() + .find(|(_, chunk)| chunk == b"!/") + .map(|(index, _)| mystd::ffi::OsStr::from_bytes(path.as_bytes().split_at(index).0)) +} + +// unsafe because this is required to be externally synchronized +pub unsafe fn clear_symbol_cache() { + unsafe { + Cache::with_global(|cache| cache.mappings.clear()); + } +} + +impl Cache { + fn new() -> Cache { + Cache { + mappings: Lru::default(), + libraries: native_libraries(), + } + } + + // unsafe because this is required to be externally synchronized + unsafe fn with_global(f: impl FnOnce(&mut Self)) { + // A very small, very simple LRU cache for debug info mappings. + // + // The hit rate should be very high, since the typical stack doesn't cross + // between many shared libraries. + // + // The `addr2line::Context` structures are pretty expensive to create. Its + // cost is expected to be amortized by subsequent `locate` queries, which + // leverage the structures built when constructing `addr2line::Context`s to + // get nice speedups. If we didn't have this cache, that amortization would + // never happen, and symbolicating backtraces would be ssssllllooooowwww. + static mut MAPPINGS_CACHE: Option<Cache> = None; + + unsafe { + // FIXME: https://github.com/rust-lang/backtrace-rs/issues/678 + #[allow(static_mut_refs)] + f(MAPPINGS_CACHE.get_or_insert_with(Cache::new)) + } + } + + fn avma_to_svma(&self, addr: *const u8) -> Option<(usize, *const u8)> { + self.libraries + .iter() + .enumerate() + .filter_map(|(i, lib)| { + // First up, test if this `lib` has any segment containing the + // `addr` (handling relocation). If this check passes then we + // can continue below and actually translate the address. + // + // Note that we're using `wrapping_add` here to avoid overflow + // checks. It's been seen in the wild that the SVMA + bias + // computation overflows. It seems a bit odd that would happen + // but there's not a huge amount we can do about it other than + // probably just ignore those segments since they're likely + // pointing off into space. This originally came up in + // rust-lang/backtrace-rs#329. + if !lib.segments.iter().any(|s| { + let svma = s.stated_virtual_memory_address; + let start = svma.wrapping_add(lib.bias); + let end = start.wrapping_add(s.len); + let address = addr as usize; + start <= address && address < end + }) { + return None; + } + + // Now that we know `lib` contains `addr`, we can offset with + // the bias to find the stated virtual memory address. 
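+                // For illustration (addresses made up): with a bias of
+                // 0x7f00_0000_0000, an avma of 0x7f00_0000_1234 maps back to
+                // the svma 0x1234 that the debuginfo and symbol table refer to.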
+ let svma = (addr as usize).wrapping_sub(lib.bias); + Some((i, svma as *const u8)) + }) + .next() + } + + fn mapping_for_lib<'a>(&'a mut self, lib: usize) -> Option<(&'a mut Context<'a>, &'a Stash)> { + let cache_idx = self.mappings.iter().position(|(lib_id, _)| *lib_id == lib); + + let cache_entry = if let Some(idx) = cache_idx { + self.mappings.move_to_front(idx) + } else { + // When the mapping is not in the cache, create a new mapping and insert it, + // which will also evict the oldest entry. + create_mapping(&self.libraries[lib]) + .and_then(|mapping| self.mappings.push_front((lib, mapping))) + }; + + let (_, mapping) = cache_entry?; + let cx: &'a mut Context<'static> = &mut mapping.cx; + let stash: &'a Stash = &mapping.stash; + // don't leak the `'static` lifetime, make sure it's scoped to just + // ourselves + Some(( + unsafe { mem::transmute::<&'a mut Context<'static>, &'a mut Context<'a>>(cx) }, + stash, + )) + } +} + +pub unsafe fn resolve(what: ResolveWhat<'_>, cb: &mut dyn FnMut(&super::Symbol)) { + let addr = what.address_or_ip(); + let mut call = |sym: Symbol<'_>| { + // Extend the lifetime of `sym` to `'static` since we are unfortunately + // required to here, but it's only ever going out as a reference so no + // reference to it should be persisted beyond this frame anyway. + // SAFETY: praying the above is correct + let sym = unsafe { mem::transmute::<Symbol<'_>, Symbol<'static>>(sym) }; + (cb)(&super::Symbol { inner: sym }); + }; + + unsafe { + Cache::with_global(|cache| { + let (lib, addr) = match cache.avma_to_svma(addr.cast_const().cast::<u8>()) { + Some(pair) => pair, + None => return, + }; + + // Finally, get a cached mapping or create a new mapping for this file, and + // evaluate the DWARF info to find the file/line/name for this address. + let (cx, stash) = match cache.mapping_for_lib(lib) { + Some((cx, stash)) => (cx, stash), + None => return, + }; + let mut any_frames = false; + if let Ok(mut frames) = cx.find_frames(stash, addr as u64) { + while let Ok(Some(frame)) = frames.next() { + any_frames = true; + let name = match frame.function { + Some(f) => Some(f.name.slice()), + None => cx.object.search_symtab(addr as u64), + }; + call(Symbol::Frame { + addr: addr as *mut c_void, + location: frame.location, + name, + }); + } + } + if !any_frames { + if let Some((object_cx, object_addr)) = cx.object.search_object_map(addr as u64) { + if let Ok(mut frames) = object_cx.find_frames(stash, object_addr) { + while let Ok(Some(frame)) = frames.next() { + any_frames = true; + call(Symbol::Frame { + addr: addr as *mut c_void, + location: frame.location, + name: frame.function.map(|f| f.name.slice()), + }); + } + } + } + } + if !any_frames { + if let Some(name) = cx.object.search_symtab(addr as u64) { + call(Symbol::Symtab { name }); + } + } + }); + } +} + +pub enum Symbol<'a> { + /// We were able to locate frame information for this symbol, and + /// `addr2line`'s frame internally has all the nitty gritty details. + Frame { + addr: *mut c_void, + location: Option<addr2line::Location<'a>>, + name: Option<&'a [u8]>, + }, + /// Couldn't find debug information, but we found it in the symbol table of + /// the elf executable. + Symtab { name: &'a [u8] }, +} + +impl Symbol<'_> { + pub fn name(&self) -> Option<SymbolName<'_>> { + match self { + Symbol::Frame { name, .. } => { + let name = name.as_ref()?; + Some(SymbolName::new(name)) + } + Symbol::Symtab { name, .. 
} => Some(SymbolName::new(name)), + } + } + + pub fn addr(&self) -> Option<*mut c_void> { + match self { + Symbol::Frame { addr, .. } => Some(*addr), + Symbol::Symtab { .. } => None, + } + } + + pub fn filename_raw(&self) -> Option<BytesOrWideString<'_>> { + match self { + Symbol::Frame { location, .. } => { + let file = location.as_ref()?.file?; + Some(BytesOrWideString::Bytes(file.as_bytes())) + } + Symbol::Symtab { .. } => None, + } + } + + pub fn filename(&self) -> Option<&Path> { + match self { + Symbol::Frame { location, .. } => { + let file = location.as_ref()?.file?; + Some(Path::new(file)) + } + Symbol::Symtab { .. } => None, + } + } + + pub fn lineno(&self) -> Option<u32> { + match self { + Symbol::Frame { location, .. } => location.as_ref()?.line, + Symbol::Symtab { .. } => None, + } + } + + pub fn colno(&self) -> Option<u32> { + match self { + Symbol::Frame { location, .. } => location.as_ref()?.column, + Symbol::Symtab { .. } => None, + } + } +} diff --git a/vendor/backtrace/src/symbolize/gimli/coff.rs b/vendor/backtrace/src/symbolize/gimli/coff.rs new file mode 100644 index 00000000..031afea0 --- /dev/null +++ b/vendor/backtrace/src/symbolize/gimli/coff.rs @@ -0,0 +1,116 @@ +use super::mystd::path::Path; +use super::{gimli, Context, Endian, EndianSlice, Mapping, Stash}; +use alloc::sync::Arc; +use alloc::vec::Vec; +use core::convert::TryFrom; +use object::pe::{ImageDosHeader, ImageSymbol}; +use object::read::coff::ImageSymbol as _; +use object::read::pe::{ImageNtHeaders, ImageOptionalHeader, SectionTable}; +use object::read::StringTable; +use object::LittleEndian as LE; + +#[cfg(target_pointer_width = "32")] +type Pe = object::pe::ImageNtHeaders32; +#[cfg(target_pointer_width = "64")] +type Pe = object::pe::ImageNtHeaders64; + +impl Mapping { + pub fn new(path: &Path) -> Option<Mapping> { + let map = super::mmap(path)?; + Mapping::mk(map, |data, stash| { + Context::new(stash, Object::parse(data)?, None, None) + }) + } +} + +pub struct Object<'a> { + data: &'a [u8], + sections: SectionTable<'a>, + symbols: Vec<(usize, &'a ImageSymbol)>, + strings: StringTable<'a>, +} + +pub fn get_image_base(data: &[u8]) -> Option<usize> { + let dos_header = ImageDosHeader::parse(data).ok()?; + let mut offset = dos_header.nt_headers_offset().into(); + let (nt_headers, _) = Pe::parse(data, &mut offset).ok()?; + usize::try_from(nt_headers.optional_header().image_base()).ok() +} + +impl<'a> Object<'a> { + fn parse(data: &'a [u8]) -> Option<Object<'a>> { + let dos_header = ImageDosHeader::parse(data).ok()?; + let mut offset = dos_header.nt_headers_offset().into(); + let (nt_headers, _) = Pe::parse(data, &mut offset).ok()?; + let sections = nt_headers.sections(data, offset).ok()?; + let symtab = nt_headers.symbols(data).ok()?; + let strings = symtab.strings(); + let image_base = usize::try_from(nt_headers.optional_header().image_base()).ok()?; + + // Collect all the symbols into a local vector which is sorted + // by address and contains enough data to learn about the symbol + // name. Note that we only look at function symbols and also + // note that the sections are 1-indexed because the zero section + // is special (apparently). 
+ let mut symbols = Vec::new(); + for (_, sym) in symtab.iter() { + if sym.derived_type() != object::pe::IMAGE_SYM_DTYPE_FUNCTION { + continue; + } + let Some(section_index) = sym.section() else { + continue; + }; + let addr = usize::try_from(sym.value.get(LE)).ok()?; + let section = sections.section(section_index).ok()?; + let va = usize::try_from(section.virtual_address.get(LE)).ok()?; + symbols.push((addr + va + image_base, sym)); + } + symbols.sort_unstable_by_key(|x| x.0); + Some(Object { + data, + sections, + strings, + symbols, + }) + } + + pub fn section(&self, _: &Stash, name: &str) -> Option<&'a [u8]> { + Some( + self.sections + .section_by_name(self.strings, name.as_bytes())? + .1 + .pe_data(self.data) + .ok()?, + ) + } + + pub fn search_symtab<'b>(&'b self, addr: u64) -> Option<&'b [u8]> { + // Note that unlike other formats COFF doesn't embed the size of + // each symbol. As a last ditch effort search for the *closest* + // symbol to a particular address and return that one. This gets + // really wonky once symbols start getting removed because the + // symbols returned here can be totally incorrect, but we have + // no idea of knowing how to detect that. + let addr = usize::try_from(addr).ok()?; + let i = match self.symbols.binary_search_by_key(&addr, |p| p.0) { + Ok(i) => i, + // typically `addr` isn't in the array, but `i` is where + // we'd insert it, so the previous position must be the + // greatest less than `addr` + Err(i) => i.checked_sub(1)?, + }; + self.symbols[i].1.name(self.strings).ok() + } + + pub(super) fn search_object_map(&self, _addr: u64) -> Option<(&Context<'_>, u64)> { + None + } +} + +pub(super) fn handle_split_dwarf<'data>( + _package: Option<&gimli::DwarfPackage<EndianSlice<'data, Endian>>>, + _stash: &'data Stash, + _load: addr2line::SplitDwarfLoad<EndianSlice<'data, Endian>>, +) -> Option<Arc<gimli::Dwarf<EndianSlice<'data, Endian>>>> { + None +} diff --git a/vendor/backtrace/src/symbolize/gimli/elf.rs b/vendor/backtrace/src/symbolize/gimli/elf.rs new file mode 100644 index 00000000..a3ddc9df --- /dev/null +++ b/vendor/backtrace/src/symbolize/gimli/elf.rs @@ -0,0 +1,568 @@ +#![allow(clippy::useless_conversion)] + +use super::mystd::ffi::OsStr; +use super::mystd::fs; +use super::mystd::os::unix::ffi::OsStrExt; +use super::mystd::path::{Path, PathBuf}; +use super::Either; +use super::{gimli, Context, Endian, EndianSlice, Mapping, Stash}; +use alloc::string::String; +use alloc::sync::Arc; +use alloc::vec::Vec; +use core::convert::{TryFrom, TryInto}; +use core::str; +#[cfg(feature = "ruzstd")] +use object::elf::ELFCOMPRESS_ZSTD; +use object::elf::{ELFCOMPRESS_ZLIB, ELF_NOTE_GNU, NT_GNU_BUILD_ID, SHF_COMPRESSED}; +use object::read::elf::{CompressionHeader, FileHeader, SectionHeader, SectionTable, Sym}; +use object::read::StringTable; +use object::{BigEndian, Bytes, NativeEndian}; + +#[cfg(target_pointer_width = "32")] +type Elf = object::elf::FileHeader32<NativeEndian>; +#[cfg(target_pointer_width = "64")] +type Elf = object::elf::FileHeader64<NativeEndian>; + +impl Mapping { + pub fn new(path: &Path) -> Option<Mapping> { + let map = super::mmap(path)?; + Mapping::mk_or_other(map, |map, stash| { + let object = Object::parse(map)?; + + // Try to locate an external debug file using the build ID. + if let Some(path_debug) = object.build_id().and_then(locate_build_id) { + if let Some(mapping) = Mapping::new_debug(path, path_debug, None) { + return Some(Either::A(mapping)); + } + } + + // Try to locate an external debug file using the GNU debug link section. 
+ if let Some((path_debug, crc)) = object.gnu_debuglink_path(path) { + if let Some(mapping) = Mapping::new_debug(path, path_debug, Some(crc)) { + return Some(Either::A(mapping)); + } + } + + let dwp = Mapping::load_dwarf_package(path, stash); + + Context::new(stash, object, None, dwp).map(Either::B) + }) + } + + /// On Android, shared objects can be loaded directly from a ZIP archive + /// (see: [`super::Library::zip_offset`]). + /// + /// If `zip_offset` is not None, we interpret the `path` as an + /// "embedded" library path, and the value of `zip_offset` tells us where + /// in the ZIP archive the library data starts. + /// + /// We expect `zip_offset` to be page-aligned because the dynamic linker + /// requires this. Otherwise, loading the embedded library will fail. + /// + /// If we fail to load an embedded library for any reason, we fallback to + /// interpreting the path as a literal file on disk (same as calling [`Self::new`]). + #[cfg(target_os = "android")] + pub fn new_android(path: &Path, zip_offset: Option<u64>) -> Option<Mapping> { + fn map_embedded_library(path: &Path, zip_offset: u64) -> Option<Mapping> { + // get path of ZIP archive (delimited by `!/`) + let zip_path = Path::new(super::extract_zip_path_android(path.as_os_str())?); + + let file = fs::File::open(zip_path).ok()?; + let len = file.metadata().ok()?.len(); + + // NOTE: we map the remainder of the entire archive instead of just the library so we don't have to determine its length + // NOTE: mmap will fail if `zip_offset` is not page-aligned + let map = unsafe { + super::mmap::Mmap::map(&file, usize::try_from(len - zip_offset).ok()?, zip_offset) + }?; + + Mapping::mk(map, |map, stash| { + Context::new(stash, Object::parse(&map)?, None, None) + }) + } + + // if ZIP offset is given, try mapping as a ZIP-embedded library + // otherwise, fallback to mapping as a literal filepath + if let Some(zip_offset) = zip_offset { + map_embedded_library(path, zip_offset).or_else(|| Self::new(path)) + } else { + Self::new(path) + } + } + + /// Load debuginfo from an external debug file. + fn new_debug(original_path: &Path, path: PathBuf, crc: Option<u32>) -> Option<Mapping> { + let map = super::mmap(&path)?; + Mapping::mk(map, |map, stash| { + let object = Object::parse(map)?; + + if let Some(_crc) = crc { + // TODO: check crc + } + + // Try to locate a supplementary object file. + let mut sup = None; + if let Some((path_sup, build_id_sup)) = object.gnu_debugaltlink_path(&path) { + if let Some(map_sup) = super::mmap(&path_sup) { + let map_sup = stash.cache_mmap(map_sup); + if let Some(sup_) = Object::parse(map_sup) { + if sup_.build_id() == Some(build_id_sup) { + sup = Some(sup_); + } + } + } + } + + let dwp = Mapping::load_dwarf_package(original_path, stash); + + Context::new(stash, object, sup, dwp) + }) + } + + /// Try to locate a DWARF package file. 
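+    ///
+    /// For example (illustrative path): for `/usr/lib/libfoo.so` this probes
+    /// `/usr/lib/libfoo.so.dwp` next to the original file.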
+ fn load_dwarf_package<'data>(path: &Path, stash: &'data Stash) -> Option<Object<'data>> { + let mut path_dwp = path.to_path_buf(); + let dwp_extension = path + .extension() + .map(|previous_extension| { + let mut previous_extension = previous_extension.to_os_string(); + previous_extension.push(".dwp"); + previous_extension + }) + .unwrap_or_else(|| "dwp".into()); + path_dwp.set_extension(dwp_extension); + if let Some(map_dwp) = super::mmap(&path_dwp) { + let map_dwp = stash.cache_mmap(map_dwp); + if let Some(dwp_) = Object::parse(map_dwp) { + return Some(dwp_); + } + } + + None + } +} + +struct ParsedSym { + address: u64, + size: u64, + name: u32, +} + +pub struct Object<'a> { + /// Zero-sized type representing the native endianness. + /// + /// We could use a literal instead, but this helps ensure correctness. + endian: NativeEndian, + /// The entire file data. + data: &'a [u8], + sections: SectionTable<'a, Elf>, + strings: StringTable<'a>, + /// List of pre-parsed and sorted symbols by base address. + syms: Vec<ParsedSym>, +} + +impl<'a> Object<'a> { + fn parse(data: &'a [u8]) -> Option<Object<'a>> { + let elf = Elf::parse(data).ok()?; + let endian = elf.endian().ok()?; + let sections = elf.sections(endian, data).ok()?; + let mut syms = sections + .symbols(endian, data, object::elf::SHT_SYMTAB) + .ok()?; + if syms.is_empty() { + syms = sections + .symbols(endian, data, object::elf::SHT_DYNSYM) + .ok()?; + } + let strings = syms.strings(); + + let mut syms = syms + .iter() + // Only look at function/object symbols. This mirrors what + // libbacktrace does and in general we're only symbolicating + // function addresses in theory. Object symbols correspond + // to data, and maybe someone's crazy enough to have a + // function go into static data? + .filter(|sym| { + let st_type = sym.st_type(); + st_type == object::elf::STT_FUNC || st_type == object::elf::STT_OBJECT + }) + // skip anything that's in an undefined section header, + // since it means it's an imported function and we're only + // symbolicating with locally defined functions. + .filter(|sym| sym.st_shndx(endian) != object::elf::SHN_UNDEF) + .map(|sym| { + let address = sym.st_value(endian).into(); + let size = sym.st_size(endian).into(); + let name = sym.st_name(endian); + ParsedSym { + address, + size, + name, + } + }) + .collect::<Vec<_>>(); + syms.sort_unstable_by_key(|s| s.address); + Some(Object { + endian, + data, + sections, + strings, + syms, + }) + } + + pub fn section(&self, stash: &'a Stash, name: &str) -> Option<&'a [u8]> { + if let Some(section) = self.section_header(name) { + let mut data = Bytes(section.data(self.endian, self.data).ok()?); + + // Check for DWARF-standard (gABI) compression, i.e., as generated + // by ld's `--compress-debug-sections=zlib-gabi` and + // `--compress-debug-sections=zstd` flags. + let flags: u64 = section.sh_flags(self.endian).into(); + if (flags & u64::from(SHF_COMPRESSED)) == 0 { + // Not compressed. + return Some(data.0); + } + + let header = data.read::<<Elf as FileHeader>::CompressionHeader>().ok()?; + match header.ch_type(self.endian) { + ELFCOMPRESS_ZLIB => { + let size = usize::try_from(header.ch_size(self.endian)).ok()?; + let buf = stash.allocate(size); + decompress_zlib(data.0, buf)?; + return Some(buf); + } + #[cfg(feature = "ruzstd")] + ELFCOMPRESS_ZSTD => { + let size = usize::try_from(header.ch_size(self.endian)).ok()?; + let buf = stash.allocate(size); + decompress_zstd(data.0, buf)?; + return Some(buf); + } + _ => return None, // Unknown compression type. 
+ } + } + + // Check for the nonstandard GNU compression format, i.e., as generated + // by ld's `--compress-debug-sections=zlib-gnu` flag. This means that if + // we're actually asking for `.debug_info` then we need to look up a + // section named `.zdebug_info`. + if !name.starts_with(".debug_") { + return None; + } + let debug_name = name[7..].as_bytes(); + let compressed_section = self + .sections + .iter() + .filter_map(|header| { + let name = self.sections.section_name(self.endian, header).ok()?; + if name.starts_with(b".zdebug_") && &name[8..] == debug_name { + Some(header) + } else { + None + } + }) + .next()?; + let mut data = Bytes(compressed_section.data(self.endian, self.data).ok()?); + if data.read_bytes(8).ok()?.0 != b"ZLIB\0\0\0\0" { + return None; + } + let size = usize::try_from(data.read::<object::U32Bytes<_>>().ok()?.get(BigEndian)).ok()?; + let buf = stash.allocate(size); + decompress_zlib(data.0, buf)?; + Some(buf) + } + + fn section_header(&self, name: &str) -> Option<&<Elf as FileHeader>::SectionHeader> { + self.sections + .section_by_name(self.endian, name.as_bytes()) + .map(|(_index, section)| section) + } + + pub fn search_symtab(&self, addr: u64) -> Option<&[u8]> { + // Same sort of binary search as Windows above + let i = match self.syms.binary_search_by_key(&addr, |sym| sym.address) { + Ok(i) => i, + Err(i) => i.checked_sub(1)?, + }; + let sym = self.syms.get(i)?; + if sym.address <= addr && addr <= sym.address + sym.size { + self.strings.get(sym.name).ok() + } else { + None + } + } + + pub(super) fn search_object_map(&self, _addr: u64) -> Option<(&Context<'_>, u64)> { + None + } + + fn build_id(&self) -> Option<&'a [u8]> { + for section in self.sections.iter() { + if let Ok(Some(mut notes)) = section.notes(self.endian, self.data) { + while let Ok(Some(note)) = notes.next() { + if note.name() == ELF_NOTE_GNU && note.n_type(self.endian) == NT_GNU_BUILD_ID { + return Some(note.desc()); + } + } + } + } + None + } + + // The contents of the ".gnu_debuglink" section is documented at: + // https://sourceware.org/gdb/onlinedocs/gdb/Separate-Debug-Files.html + fn gnu_debuglink_path(&self, path: &Path) -> Option<(PathBuf, u32)> { + let section = self.section_header(".gnu_debuglink")?; + let data = section.data(self.endian, self.data).ok()?; + let len = data.iter().position(|x| *x == 0)?; + let filename = OsStr::from_bytes(&data[..len]); + let offset = (len + 1 + 3) & !3; + let crc_bytes = data + .get(offset..offset + 4) + .and_then(|bytes| bytes.try_into().ok())?; + let crc = u32::from_ne_bytes(crc_bytes); + let path_debug = locate_debuglink(path, filename)?; + Some((path_debug, crc)) + } + + // The format of the ".gnu_debugaltlink" section is based on gdb. 
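+    // The section contents are a NUL-terminated filename immediately followed
+    // by the raw build ID bytes of the supplementary file, which is how the
+    // parsing below splits them.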
+ fn gnu_debugaltlink_path(&self, path: &Path) -> Option<(PathBuf, &'a [u8])> { + let section = self.section_header(".gnu_debugaltlink")?; + let data = section.data(self.endian, self.data).ok()?; + let len = data.iter().position(|x| *x == 0)?; + let filename = OsStr::from_bytes(&data[..len]); + let build_id = &data[len + 1..]; + let path_sup = locate_debugaltlink(path, filename, build_id)?; + Some((path_sup, build_id)) + } +} + +fn decompress_zlib(input: &[u8], output: &mut [u8]) -> Option<()> { + use miniz_oxide::inflate::core::inflate_flags::{ + TINFL_FLAG_PARSE_ZLIB_HEADER, TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF, + }; + use miniz_oxide::inflate::core::{decompress, DecompressorOxide}; + use miniz_oxide::inflate::TINFLStatus; + + let (status, in_read, out_read) = decompress( + &mut DecompressorOxide::new(), + input, + output, + 0, + TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF | TINFL_FLAG_PARSE_ZLIB_HEADER, + ); + if status == TINFLStatus::Done && in_read == input.len() && out_read == output.len() { + Some(()) + } else { + None + } +} + +#[cfg(feature = "ruzstd")] +fn decompress_zstd(mut input: &[u8], mut output: &mut [u8]) -> Option<()> { + use ruzstd::frame::ReadFrameHeaderError; + use ruzstd::frame_decoder::FrameDecoderError; + use ruzstd::io::Read; + + while !input.is_empty() { + let mut decoder = match ruzstd::StreamingDecoder::new(&mut input) { + Ok(decoder) => decoder, + Err(FrameDecoderError::ReadFrameHeaderError(ReadFrameHeaderError::SkipFrame { + length, + .. + })) => { + input = &input.get(length as usize..)?; + continue; + } + Err(_) => return None, + }; + loop { + let bytes_written = decoder.read(output).ok()?; + if bytes_written == 0 { + break; + } + output = &mut output[bytes_written..]; + } + } + + if !output.is_empty() { + // Lengths didn't match, something is wrong. + return None; + } + + Some(()) +} + +const DEBUG_PATH: &str = "/usr/lib/debug"; + +fn debug_path_exists() -> bool { + cfg_if::cfg_if! { + if #[cfg(any(target_os = "freebsd", target_os = "hurd", target_os = "linux"))] { + use core::sync::atomic::{AtomicU8, Ordering}; + static DEBUG_PATH_EXISTS: AtomicU8 = AtomicU8::new(0); + + let mut exists = DEBUG_PATH_EXISTS.load(Ordering::Relaxed); + if exists == 0 { + exists = if Path::new(DEBUG_PATH).is_dir() { + 1 + } else { + 2 + }; + DEBUG_PATH_EXISTS.store(exists, Ordering::Relaxed); + } + exists == 1 + } else { + false + } + } +} + +/// Locate a debug file based on its build ID. +/// +/// The format of build id paths is documented at: +/// https://sourceware.org/gdb/onlinedocs/gdb/Separate-Debug-Files.html +fn locate_build_id(build_id: &[u8]) -> Option<PathBuf> { + const BUILD_ID_PATH: &str = "/usr/lib/debug/.build-id/"; + const BUILD_ID_SUFFIX: &str = ".debug"; + + if build_id.len() < 2 { + return None; + } + + if !debug_path_exists() { + return None; + } + + let mut path = + String::with_capacity(BUILD_ID_PATH.len() + BUILD_ID_SUFFIX.len() + build_id.len() * 2 + 1); + path.push_str(BUILD_ID_PATH); + path.push(char::from_digit((build_id[0] >> 4) as u32, 16)?); + path.push(char::from_digit((build_id[0] & 0xf) as u32, 16)?); + path.push('/'); + for byte in &build_id[1..] { + path.push(char::from_digit((byte >> 4) as u32, 16)?); + path.push(char::from_digit((byte & 0xf) as u32, 16)?); + } + path.push_str(BUILD_ID_SUFFIX); + Some(PathBuf::from(path)) +} + +/// Locate a file specified in a `.gnu_debuglink` section. +/// +/// `path` is the file containing the section. +/// `filename` is from the contents of the section. 
+/// +/// Search order is based on gdb, documented at: +/// https://sourceware.org/gdb/onlinedocs/gdb/Separate-Debug-Files.html +/// +/// gdb also allows the user to customize the debug search path, but we don't. +/// +/// gdb also supports debuginfod, but we don't yet. +fn locate_debuglink(path: &Path, filename: &OsStr) -> Option<PathBuf> { + let path = fs::canonicalize(path).ok()?; + let parent = path.parent()?; + let mut f = + PathBuf::with_capacity(DEBUG_PATH.len() + parent.as_os_str().len() + filename.len() + 2); + let filename = Path::new(filename); + + // Try "/parent/filename" if it differs from "path" + f.push(parent); + f.push(filename); + if f != path && f.is_file() { + return Some(f); + } + + // Try "/parent/.debug/filename" + f.clear(); + f.push(parent); + f.push(".debug"); + f.push(filename); + if f.is_file() { + return Some(f); + } + + if debug_path_exists() { + // Try "/usr/lib/debug/parent/filename" + f.clear(); + f.push(DEBUG_PATH); + f.push(parent.strip_prefix("/").unwrap()); + f.push(filename); + if f.is_file() { + return Some(f); + } + } + + None +} + +/// Locate a file specified in a `.gnu_debugaltlink` section. +/// +/// `path` is the file containing the section. +/// `filename` and `build_id` are the contents of the section. +/// +/// Search order is based on gdb: +/// - filename, which is either absolute or relative to `path` +/// - the build ID path under `BUILD_ID_PATH` +/// +/// gdb also allows the user to customize the debug search path, but we don't. +/// +/// gdb also supports debuginfod, but we don't yet. +fn locate_debugaltlink(path: &Path, filename: &OsStr, build_id: &[u8]) -> Option<PathBuf> { + let filename = Path::new(filename); + if filename.is_absolute() { + if filename.is_file() { + return Some(filename.into()); + } + } else { + let path = fs::canonicalize(path).ok()?; + let parent = path.parent()?; + let mut f = PathBuf::from(parent); + f.push(filename); + if f.is_file() { + return Some(f); + } + } + + locate_build_id(build_id) +} + +pub(super) fn handle_split_dwarf<'data>( + package: Option<&gimli::DwarfPackage<EndianSlice<'data, Endian>>>, + stash: &'data Stash, + load: addr2line::SplitDwarfLoad<EndianSlice<'data, Endian>>, +) -> Option<Arc<gimli::Dwarf<EndianSlice<'data, Endian>>>> { + if let Some(dwp) = package.as_ref() { + if let Ok(Some(cu)) = dwp.find_cu(load.dwo_id, &load.parent) { + return Some(Arc::new(cu)); + } + } + + let mut path = PathBuf::new(); + if let Some(p) = load.comp_dir.as_ref() { + path.push(OsStr::from_bytes(&p)); + } + + path.push(OsStr::from_bytes(&load.path.as_ref()?)); + + if let Some(map_dwo) = super::mmap(&path) { + let map_dwo = stash.cache_mmap(map_dwo); + if let Some(dwo) = Object::parse(map_dwo) { + return gimli::Dwarf::load(|id| -> Result<_, ()> { + let data = id + .dwo_name() + .and_then(|name| dwo.section(stash, name)) + .unwrap_or(&[]); + Ok(EndianSlice::new(data, Endian)) + }) + .ok() + .map(|mut dwo_dwarf| { + dwo_dwarf.make_dwo(&load.parent); + Arc::new(dwo_dwarf) + }); + } + } + + None +} diff --git a/vendor/backtrace/src/symbolize/gimli/libs_aix.rs b/vendor/backtrace/src/symbolize/gimli/libs_aix.rs new file mode 100644 index 00000000..01270a71 --- /dev/null +++ b/vendor/backtrace/src/symbolize/gimli/libs_aix.rs @@ -0,0 +1,85 @@ +use super::mystd::env; +use super::mystd::ffi::OsStr; +use super::mystd::os::unix::prelude::*; +use super::xcoff; +use super::{Library, LibrarySegment}; +use alloc::borrow::ToOwned; +use alloc::vec; +use alloc::vec::Vec; +use core::ffi::{c_int, CStr}; +use core::mem; + +const 
EXE_IMAGE_BASE: u64 = 0x100000000; + +unsafe extern "C" { + #[link_name = "_Errno"] + fn errno_location() -> *mut c_int; +} + +fn errno() -> i32 { + unsafe { (*errno_location()) as i32 } +} + +/// On AIX, we use `loadquery` with `L_GETINFO` flag to query libraries mmapped. +/// See https://www.ibm.com/docs/en/aix/7.2?topic=l-loadquery-subroutine for +/// detailed information of `loadquery`. +pub(super) fn native_libraries() -> Vec<Library> { + let mut ret = Vec::new(); + unsafe { + let mut buffer = vec![mem::zeroed::<libc::ld_info>(); 64]; + loop { + if libc::loadquery( + libc::L_GETINFO, + buffer.as_mut_ptr().cast::<libc::c_char>(), + (mem::size_of::<libc::ld_info>() * buffer.len()) as u32, + ) != -1 + { + break; + } else { + match errno() { + libc::ENOMEM => { + buffer.resize(buffer.len() * 2, mem::zeroed::<libc::ld_info>()); + } + _ => { + // If other error occurs, return empty libraries. + return Vec::new(); + } + } + } + } + let mut current = buffer.as_mut_ptr(); + loop { + let text_base = (*current).ldinfo_textorg as usize; + let filename_ptr: *const libc::c_char = &(*current).ldinfo_filename[0]; + let bytes = CStr::from_ptr(filename_ptr).to_bytes(); + let member_name_ptr = filename_ptr.offset((bytes.len() + 1) as isize); + let mut filename = OsStr::from_bytes(bytes).to_owned(); + if text_base == EXE_IMAGE_BASE as usize { + if let Ok(exe) = env::current_exe() { + filename = exe.into_os_string(); + } + } + let bytes = CStr::from_ptr(member_name_ptr).to_bytes(); + let member_name = OsStr::from_bytes(bytes).to_owned(); + if let Some(image) = xcoff::parse_image(filename.as_ref(), &member_name) { + ret.push(Library { + name: filename, + member_name, + segments: vec![LibrarySegment { + stated_virtual_memory_address: image.base as usize, + len: image.size, + }], + bias: (text_base + image.offset).wrapping_sub(image.base as usize), + }); + } + if (*current).ldinfo_next == 0 { + break; + } + current = current + .cast::<libc::c_char>() + .offset((*current).ldinfo_next as isize) + .cast::<libc::ld_info>(); + } + } + return ret; +} diff --git a/vendor/backtrace/src/symbolize/gimli/libs_dl_iterate_phdr.rs b/vendor/backtrace/src/symbolize/gimli/libs_dl_iterate_phdr.rs new file mode 100644 index 00000000..d52819f6 --- /dev/null +++ b/vendor/backtrace/src/symbolize/gimli/libs_dl_iterate_phdr.rs @@ -0,0 +1,122 @@ +// Other Unix (e.g. Linux) platforms use ELF as an object file format +// and typically implement an API called `dl_iterate_phdr` to load +// native libraries. 
+ +use super::mystd::env; +use super::mystd::ffi::{OsStr, OsString}; +use super::mystd::os::unix::prelude::*; +use super::{parse_running_mmaps, Library, LibrarySegment}; +use alloc::borrow::ToOwned; +use alloc::vec::Vec; +use core::ffi::CStr; +use core::slice; + +struct CallbackData { + libs: Vec<Library>, + maps: Option<Vec<parse_running_mmaps::MapsEntry>>, +} +pub(super) fn native_libraries() -> Vec<Library> { + let mut cb_data = CallbackData { + libs: Vec::new(), + #[cfg(not(target_os = "hurd"))] + maps: parse_running_mmaps::parse_maps().ok(), + #[cfg(target_os = "hurd")] + maps: None, + }; + unsafe { + libc::dl_iterate_phdr(Some(callback), core::ptr::addr_of_mut!(cb_data).cast()); + } + cb_data.libs +} + +fn infer_current_exe( + maps: &Option<Vec<parse_running_mmaps::MapsEntry>>, + base_addr: usize, +) -> OsString { + #[cfg(not(target_os = "hurd"))] + if let Some(entries) = maps { + let opt_path = entries + .iter() + .find(|e| e.ip_matches(base_addr) && e.pathname().len() > 0) + .map(|e| e.pathname()) + .cloned(); + if let Some(path) = opt_path { + return path; + } + } + + env::current_exe().map(|e| e.into()).unwrap_or_default() +} + +/// # Safety +/// `info` must be a valid pointer. +/// `data` must be a valid pointer to `CallbackData`. +#[forbid(unsafe_op_in_unsafe_fn)] +unsafe extern "C" fn callback( + info: *mut libc::dl_phdr_info, + _size: libc::size_t, + data: *mut libc::c_void, +) -> libc::c_int { + // SAFETY: We are guaranteed these fields: + let dlpi_addr = unsafe { (*info).dlpi_addr }; + let dlpi_name = unsafe { (*info).dlpi_name }; + let dlpi_phdr = unsafe { (*info).dlpi_phdr }; + let dlpi_phnum = unsafe { (*info).dlpi_phnum }; + // SAFETY: We assured this. + let CallbackData { libs, maps } = unsafe { &mut *data.cast::<CallbackData>() }; + // most implementations give us the main program first + let is_main = libs.is_empty(); + // we may be statically linked, which means we are main and mostly one big blob of code + let is_static = dlpi_addr == 0; + // sometimes we get a null or 0-len CStr, based on libc's whims, but these mean the same thing + let no_given_name = dlpi_name.is_null() + // SAFETY: we just checked for null + || unsafe { *dlpi_name == 0 }; + let name = if is_static { + // don't try to look up our name from /proc/self/maps, it'll get silly + env::current_exe().unwrap_or_default().into_os_string() + } else if is_main && no_given_name { + infer_current_exe(&maps, dlpi_addr as usize) + } else { + // this fallback works even if we are main, because some platforms give the name anyways + if dlpi_name.is_null() { + OsString::new() + } else { + // SAFETY: we just checked for nullness + OsStr::from_bytes(unsafe { CStr::from_ptr(dlpi_name) }.to_bytes()).to_owned() + } + }; + #[cfg(target_os = "android")] + let zip_offset: Option<u64> = { + // only check for ZIP-embedded file if we have data from /proc/self/maps + maps.as_ref().and_then(|maps| { + // check if file is embedded within a ZIP archive by searching for `!/` + super::extract_zip_path_android(&name).and_then(|_| { + // find MapsEntry matching library's base address and get its file offset + maps.iter() + .find(|m| m.ip_matches(dlpi_addr as usize)) + .map(|m| m.offset()) + }) + }) + }; + let headers = if dlpi_phdr.is_null() || dlpi_phnum == 0 { + &[] + } else { + // SAFETY: We just checked for nullness or 0-len slices + unsafe { slice::from_raw_parts(dlpi_phdr, dlpi_phnum as usize) } + }; + libs.push(Library { + name, + #[cfg(target_os = "android")] + zip_offset, + segments: headers + .iter() + .map(|header| 
LibrarySegment { + len: header.p_memsz as usize, + stated_virtual_memory_address: header.p_vaddr as usize, + }) + .collect(), + bias: dlpi_addr as usize, + }); + 0 +} diff --git a/vendor/backtrace/src/symbolize/gimli/libs_haiku.rs b/vendor/backtrace/src/symbolize/gimli/libs_haiku.rs new file mode 100644 index 00000000..ddfd6b47 --- /dev/null +++ b/vendor/backtrace/src/symbolize/gimli/libs_haiku.rs @@ -0,0 +1,50 @@ +// Haiku implements the image_info struct and the get_next_image_info() +// functions to iterate through the loaded executable images. The +// image_info struct contains a pointer to the start of the .text +// section within the virtual address space, as well as the size of +// that section. All the read-only segments of the ELF-binary are in +// that part of the address space. + +use super::mystd::ffi::OsStr; +use super::mystd::os::unix::prelude::*; +use super::{Library, LibrarySegment}; +use alloc::borrow::ToOwned; +use alloc::vec::Vec; +use core::ffi::CStr; +use core::mem::MaybeUninit; + +pub(super) fn native_libraries() -> Vec<Library> { + let mut libraries: Vec<Library> = Vec::new(); + + unsafe { + let mut info = MaybeUninit::<libc::image_info>::zeroed(); + let mut cookie: i32 = 0; + // Load the first image to get a valid info struct + let mut status = + libc::get_next_image_info(libc::B_CURRENT_TEAM, &mut cookie, info.as_mut_ptr()); + if status != libc::B_OK { + return libraries; + } + let mut info = info.assume_init(); + + while status == libc::B_OK { + let mut segments = Vec::new(); + segments.push(LibrarySegment { + stated_virtual_memory_address: 0, + len: info.text_size as usize, + }); + + let bytes = CStr::from_ptr(info.name.as_ptr()).to_bytes(); + let name = OsStr::from_bytes(bytes).to_owned(); + libraries.push(Library { + name: name, + segments: segments, + bias: info.text as usize, + }); + + status = libc::get_next_image_info(libc::B_CURRENT_TEAM, &mut cookie, &mut info); + } + } + + libraries +} diff --git a/vendor/backtrace/src/symbolize/gimli/libs_illumos.rs b/vendor/backtrace/src/symbolize/gimli/libs_illumos.rs new file mode 100644 index 00000000..025eb250 --- /dev/null +++ b/vendor/backtrace/src/symbolize/gimli/libs_illumos.rs @@ -0,0 +1,101 @@ +use super::mystd::ffi::OsStr; +use super::mystd::os::unix::prelude::*; +use super::{Library, LibrarySegment}; +use alloc::borrow::ToOwned; +use alloc::vec::Vec; +use core::ffi::CStr; +use core::mem; +use object::NativeEndian; + +#[cfg(target_pointer_width = "64")] +use object::elf::{FileHeader64 as FileHeader, ProgramHeader64 as ProgramHeader}; + +type EHdr = FileHeader<NativeEndian>; +type PHdr = ProgramHeader<NativeEndian>; + +#[repr(C)] +struct LinkMap { + l_addr: libc::c_ulong, + l_name: *const libc::c_char, + l_ld: *const libc::c_void, + l_next: *const LinkMap, + l_prev: *const LinkMap, + l_refname: *const libc::c_char, +} + +const RTLD_SELF: *const libc::c_void = -3isize as *const libc::c_void; +const RTLD_DI_LINKMAP: libc::c_int = 2; + +unsafe extern "C" { + fn dlinfo( + handle: *const libc::c_void, + request: libc::c_int, + p: *mut libc::c_void, + ) -> libc::c_int; +} + +pub(super) fn native_libraries() -> Vec<Library> { + let mut libs = Vec::new(); + + // Request the current link map from the runtime linker: + let map = unsafe { + let mut map: *const LinkMap = mem::zeroed(); + if dlinfo( + RTLD_SELF, + RTLD_DI_LINKMAP, + core::ptr::addr_of_mut!(map).cast::<libc::c_void>(), + ) != 0 + { + return libs; + } + map + }; + + // Each entry in the link map represents a loaded object: + let mut l = map; + while 
!l.is_null() { + // Fetch the fully qualified path of the loaded object: + let bytes = unsafe { CStr::from_ptr((*l).l_name) }.to_bytes(); + let name = OsStr::from_bytes(bytes).to_owned(); + + // The base address of the object loaded into memory: + let addr = unsafe { (*l).l_addr }; + + // Use the ELF header for this object to locate the program + // header: + let e: *const EHdr = unsafe { (*l).l_addr as *const EHdr }; + let phoff = unsafe { (*e).e_phoff }.get(NativeEndian); + let phnum = unsafe { (*e).e_phnum }.get(NativeEndian); + let etype = unsafe { (*e).e_type }.get(NativeEndian); + + let phdr: *const PHdr = (addr + phoff) as *const PHdr; + let phdr = unsafe { core::slice::from_raw_parts(phdr, phnum as usize) }; + + libs.push(Library { + name, + segments: phdr + .iter() + .map(|p| { + let memsz = p.p_memsz.get(NativeEndian); + let vaddr = p.p_vaddr.get(NativeEndian); + LibrarySegment { + len: memsz as usize, + stated_virtual_memory_address: vaddr as usize, + } + }) + .collect(), + bias: if etype == object::elf::ET_EXEC { + // Program header addresses for the base executable are + // already absolute. + 0 + } else { + // Other addresses are relative to the object base. + addr as usize + }, + }); + + l = unsafe { (*l).l_next }; + } + + libs +} diff --git a/vendor/backtrace/src/symbolize/gimli/libs_libnx.rs b/vendor/backtrace/src/symbolize/gimli/libs_libnx.rs new file mode 100644 index 00000000..7f280780 --- /dev/null +++ b/vendor/backtrace/src/symbolize/gimli/libs_libnx.rs @@ -0,0 +1,28 @@ +use super::{Library, LibrarySegment}; +use alloc::vec::Vec; + +// DevkitA64 doesn't natively support debug info, but the build system will +// place debug info at the path `romfs:/debug_info.elf`. +pub(super) fn native_libraries() -> Vec<Library> { + unsafe extern "C" { + static __start__: u8; + } + + let bias = core::ptr::addr_of!(__start__) as usize; + + let mut ret = Vec::new(); + let mut segments = Vec::new(); + segments.push(LibrarySegment { + stated_virtual_memory_address: 0, + len: usize::max_value() - bias, + }); + + let path = "romfs:/debug_info.elf"; + ret.push(Library { + name: path.into(), + segments, + bias, + }); + + ret +} diff --git a/vendor/backtrace/src/symbolize/gimli/libs_macos.rs b/vendor/backtrace/src/symbolize/gimli/libs_macos.rs new file mode 100644 index 00000000..f65811b2 --- /dev/null +++ b/vendor/backtrace/src/symbolize/gimli/libs_macos.rs @@ -0,0 +1,154 @@ +#![allow(deprecated)] + +use super::mystd::ffi::OsStr; +use super::mystd::os::unix::prelude::*; +use super::mystd::prelude::v1::*; +use super::{Library, LibrarySegment}; +use core::convert::TryInto; +use core::ffi::CStr; +use core::mem; + +// FIXME: replace with ptr::from_ref once MSRV is high enough +#[inline(always)] +#[must_use] +const fn ptr_from_ref<T: ?Sized>(r: &T) -> *const T { + r +} + +pub(super) fn native_libraries() -> Vec<Library> { + let mut ret = Vec::new(); + let images = unsafe { libc::_dyld_image_count() }; + for i in 0..images { + ret.extend(native_library(i)); + } + return ret; +} + +fn native_library(i: u32) -> Option<Library> { + use object::macho; + use object::read::macho::{MachHeader, Segment}; + use object::NativeEndian; + + // Fetch the name of this library which corresponds to the path of + // where to load it as well. 
+ let name = unsafe { + let name = libc::_dyld_get_image_name(i); + if name.is_null() { + return None; + } + CStr::from_ptr(name) + }; + + // Load the image header of this library and delegate to `object` to + // parse all the load commands so we can figure out all the segments + // involved here. + let (mut load_commands, endian) = unsafe { + let header = libc::_dyld_get_image_header(i); + if header.is_null() { + return None; + } + match (*header).magic { + macho::MH_MAGIC => { + let endian = NativeEndian; + let header = &*header.cast::<macho::MachHeader32<NativeEndian>>(); + let data = core::slice::from_raw_parts( + ptr_from_ref(header).cast::<u8>(), + mem::size_of_val(header) + header.sizeofcmds.get(endian) as usize, + ); + (header.load_commands(endian, data, 0).ok()?, endian) + } + macho::MH_MAGIC_64 => { + let endian = NativeEndian; + let header = &*header.cast::<macho::MachHeader64<NativeEndian>>(); + let data = core::slice::from_raw_parts( + ptr_from_ref(header).cast::<u8>(), + mem::size_of_val(header) + header.sizeofcmds.get(endian) as usize, + ); + (header.load_commands(endian, data, 0).ok()?, endian) + } + _ => return None, + } + }; + + // Iterate over the segments and register known regions for segments + // that we find. Additionally record information about text segments + // for processing later, see comments below. + let mut segments = Vec::new(); + let mut first_text = 0; + let mut text_fileoff_zero = false; + while let Some(cmd) = load_commands.next().ok()? { + if let Some((seg, _)) = cmd.segment_32().ok()? { + if seg.name() == b"__TEXT" { + first_text = segments.len(); + if seg.fileoff(endian) == 0 && seg.filesize(endian) > 0 { + text_fileoff_zero = true; + } + } + segments.push(LibrarySegment { + len: seg.vmsize(endian).try_into().ok()?, + stated_virtual_memory_address: seg.vmaddr(endian).try_into().ok()?, + }); + } + if let Some((seg, _)) = cmd.segment_64().ok()? { + if seg.name() == b"__TEXT" { + first_text = segments.len(); + if seg.fileoff(endian) == 0 && seg.filesize(endian) > 0 { + text_fileoff_zero = true; + } + } + segments.push(LibrarySegment { + len: seg.vmsize(endian).try_into().ok()?, + stated_virtual_memory_address: seg.vmaddr(endian).try_into().ok()?, + }); + } + } + + // Determine the "slide" for this library which ends up being the + // bias we use to figure out where in memory objects are loaded. + // This is a bit of a weird computation though and is the result of + // trying a few things in the wild and seeing what sticks. + // + // The general idea is that the `bias` plus a segment's + // `stated_virtual_memory_address` is going to be where in the + // actual address space the segment resides. The other thing we rely + // on though is that a real address minus the `bias` is the index to + // look up in the symbol table and debuginfo. + // + // It turns out, though, that for system loaded libraries these + // calculations are incorrect. For native executables, however, it + // appears correct. Lifting some logic from LLDB's source, it has + // some special-casing for the first `__TEXT` section loaded from + // file offset 0 with a nonzero size. For whatever reason when this + // is present it appears to mean that the symbol table is relative + // to just the vmaddr slide for the library. If it's *not* present + // then the symbol table is relative to the vmaddr slide plus the + // segment's stated address. 
+ // + // To handle this situation if we *don't* find a text section at + // file offset zero then we increase the bias by the first text + // section's stated address and decrease all stated addresses by + // that amount as well. That way the symbol table always appears + // relative to the library's bias amount. This appears to have the + // right results for symbolizing via the symbol table. + // + // Honestly I'm not entirely sure whether this is right or if + // there's something else that should indicate how to do this. For + // now though this seems to work well enough (?) and we should + // always be able to tweak this over time if necessary. + // + // For some more information see #318 + let mut slide = unsafe { libc::_dyld_get_image_vmaddr_slide(i) as usize }; + if !text_fileoff_zero { + let adjust = segments[first_text].stated_virtual_memory_address; + for segment in segments.iter_mut() { + segment.stated_virtual_memory_address -= adjust; + } + slide += adjust; + } + + Some(Library { + name: OsStr::from_bytes(name.to_bytes()).to_owned(), + segments, + bias: slide, + }) +} diff --git a/vendor/backtrace/src/symbolize/gimli/libs_windows.rs b/vendor/backtrace/src/symbolize/gimli/libs_windows.rs new file mode 100644 index 00000000..1d9a74cc --- /dev/null +++ b/vendor/backtrace/src/symbolize/gimli/libs_windows.rs @@ -0,0 +1,158 @@ +use super::super::super::windows_sys::*; +use super::mystd::ffi::OsString; +use super::{coff, mmap, Library, LibrarySegment}; +use alloc::vec; +use alloc::vec::Vec; +use core::mem; +use core::mem::MaybeUninit; + +// For loading native libraries on Windows, see some discussion on +// rust-lang/rust#71060 for the various strategies here. +pub(super) fn native_libraries() -> Vec<Library> { + let mut ret = Vec::new(); + unsafe { + add_loaded_images(&mut ret); + } + return ret; +} + +unsafe fn add_loaded_images(ret: &mut Vec<Library>) { + unsafe { + let snap = CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, 0); + if snap == INVALID_HANDLE_VALUE { + return; + } + + // huge struct, probably should avoid manually initializing it even if we can + let mut me = MaybeUninit::<MODULEENTRY32W>::zeroed().assume_init(); + me.dwSize = mem::size_of_val(&me) as u32; + if Module32FirstW(snap, &mut me) == TRUE { + loop { + if let Some(lib) = load_library(&me) { + ret.push(lib); + } + + if Module32NextW(snap, &mut me) != TRUE { + break; + } + } + } + + CloseHandle(snap); + } +} + +// Safety: long_path should be null-terminated +#[cfg(target_os = "cygwin")] +unsafe fn get_posix_path(long_path: &[u16]) -> Option<OsString> { + use super::mystd::os::unix::ffi::OsStringExt; + + unsafe extern "C" { + // Doc: https://cygwin.com/cygwin-api/func-cygwin-conv-path.html + // Src: https://github.com/cygwin/cygwin/blob/718a15ba50e0d01c79800bd658c2477f9a603540/winsup/cygwin/path.cc#L3902 + // Safety: + // * `what` should be `CCP_WIN_W_TO_POSIX` here + // * `from` is null-terminated UTF-16 path + // * `to` is buffer, the buffer size is `size`. + fn cygwin_conv_path( + what: libc::c_uint, + from: *const u16, + to: *mut u8, + size: libc::size_t, + ) -> libc::ssize_t; + } + const CCP_WIN_W_TO_POSIX: libc::c_uint = 3; + + // If `size` is 0, returns needed buffer size, including null terminator; + // or -1 if error. + // Safety: if `size` is 0, `to` is not used. + let name_len = unsafe { + cygwin_conv_path( + CCP_WIN_W_TO_POSIX, + long_path.as_ptr(), + core::ptr::null_mut(), + 0, + ) + }; + // Expect at least 1 for null terminator. + // It's not likely to return error here. 
+ if name_len < 1 { + return None; + } + let name_len = name_len as usize; + let mut name_buffer = Vec::with_capacity(name_len); + // Safety: `name_buffer` is large enough. + let res = unsafe { + cygwin_conv_path( + CCP_WIN_W_TO_POSIX, + long_path.as_ptr(), + name_buffer.as_mut_ptr(), + name_len, + ) + }; + // It's not likely to return error here. + if res != 0 { + return None; + } + // Remove the null terminator. + unsafe { name_buffer.set_len(name_len - 1) }; + let name = OsString::from_vec(name_buffer); + Some(name) +} + +unsafe fn load_library(me: &MODULEENTRY32W) -> Option<Library> { + #[cfg(windows)] + let name = { + use super::mystd::os::windows::prelude::*; + let pos = me + .szExePath + .iter() + .position(|i| *i == 0) + .unwrap_or(me.szExePath.len()); + OsString::from_wide(&me.szExePath[..pos]) + }; + #[cfg(target_os = "cygwin")] + // Safety: the path with max length MAX_PATH always contains a null + // terminator. Don't slice it. + let name = unsafe { get_posix_path(&me.szExePath[..])? }; + + // MinGW libraries currently don't support ASLR + // (rust-lang/rust#16514), but DLLs can still be relocated around in + // the address space. It appears that addresses in debug info are + // all as-if this library was loaded at its "image base", which is a + // field in its COFF file headers. Since this is what debuginfo + // seems to list we parse the symbol table and store addresses as if + // the library was loaded at "image base" as well. + // + // The library may not be loaded at "image base", however. + // (presumably something else may be loaded there?) This is where + // the `bias` field comes into play, and we need to figure out the + // value of `bias` here. Unfortunately though it's not clear how to + // acquire this from a loaded module. What we do have, however, is + // the actual load address (`modBaseAddr`). + // + // As a bit of a cop-out for now we mmap the file, read the file + // header information, then drop the mmap. This is wasteful because + // we'll probably reopen the mmap later, but this should work well + // enough for now. + // + // Once we have the `image_base` (desired load location) and the + // `base_addr` (actual load location) we can fill in the `bias` + // (difference between the actual and desired) and then the stated + // address of each segment is the `image_base` since that's what the + // file says. + // + // For now it appears that unlike ELF/MachO we can make do with one + // segment per library, using `modBaseSize` as the whole size. 
+ let mmap = mmap(name.as_ref())?; + let image_base = coff::get_image_base(&mmap)?; + let base_addr = me.modBaseAddr as usize; + Some(Library { + name, + bias: base_addr.wrapping_sub(image_base), + segments: vec![LibrarySegment { + stated_virtual_memory_address: image_base, + len: me.modBaseSize as usize, + }], + }) +} diff --git a/vendor/backtrace/src/symbolize/gimli/lru.rs b/vendor/backtrace/src/symbolize/gimli/lru.rs new file mode 100644 index 00000000..b7cf5a5b --- /dev/null +++ b/vendor/backtrace/src/symbolize/gimli/lru.rs @@ -0,0 +1,75 @@ +use core::mem::{self, MaybeUninit}; +use core::ptr; + +/// least-recently-used cache with static size +pub(crate) struct Lru<T, const N: usize> { + // SAFETY: len <= initialized values + len: usize, + arr: [MaybeUninit<T>; N], +} + +impl<T, const N: usize> Default for Lru<T, N> { + fn default() -> Self { + Lru { + len: 0, + arr: [const { MaybeUninit::uninit() }; N], + } + } +} + +impl<T, const N: usize> Lru<T, N> { + #[inline] + pub fn clear(&mut self) { + let len = self.len; + self.len = 0; + // SAFETY: we can't touch these values again due to setting self.len = 0 + unsafe { ptr::drop_in_place(ptr::addr_of_mut!(self.arr[0..len]) as *mut [T]) } + } + + #[inline] + pub fn iter(&self) -> impl Iterator<Item = &T> { + self.arr[0..self.len] + .iter() + // SAFETY: we only iterate initialized values due to our len invariant + .map(|init| unsafe { init.assume_init_ref() }) + } + + #[inline] + pub fn push_front(&mut self, value: T) -> Option<&mut T> { + if N == 0 { + return None; + } else if self.len == N { + self.len = N - 1; + // SAFETY: we maintain len invariant and bail on N == 0 + unsafe { ptr::drop_in_place(self.arr.as_mut_ptr().cast::<T>().add(N - 1)) }; + }; + let len_to_init = self.len + 1; + let mut last = MaybeUninit::new(value); + for elem in self.arr[0..len_to_init].iter_mut() { + // OPT(size): using `mem::swap` allows surprising size regressions + last = mem::replace(elem, last); + } + self.len = len_to_init; + + self.arr + .first_mut() + // SAFETY: we just pushed it + .map(|elem| unsafe { elem.assume_init_mut() }) + } + + #[inline] + pub fn move_to_front(&mut self, idx: usize) -> Option<&mut T> { + let elem = self.arr[0..self.len].get_mut(idx)?; + // SAFETY: we already bailed if the index is bad, so our slicing will be infallible, + // so it is permissible to allow the len invariant to decay, as we always restore it + let mut last = mem::replace(elem, MaybeUninit::uninit()); + for elem in self.arr[0..=idx].iter_mut() { + // OPT(size): using `mem::swap` allows surprising size regressions + last = mem::replace(elem, last); + } + self.arr + .first_mut() + // SAFETY: we have restored the len invariant + .map(|elem| unsafe { elem.assume_init_mut() }) + } +} diff --git a/vendor/backtrace/src/symbolize/gimli/macho.rs b/vendor/backtrace/src/symbolize/gimli/macho.rs new file mode 100644 index 00000000..fcbe6098 --- /dev/null +++ b/vendor/backtrace/src/symbolize/gimli/macho.rs @@ -0,0 +1,318 @@ +use super::mystd::path::Path; +use super::{gimli, Context, Endian, EndianSlice, Mapping, Stash}; +use alloc::boxed::Box; +use alloc::sync::Arc; +use alloc::vec::Vec; +use core::convert::TryInto; +use object::macho; +use object::read::macho::{MachHeader, Nlist, Section, Segment as _}; +use object::{Bytes, NativeEndian}; + +#[cfg(target_pointer_width = "32")] +type Mach = object::macho::MachHeader32<NativeEndian>; +#[cfg(target_pointer_width = "64")] +type Mach = object::macho::MachHeader64<NativeEndian>; +type MachSegment = <Mach as MachHeader>::Segment; +type 
MachSection = <Mach as MachHeader>::Section; +type MachNlist = <Mach as MachHeader>::Nlist; + +impl Mapping { + // The loading path for macOS is so different we just have a completely + // different implementation of the function here. On macOS we need to go + // probing the filesystem for a bunch of files. + pub fn new(path: &Path) -> Option<Mapping> { + // First up we need to load the unique UUID which is stored in the macho + // header of the file we're reading, specified at `path`. + let map = super::mmap(path)?; + let (macho, data) = find_header(&map)?; + let endian = macho.endian().ok()?; + let uuid = macho.uuid(endian, data, 0).ok()?; + + // Next we need to look for a `*.dSYM` file. For now we just probe the + // containing directory and look around for something that matches + // `*.dSYM`. Once it's found we root through the dwarf resources that it + // contains and try to find a macho file which has a matching UUID as + // the one of our own file. If we find a match that's the dwarf file we + // want to return. + if let Some(uuid) = uuid { + if let Some(parent) = path.parent() { + if let Some(mapping) = Mapping::load_dsym(parent, uuid) { + return Some(mapping); + } + } + } + + // Looks like nothing matched our UUID, so let's at least return our own + // file. This should have the symbol table for at least some + // symbolication purposes. + Mapping::mk(map, |data, stash| { + let (macho, data) = find_header(data)?; + let endian = macho.endian().ok()?; + let obj = Object::parse(macho, endian, data)?; + Context::new(stash, obj, None, None) + }) + } + + fn load_dsym(dir: &Path, uuid: [u8; 16]) -> Option<Mapping> { + for entry in dir.read_dir().ok()? { + let entry = entry.ok()?; + let filename = match entry.file_name().into_string() { + Ok(name) => name, + Err(_) => continue, + }; + if !filename.ends_with(".dSYM") { + continue; + } + let candidates = entry.path().join("Contents/Resources/DWARF"); + if let Some(mapping) = Mapping::try_dsym_candidate(&candidates, uuid) { + return Some(mapping); + } + } + None + } + + fn try_dsym_candidate(dir: &Path, uuid: [u8; 16]) -> Option<Mapping> { + // Look for files in the `DWARF` directory which have a matching uuid to + // the original object file. If we find one then we found the debug + // information. + for entry in dir.read_dir().ok()? { + let entry = entry.ok()?; + let map = super::mmap(&entry.path())?; + let candidate = Mapping::mk(map, |data, stash| { + let (macho, data) = find_header(data)?; + let endian = macho.endian().ok()?; + let entry_uuid = macho.uuid(endian, data, 0).ok()??; + if entry_uuid != uuid { + return None; + } + let obj = Object::parse(macho, endian, data)?; + Context::new(stash, obj, None, None) + }); + if let Some(candidate) = candidate { + return Some(candidate); + } + } + + None + } +} + +fn find_header(data: &'_ [u8]) -> Option<(&'_ Mach, &'_ [u8])> { + use object::endian::BigEndian; + + let desired_cpu = || { + if cfg!(target_arch = "x86") { + Some(macho::CPU_TYPE_X86) + } else if cfg!(target_arch = "x86_64") { + Some(macho::CPU_TYPE_X86_64) + } else if cfg!(target_arch = "arm") { + Some(macho::CPU_TYPE_ARM) + } else if cfg!(target_arch = "aarch64") { + Some(macho::CPU_TYPE_ARM64) + } else { + None + } + }; + + let mut data = Bytes(data); + match data + .clone() + .read::<object::endian::U32<NativeEndian>>() + .ok()? 
+ .get(NativeEndian) + { + macho::MH_MAGIC_64 | macho::MH_CIGAM_64 | macho::MH_MAGIC | macho::MH_CIGAM => {} + + macho::FAT_MAGIC | macho::FAT_CIGAM => { + let mut header_data = data; + let endian = BigEndian; + let header = header_data.read::<macho::FatHeader>().ok()?; + let nfat = header.nfat_arch.get(endian); + let arch = (0..nfat) + .filter_map(|_| header_data.read::<macho::FatArch32>().ok()) + .find(|arch| desired_cpu() == Some(arch.cputype.get(endian)))?; + let offset = arch.offset.get(endian); + let size = arch.size.get(endian); + data = data + .read_bytes_at(offset.try_into().ok()?, size.try_into().ok()?) + .ok()?; + } + + macho::FAT_MAGIC_64 | macho::FAT_CIGAM_64 => { + let mut header_data = data; + let endian = BigEndian; + let header = header_data.read::<macho::FatHeader>().ok()?; + let nfat = header.nfat_arch.get(endian); + let arch = (0..nfat) + .filter_map(|_| header_data.read::<macho::FatArch64>().ok()) + .find(|arch| desired_cpu() == Some(arch.cputype.get(endian)))?; + let offset = arch.offset.get(endian); + let size = arch.size.get(endian); + data = data + .read_bytes_at(offset.try_into().ok()?, size.try_into().ok()?) + .ok()?; + } + + _ => return None, + } + + Mach::parse(data.0, 0).ok().map(|h| (h, data.0)) +} + +// This is used both for executables/libraries and source object files. +pub struct Object<'a> { + endian: NativeEndian, + data: &'a [u8], + dwarf: Option<&'a [MachSection]>, + syms: Vec<(&'a [u8], u64)>, + syms_sort_by_name: bool, + // Only set for executables/libraries, and not the source object files. + object_map: Option<object::ObjectMap<'a>>, + // The outer Option is for lazy loading, and the inner Option allows load errors to be cached. + object_mappings: Box<[Option<Option<Mapping>>]>, +} + +impl<'a> Object<'a> { + fn parse(mach: &'a Mach, endian: NativeEndian, data: &'a [u8]) -> Option<Object<'a>> { + let is_object = mach.filetype(endian) == object::macho::MH_OBJECT; + let mut dwarf = None; + let mut syms = Vec::new(); + let mut syms_sort_by_name = false; + let mut commands = mach.load_commands(endian, data, 0).ok()?; + let mut object_map = None; + let mut object_mappings = Vec::new(); + while let Ok(Some(command)) = commands.next() { + if let Some((segment, section_data)) = MachSegment::from_command(command).ok()? { + // Object files should have all sections in a single unnamed segment load command. + if segment.name() == b"__DWARF" || (is_object && segment.name() == b"") { + dwarf = segment.sections(endian, section_data).ok(); + } + } else if let Some(symtab) = command.symtab().ok()? { + let symbols = symtab.symbols::<Mach, _>(endian, data).ok()?; + syms = symbols + .iter() + .filter_map(|nlist: &MachNlist| { + let name = nlist.name(endian, symbols.strings()).ok()?; + if name.len() > 0 && nlist.is_definition() { + Some((name, u64::from(nlist.n_value(endian)))) + } else { + None + } + }) + .collect(); + if is_object { + // We never search object file symbols by address. + // Instead, we already know the symbol name from the executable, and we + // need to search by name to find the matching symbol in the object file. 
+ syms.sort_unstable_by_key(|(name, _)| *name); + syms_sort_by_name = true; + } else { + syms.sort_unstable_by_key(|(_, addr)| *addr); + let map = symbols.object_map(endian); + object_mappings.resize_with(map.objects().len(), || None); + object_map = Some(map); + } + } + } + + Some(Object { + endian, + data, + dwarf, + syms, + syms_sort_by_name, + object_map, + object_mappings: object_mappings.into_boxed_slice(), + }) + } + + pub fn section(&self, _: &Stash, name: &str) -> Option<&'a [u8]> { + let name = name.as_bytes(); + let dwarf = self.dwarf?; + let section = dwarf.into_iter().find(|section| { + let section_name = section.name(); + section_name == name || { + section_name.starts_with(b"__") + && name.starts_with(b".") + && &section_name[2..] == &name[1..] + } + })?; + Some(section.data(self.endian, self.data).ok()?) + } + + pub fn search_symtab<'b>(&'b self, addr: u64) -> Option<&'b [u8]> { + debug_assert!(!self.syms_sort_by_name); + let i = match self.syms.binary_search_by_key(&addr, |(_, addr)| *addr) { + Ok(i) => i, + Err(i) => i.checked_sub(1)?, + }; + let (sym, _addr) = self.syms.get(i)?; + Some(sym) + } + + /// Try to load a context for an object file. + /// + /// If dsymutil was not run, then the DWARF may be found in the source object files. + pub(super) fn search_object_map<'b>(&'b mut self, addr: u64) -> Option<(&'b Context<'b>, u64)> { + // `object_map` contains a map from addresses to symbols and object paths. + // Look up the address and get a mapping for the object. + let object_map = self.object_map.as_ref()?; + let symbol = object_map.get(addr)?; + let object_index = symbol.object_index(); + let mapping = self.object_mappings.get_mut(object_index)?; + if mapping.is_none() { + // No cached mapping, so create it. + *mapping = Some(object_mapping(object_map.objects().get(object_index)?)); + } + let cx: &'b Context<'static> = &mapping.as_ref()?.as_ref()?.cx; + // Don't leak the `'static` lifetime, make sure it's scoped to just ourselves. + let cx = unsafe { core::mem::transmute::<&'b Context<'static>, &'b Context<'b>>(cx) }; + + // We must translate the address in order to be able to look it up + // in the DWARF in the object file. + debug_assert!(cx.object.syms.is_empty() || cx.object.syms_sort_by_name); + let i = cx + .object + .syms + .binary_search_by_key(&symbol.name(), |(name, _)| *name) + .ok()?; + let object_symbol = cx.object.syms.get(i)?; + let object_addr = addr + .wrapping_sub(symbol.address()) + .wrapping_add(object_symbol.1); + Some((cx, object_addr)) + } +} + +fn object_mapping(file: &object::read::ObjectMapFile<'_>) -> Option<Mapping> { + use super::mystd::ffi::OsStr; + use super::mystd::os::unix::prelude::*; + + let map = super::mmap(Path::new(OsStr::from_bytes(file.path())))?; + let member_name = file.member(); + Mapping::mk(map, |data, stash| { + let data = match member_name { + Some(member_name) => { + let archive = object::read::archive::ArchiveFile::parse(data).ok()?; + let member = archive + .members() + .filter_map(Result::ok) + .find(|m| m.name() == member_name)?; + member.data(data).ok()? 
+ } + None => data, + }; + let (macho, data) = find_header(data)?; + let endian = macho.endian().ok()?; + let obj = Object::parse(macho, endian, data)?; + Context::new(stash, obj, None, None) + }) +} + +pub(super) fn handle_split_dwarf<'data>( + _package: Option<&gimli::DwarfPackage<EndianSlice<'data, Endian>>>, + _stash: &'data Stash, + _load: addr2line::SplitDwarfLoad<EndianSlice<'data, Endian>>, +) -> Option<Arc<gimli::Dwarf<EndianSlice<'data, Endian>>>> { + None +} diff --git a/vendor/backtrace/src/symbolize/gimli/mmap_fake.rs b/vendor/backtrace/src/symbolize/gimli/mmap_fake.rs new file mode 100644 index 00000000..71697fc3 --- /dev/null +++ b/vendor/backtrace/src/symbolize/gimli/mmap_fake.rs @@ -0,0 +1,27 @@ +use super::mystd::io::{Read, Seek, SeekFrom}; +use super::File; +use alloc::vec::Vec; +use core::ops::Deref; + +pub struct Mmap { + vec: Vec<u8>, +} + +impl Mmap { + pub unsafe fn map(mut file: &File, len: usize, offset: u64) -> Option<Mmap> { + let mut mmap = Mmap { + vec: Vec::with_capacity(len), + }; + // Bail out if the seek fails rather than silently reading from the wrong offset. + file.seek(SeekFrom::Start(offset)).ok()?; + file.read_to_end(&mut mmap.vec).ok()?; + Some(mmap) + } +} + +impl Deref for Mmap { + type Target = [u8]; + + fn deref(&self) -> &[u8] { + &self.vec[..] + } +} diff --git a/vendor/backtrace/src/symbolize/gimli/mmap_unix.rs b/vendor/backtrace/src/symbolize/gimli/mmap_unix.rs new file mode 100644 index 00000000..24ebeb3c --- /dev/null +++ b/vendor/backtrace/src/symbolize/gimli/mmap_unix.rs @@ -0,0 +1,51 @@ +use super::mystd::fs::File; +use super::mystd::os::unix::prelude::*; +use core::ops::Deref; +use core::ptr; +use core::slice; + +#[cfg(not(all(target_os = "linux", target_env = "gnu")))] +use libc::mmap as mmap64; +#[cfg(all(target_os = "linux", target_env = "gnu"))] +use libc::mmap64; + +pub struct Mmap { + ptr: *mut libc::c_void, + len: usize, +} + +impl Mmap { + pub unsafe fn map(file: &File, len: usize, offset: u64) -> Option<Mmap> { + let ptr = unsafe { + mmap64( + ptr::null_mut(), + len, + libc::PROT_READ, + libc::MAP_PRIVATE, + file.as_raw_fd(), + offset.try_into().ok()?, + ) + }; + if ptr == libc::MAP_FAILED { + return None; + } + Some(Mmap { ptr, len }) + } +} + +impl Deref for Mmap { + type Target = [u8]; + + fn deref(&self) -> &[u8] { + unsafe { slice::from_raw_parts(self.ptr as *const u8, self.len) } + } +} + +impl Drop for Mmap { + fn drop(&mut self) { + unsafe { + let r = libc::munmap(self.ptr, self.len); + debug_assert_eq!(r, 0); + } + } +} diff --git a/vendor/backtrace/src/symbolize/gimli/mmap_windows.rs b/vendor/backtrace/src/symbolize/gimli/mmap_windows.rs new file mode 100644 index 00000000..d3c02723 --- /dev/null +++ b/vendor/backtrace/src/symbolize/gimli/mmap_windows.rs @@ -0,0 +1,67 @@ +use super::super::super::windows_sys::*; + +use super::mystd::fs::File; +use super::mystd::os::windows::prelude::*; +use core::ffi::c_void; +use core::ops::Deref; +use core::ptr; +use core::slice; + +pub struct Mmap { + // keep the file alive to prevent it from being deleted which would cause + // us to read bad data. 
+ _file: File, + ptr: *mut c_void, + len: usize, +} + +impl Mmap { + pub unsafe fn map(file: &File, len: usize, offset: u64) -> Option<Mmap> { + unsafe { + let file = file.try_clone().ok()?; + let mapping = CreateFileMappingA( + file.as_raw_handle(), + ptr::null_mut(), + PAGE_READONLY, + 0, + 0, + ptr::null(), + ); + if mapping.is_null() { + return None; + } + let ptr = MapViewOfFile( + mapping, + FILE_MAP_READ, + (offset >> 32) as u32, + offset as u32, + len, + ); + CloseHandle(mapping); + if ptr.Value.is_null() { + return None; + } + Some(Mmap { + _file: file, + ptr: ptr.Value, + len, + }) + } + } +} +impl Deref for Mmap { + type Target = [u8]; + + fn deref(&self) -> &[u8] { + unsafe { slice::from_raw_parts(self.ptr.cast_const().cast::<u8>(), self.len) } + } +} + +impl Drop for Mmap { + fn drop(&mut self) { + unsafe { + let r = UnmapViewOfFile(MEMORY_MAPPED_VIEW_ADDRESS { Value: self.ptr }); + debug_assert!(r != 0); + } + } +} diff --git a/vendor/backtrace/src/symbolize/gimli/parse_running_mmaps_unix.rs b/vendor/backtrace/src/symbolize/gimli/parse_running_mmaps_unix.rs new file mode 100644 index 00000000..5803d5dc --- /dev/null +++ b/vendor/backtrace/src/symbolize/gimli/parse_running_mmaps_unix.rs @@ -0,0 +1,304 @@ +// Note: This file is only currently used on targets that call out to the code +// in `mod libs_dl_iterate_phdr` (e.g. linux, freebsd, ...); it may be more +// general purpose, but it hasn't been tested elsewhere. + +use super::mystd::ffi::OsString; +use super::mystd::fs::File; +use super::mystd::io::Read; +use alloc::string::String; +use alloc::vec::Vec; +use core::str::FromStr; + +#[derive(PartialEq, Eq, Debug)] +pub(super) struct MapsEntry { + /// start (inclusive) and limit (exclusive) of address range. + address: (usize, usize), + /// The perms field contains the permissions for the entry + /// + /// r = read + /// w = write + /// x = execute + /// s = shared + /// p = private (copy on write) + perms: [char; 4], + /// Offset into the file (or "whatever"). + offset: u64, + /// device (major, minor) + dev: (usize, usize), + /// inode on the device. 0 indicates that no inode is associated with the memory region (e.g. uninitialized data aka BSS). + inode: usize, + /// Usually the file backing the mapping. + /// + /// Note: The man page for proc includes a note about "coordination" by + /// using readelf to see the Offset field in ELF program headers. pnkfelix + /// is not yet sure if that is intended to be a comment on pathname, or what + /// form/purpose such coordination is meant to have. + /// + /// There are also some pseudo-paths: + /// "[stack]": The initial process's (aka main thread's) stack. + /// "[stack:<tid>]": a specific thread's stack. (This was only present for a limited range of Linux versions; it was determined to be too expensive to provide.) + /// "[vdso]": Virtual dynamically linked shared object + /// "[heap]": The process's heap + /// + /// The pathname can be blank, which means it is an anonymous mapping + /// obtained via mmap. + /// + /// Newlines in pathname are replaced with an octal escape sequence. + /// + /// The pathname may have "(deleted)" appended onto it if the file-backed + /// path has been deleted. + /// + /// Note that modifications like the latter two indicated above imply that + /// in general the pathname may be ambiguous. (I.e. you cannot tell if the + /// denoted filename actually ended with the text "(deleted)", or if that + /// was added by the maps rendering.) 
+ pathname: OsString, +} + +pub(super) fn parse_maps() -> Result<Vec<MapsEntry>, &'static str> { + let mut v = Vec::new(); + let mut proc_self_maps = + File::open("/proc/self/maps").map_err(|_| "Couldn't open /proc/self/maps")?; + let mut buf = String::new(); + let _bytes_read = proc_self_maps + .read_to_string(&mut buf) + .map_err(|_| "Couldn't read /proc/self/maps")?; + for line in buf.lines() { + v.push(line.parse()?); + } + + Ok(v) +} + +impl MapsEntry { + pub(super) fn pathname(&self) -> &OsString { + &self.pathname + } + + pub(super) fn ip_matches(&self, ip: usize) -> bool { + self.address.0 <= ip && ip < self.address.1 + } + + #[cfg(target_os = "android")] + pub(super) fn offset(&self) -> u64 { + self.offset + } +} + +impl FromStr for MapsEntry { + type Err = &'static str; + + // Format: address perms offset dev inode pathname + // e.g.: "ffffffffff600000-ffffffffff601000 --xp 00000000 00:00 0 [vsyscall]" + // e.g.: "7f5985f46000-7f5985f48000 rw-p 00039000 103:06 76021795 /usr/lib/x86_64-linux-gnu/ld-linux-x86-64.so.2" + // e.g.: "35b1a21000-35b1a22000 rw-p 00000000 00:00 0" + // + // Note that paths may contain spaces, so we can't use `str::split` for parsing (until + // Split::remainder is stabilized #77998). + fn from_str(s: &str) -> Result<Self, Self::Err> { + let (range_str, s) = s.trim_start().split_once(' ').unwrap_or((s, "")); + if range_str.is_empty() { + return Err("Couldn't find address"); + } + + let (perms_str, s) = s.trim_start().split_once(' ').unwrap_or((s, "")); + if perms_str.is_empty() { + return Err("Couldn't find permissions"); + } + + let (offset_str, s) = s.trim_start().split_once(' ').unwrap_or((s, "")); + if offset_str.is_empty() { + return Err("Couldn't find offset"); + } + + let (dev_str, s) = s.trim_start().split_once(' ').unwrap_or((s, "")); + if dev_str.is_empty() { + return Err("Couldn't find dev"); + } + + let (inode_str, s) = s.trim_start().split_once(' ').unwrap_or((s, "")); + if inode_str.is_empty() { + return Err("Couldn't find inode"); + } + + // Pathname may be omitted in which case it will be empty + let pathname_str = s.trim_start(); + + let hex = |s| usize::from_str_radix(s, 16).map_err(|_| "Couldn't parse hex number"); + let hex64 = |s| u64::from_str_radix(s, 16).map_err(|_| "Couldn't parse hex number"); + + let address = if let Some((start, limit)) = range_str.split_once('-') { + (hex(start)?, hex(limit)?) + } else { + return Err("Couldn't parse address range"); + }; + let perms: [char; 4] = { + let mut chars = perms_str.chars(); + let mut c = || chars.next().ok_or("insufficient perms"); + let perms = [c()?, c()?, c()?, c()?]; + if chars.next().is_some() { + return Err("too many perms"); + } + perms + }; + let offset = hex64(offset_str)?; + let dev = if let Some((major, minor)) = dev_str.split_once(':') { + (hex(major)?, hex(minor)?) + } else { + return Err("Couldn't parse dev"); + }; + let inode = hex(inode_str)?; + let pathname = pathname_str.into(); + + Ok(MapsEntry { + address, + perms, + offset, + dev, + inode, + pathname, + }) + } +} + +// Make sure we can parse 64-bit sample output if we're on a 64-bit target. 
+#[cfg(target_pointer_width = "64")] +#[test] +fn check_maps_entry_parsing_64bit() { + assert_eq!( + "ffffffffff600000-ffffffffff601000 --xp 00000000 00:00 0 \ + [vsyscall]" + .parse::<MapsEntry>() + .unwrap(), + MapsEntry { + address: (0xffffffffff600000, 0xffffffffff601000), + perms: ['-', '-', 'x', 'p'], + offset: 0x00000000, + dev: (0x00, 0x00), + inode: 0x0, + pathname: "[vsyscall]".into(), + } + ); + + assert_eq!( + "7f5985f46000-7f5985f48000 rw-p 00039000 103:06 76021795 \ + /usr/lib/x86_64-linux-gnu/ld-linux-x86-64.so.2" + .parse::<MapsEntry>() + .unwrap(), + MapsEntry { + address: (0x7f5985f46000, 0x7f5985f48000), + perms: ['r', 'w', '-', 'p'], + offset: 0x00039000, + dev: (0x103, 0x06), + inode: 0x76021795, + pathname: "/usr/lib/x86_64-linux-gnu/ld-linux-x86-64.so.2".into(), + } + ); + assert_eq!( + "35b1a21000-35b1a22000 rw-p 00000000 00:00 0" + .parse::<MapsEntry>() + .unwrap(), + MapsEntry { + address: (0x35b1a21000, 0x35b1a22000), + perms: ['r', 'w', '-', 'p'], + offset: 0x00000000, + dev: (0x00, 0x00), + inode: 0x0, + pathname: Default::default(), + } + ); +} + +// (This output was taken from a 32-bit machine, but will work on any target) +#[test] +fn check_maps_entry_parsing_32bit() { + /* Example snippet of output: + 08056000-08077000 rw-p 00000000 00:00 0 [heap] + b7c79000-b7e02000 r--p 00000000 08:01 60662705 /usr/lib/locale/locale-archive + b7e02000-b7e03000 rw-p 00000000 00:00 0 + */ + assert_eq!( + "08056000-08077000 rw-p 00000000 00:00 0 \ + [heap]" + .parse::<MapsEntry>() + .unwrap(), + MapsEntry { + address: (0x08056000, 0x08077000), + perms: ['r', 'w', '-', 'p'], + offset: 0x00000000, + dev: (0x00, 0x00), + inode: 0x0, + pathname: "[heap]".into(), + } + ); + + assert_eq!( + "b7c79000-b7e02000 r--p 00000000 08:01 60662705 \ + /usr/lib/locale/locale-archive" + .parse::<MapsEntry>() + .unwrap(), + MapsEntry { + address: (0xb7c79000, 0xb7e02000), + perms: ['r', '-', '-', 'p'], + offset: 0x00000000, + dev: (0x08, 0x01), + inode: 0x60662705, + pathname: "/usr/lib/locale/locale-archive".into(), + } + ); + assert_eq!( + "b7e02000-b7e03000 rw-p 00000000 00:00 0" + .parse::<MapsEntry>() + .unwrap(), + MapsEntry { + address: (0xb7e02000, 0xb7e03000), + perms: ['r', 'w', '-', 'p'], + offset: 0x00000000, + dev: (0x00, 0x00), + inode: 0x0, + pathname: Default::default(), + } + ); + assert_eq!( + "b7c79000-b7e02000 r--p 00000000 08:01 60662705 \ + /executable/path/with some spaces" + .parse::<MapsEntry>() + .unwrap(), + MapsEntry { + address: (0xb7c79000, 0xb7e02000), + perms: ['r', '-', '-', 'p'], + offset: 0x00000000, + dev: (0x08, 0x01), + inode: 0x60662705, + pathname: "/executable/path/with some spaces".into(), + } + ); + assert_eq!( + "b7c79000-b7e02000 r--p 00000000 08:01 60662705 \ + /executable/path/with multiple-continuous spaces " + .parse::<MapsEntry>() + .unwrap(), + MapsEntry { + address: (0xb7c79000, 0xb7e02000), + perms: ['r', '-', '-', 'p'], + offset: 0x00000000, + dev: (0x08, 0x01), + inode: 0x60662705, + pathname: "/executable/path/with multiple-continuous spaces ".into(), + } + ); + assert_eq!( + " b7c79000-b7e02000 r--p 00000000 08:01 60662705 \ + /executable/path/starts-with-spaces" + .parse::<MapsEntry>() + .unwrap(), + MapsEntry { + address: (0xb7c79000, 0xb7e02000), + perms: ['r', '-', '-', 'p'], + offset: 0x00000000, + dev: (0x08, 0x01), + inode: 0x60662705, + pathname: "/executable/path/starts-with-spaces".into(), + } + ); +} diff --git a/vendor/backtrace/src/symbolize/gimli/stash.rs b/vendor/backtrace/src/symbolize/gimli/stash.rs new file mode 100644 
index 00000000..5ec06e24 --- /dev/null +++ b/vendor/backtrace/src/symbolize/gimli/stash.rs @@ -0,0 +1,51 @@ +#![allow(clippy::all)] +// only used on Linux right now, so allow dead code elsewhere +#![cfg_attr(not(target_os = "linux"), allow(dead_code))] + +use super::Mmap; +use alloc::vec; +use alloc::vec::Vec; +use core::cell::UnsafeCell; + +/// A simple arena allocator for byte buffers. +pub struct Stash { + buffers: UnsafeCell<Vec<Vec<u8>>>, + mmaps: UnsafeCell<Vec<Mmap>>, +} + +impl Stash { + pub fn new() -> Stash { + Stash { + buffers: UnsafeCell::new(Vec::new()), + mmaps: UnsafeCell::new(Vec::new()), + } + } + + /// Allocates a buffer of the specified size and returns a mutable reference + /// to it. + pub fn allocate(&self, size: usize) -> &mut [u8] { + // SAFETY: this is the only function that ever constructs a mutable + // reference to `self.buffers`. + let buffers = unsafe { &mut *self.buffers.get() }; + let i = buffers.len(); + buffers.push(vec![0; size]); + // SAFETY: we never remove elements from `self.buffers`, so a reference + // to the data inside any buffer will live as long as `self` does. + &mut buffers[i] + } + + /// Stores a `Mmap` for the lifetime of this `Stash`, returning a pointer + /// which is scoped to just this lifetime. + pub fn cache_mmap(&self, map: Mmap) -> &[u8] { + // SAFETY: this is the only location for a mutable pointer to + // `mmaps`, and this structure isn't threadsafe to shared across + // threads either. We also never remove elements from `self.mmaps`, + // so a reference to the data inside the map will live as long as + // `self` does. + unsafe { + let mmaps = &mut *self.mmaps.get(); + mmaps.push(map); + mmaps.last().unwrap() + } + } +} diff --git a/vendor/backtrace/src/symbolize/gimli/xcoff.rs b/vendor/backtrace/src/symbolize/gimli/xcoff.rs new file mode 100644 index 00000000..18c61cbd --- /dev/null +++ b/vendor/backtrace/src/symbolize/gimli/xcoff.rs @@ -0,0 +1,188 @@ +use super::mystd::ffi::OsStr; +use super::mystd::os::unix::ffi::OsStrExt; +use super::mystd::path::Path; +use super::{gimli, Context, Endian, EndianSlice, Mapping, Stash}; +use alloc::sync::Arc; +use alloc::vec::Vec; +use core::ops::Deref; +use core::str; +use object::read::archive::ArchiveFile; +use object::read::xcoff::{FileHeader, SectionHeader, XcoffFile, XcoffSymbol}; +use object::Object as _; +use object::ObjectSection as _; +use object::ObjectSymbol as _; +use object::SymbolFlags; + +#[cfg(target_pointer_width = "32")] +type Xcoff = object::xcoff::FileHeader32; +#[cfg(target_pointer_width = "64")] +type Xcoff = object::xcoff::FileHeader64; + +impl Mapping { + pub fn new(path: &Path, member_name: &OsStr) -> Option<Mapping> { + let map = super::mmap(path)?; + Mapping::mk(map, |data, stash| { + if member_name.is_empty() { + Context::new(stash, Object::parse(data)?, None, None) + } else { + let archive = ArchiveFile::parse(data).ok()?; + for member in archive + .members() + .filter_map(|m| m.ok()) + .filter(|m| OsStr::from_bytes(m.name()) == member_name) + { + let member_data = member.data(data).ok()?; + if let Some(obj) = Object::parse(member_data) { + return Context::new(stash, obj, None, None); + } + } + None + } + }) + } +} + +struct ParsedSym<'a> { + address: u64, + size: u64, + name: &'a str, +} + +pub struct Object<'a> { + syms: Vec<ParsedSym<'a>>, + file: XcoffFile<'a, Xcoff>, +} + +pub struct Image { + pub offset: usize, + pub base: u64, + pub size: usize, +} + +pub fn parse_xcoff(data: &[u8]) -> Option<Image> { + let mut offset = 0; + let header = Xcoff::parse(data, 
&mut offset).ok()?; + let _ = header.aux_header(data, &mut offset).ok()?; + let sections = header.sections(data, &mut offset).ok()?; + if let Some(section) = sections.iter().find(|s| { + if let Ok(name) = str::from_utf8(&s.s_name()[0..5]) { + name == ".text" + } else { + false + } + }) { + Some(Image { + offset: section.s_scnptr() as usize, + base: section.s_paddr() as u64, + size: section.s_size() as usize, + }) + } else { + None + } +} + +pub fn parse_image(path: &Path, member_name: &OsStr) -> Option<Image> { + let map = super::mmap(path)?; + let data = map.deref(); + if member_name.is_empty() { + return parse_xcoff(data); + } else { + let archive = ArchiveFile::parse(data).ok()?; + for member in archive + .members() + .filter_map(|m| m.ok()) + .filter(|m| OsStr::from_bytes(m.name()) == member_name) + { + let member_data = member.data(data).ok()?; + if let Some(image) = parse_xcoff(member_data) { + return Some(image); + } + } + None + } +} + +impl<'a> Object<'a> { + fn get_concrete_size(file: &XcoffFile<'a, Xcoff>, sym: &XcoffSymbol<'a, '_, Xcoff>) -> u64 { + match sym.flags() { + SymbolFlags::Xcoff { + n_sclass: _, + x_smtyp: _, + x_smclas: _, + containing_csect: Some(index), + } => { + if let Ok(tgt_sym) = file.symbol_by_index(index) { + Self::get_concrete_size(file, &tgt_sym) + } else { + 0 + } + } + _ => sym.size(), + } + } + + fn parse(data: &'a [u8]) -> Option<Object<'a>> { + let file = XcoffFile::parse(data).ok()?; + let mut syms = file + .symbols() + .filter_map(|sym| { + let name = sym.name().map_or("", |v| v); + let address = sym.address(); + let size = Self::get_concrete_size(&file, &sym); + if name == ".text" || name == ".data" { + // We don't want to include ".text" and ".data" symbols. + // If they are included, since their ranges cover other + // symbols, when searching a symbol for a given address, + // ".text" or ".data" is returned. That's not what we expect. + None + } else { + Some(ParsedSym { + address, + size, + name, + }) + } + }) + .collect::<Vec<_>>(); + syms.sort_by_key(|s| s.address); + Some(Object { syms, file }) + } + + pub fn section(&self, _: &Stash, name: &str) -> Option<&'a [u8]> { + Some(self.file.section_by_name(name)?.data().ok()?) + } + + pub fn search_symtab<'b>(&'b self, addr: u64) -> Option<&'b [u8]> { + // Symbols, except ".text" and ".data", are sorted and are not overlapped each other, + // so we can just perform a binary search here. + let i = match self.syms.binary_search_by_key(&addr, |sym| sym.address) { + Ok(i) => i, + Err(i) => i.checked_sub(1)?, + }; + let sym = self.syms.get(i)?; + if (sym.address..sym.address + sym.size).contains(&addr) { + // On AIX, for a function call, for example, `foo()`, we have + // two symbols `foo` and `.foo`. `foo` references the function + // descriptor and `.foo` references the function entry. + // See https://www.ibm.com/docs/en/xl-fortran-aix/16.1.0?topic=calls-linkage-convention-function + // for more information. + // We trim the prefix `.` here, so that the rust demangler can work + // properly. 
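            // e.g. a lookup that lands inside `.foo` is reported with the name
            // `foo`, which `rustc_demangle` can then handle normally if it is
            // a mangled Rust symbol.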
+ Some(sym.name.trim_start_matches(".").as_bytes()) + } else { + None + } + } + + pub(super) fn search_object_map(&self, _addr: u64) -> Option<(&Context<'_>, u64)> { + None + } +} + +pub(super) fn handle_split_dwarf<'data>( + _package: Option<&gimli::DwarfPackage<EndianSlice<'data, Endian>>>, + _stash: &'data Stash, + _load: addr2line::SplitDwarfLoad<EndianSlice<'data, Endian>>, +) -> Option<Arc<gimli::Dwarf<EndianSlice<'data, Endian>>>> { + None +} diff --git a/vendor/backtrace/src/symbolize/miri.rs b/vendor/backtrace/src/symbolize/miri.rs new file mode 100644 index 00000000..5b0dc308 --- /dev/null +++ b/vendor/backtrace/src/symbolize/miri.rs @@ -0,0 +1,56 @@ +use core::ffi::c_void; +use core::marker::PhantomData; + +use super::super::backtrace::miri::{resolve_addr, Frame}; +use super::BytesOrWideString; +use super::{ResolveWhat, SymbolName}; + +pub unsafe fn resolve(what: ResolveWhat<'_>, cb: &mut dyn FnMut(&super::Symbol)) { + let sym = match what { + ResolveWhat::Address(addr) => Symbol { + inner: resolve_addr(addr), + _unused: PhantomData, + }, + ResolveWhat::Frame(frame) => Symbol { + inner: frame.inner.clone(), + _unused: PhantomData, + }, + }; + cb(&super::Symbol { inner: sym }) +} + +pub struct Symbol<'a> { + inner: Frame, + _unused: PhantomData<&'a ()>, +} + +impl<'a> Symbol<'a> { + pub fn name(&self) -> Option<SymbolName<'_>> { + Some(SymbolName::new(&self.inner.inner.name)) + } + + pub fn addr(&self) -> Option<*mut c_void> { + Some(self.inner.addr) + } + + pub fn filename_raw(&self) -> Option<BytesOrWideString<'_>> { + Some(BytesOrWideString::Bytes(&self.inner.inner.filename)) + } + + pub fn lineno(&self) -> Option<u32> { + Some(self.inner.inner.lineno) + } + + pub fn colno(&self) -> Option<u32> { + Some(self.inner.inner.colno) + } + + #[cfg(feature = "std")] + pub fn filename(&self) -> Option<&std::path::Path> { + Some(std::path::Path::new( + core::str::from_utf8(&self.inner.inner.filename).unwrap(), + )) + } +} + +pub unsafe fn clear_symbol_cache() {} diff --git a/vendor/backtrace/src/symbolize/mod.rs b/vendor/backtrace/src/symbolize/mod.rs new file mode 100644 index 00000000..137d2ea3 --- /dev/null +++ b/vendor/backtrace/src/symbolize/mod.rs @@ -0,0 +1,450 @@ +use core::{fmt, str}; + +cfg_if::cfg_if! { + if #[cfg(feature = "std")] { + use std::path::Path; + use std::prelude::v1::*; + } +} + +use super::backtrace::Frame; +use super::types::BytesOrWideString; +use core::ffi::c_void; +use rustc_demangle::{try_demangle, Demangle}; + +/// Resolve an address to a symbol, passing the symbol to the specified +/// closure. +/// +/// This function will look up the given address in areas such as the local +/// symbol table, dynamic symbol table, or DWARF debug info (depending on the +/// activated implementation) to find symbols to yield. +/// +/// The closure may not be called if resolution could not be performed, and it +/// also may be called more than once in the case of inlined functions. +/// +/// Symbols yielded represent the execution at the specified `addr`, returning +/// file/line pairs for that address (if available). +/// +/// Note that if you have a `Frame` then it's recommended to use the +/// `resolve_frame` function instead of this one. +/// +/// # Required features +/// +/// This function requires the `std` feature of the `backtrace` crate to be +/// enabled, and the `std` feature is enabled by default. 
+/// +/// # Panics +/// +/// This function strives to never panic, but if the `cb` provided panics then +/// some platforms will force a double panic to abort the process. Some +/// platforms use a C library which internally uses callbacks which cannot be +/// unwound through, so panicking from `cb` may trigger a process abort. +/// +/// # Example +/// +/// ``` +/// extern crate backtrace; +/// +/// fn main() { +/// backtrace::trace(|frame| { +/// let ip = frame.ip(); +/// +/// backtrace::resolve(ip, |symbol| { +/// // ... +/// }); +/// +/// false // only look at the top frame +/// }); +/// } +/// ``` +#[cfg(feature = "std")] +pub fn resolve<F: FnMut(&Symbol)>(addr: *mut c_void, cb: F) { + let _guard = crate::lock::lock(); + unsafe { resolve_unsynchronized(addr, cb) } +} + +/// Resolve a previously captured frame to a symbol, passing the symbol to the +/// specified closure. +/// +/// This function performs the same function as `resolve` except that it takes a +/// `Frame` as an argument instead of an address. This can allow some platform +/// implementations of backtracing to provide more accurate symbol information +/// or information about inline frames for example. It's recommended to use this +/// if you can. +/// +/// # Required features +/// +/// This function requires the `std` feature of the `backtrace` crate to be +/// enabled, and the `std` feature is enabled by default. +/// +/// # Panics +/// +/// This function strives to never panic, but if the `cb` provided panics then +/// some platforms will force a double panic to abort the process. Some +/// platforms use a C library which internally uses callbacks which cannot be +/// unwound through, so panicking from `cb` may trigger a process abort. +/// +/// # Example +/// +/// ``` +/// extern crate backtrace; +/// +/// fn main() { +/// backtrace::trace(|frame| { +/// backtrace::resolve_frame(frame, |symbol| { +/// // ... +/// }); +/// +/// false // only look at the top frame +/// }); +/// } +/// ``` +#[cfg(feature = "std")] +pub fn resolve_frame<F: FnMut(&Symbol)>(frame: &Frame, cb: F) { + let _guard = crate::lock::lock(); + unsafe { resolve_frame_unsynchronized(frame, cb) } +} + +pub enum ResolveWhat<'a> { + Address(*mut c_void), + Frame(&'a Frame), +} + +impl<'a> ResolveWhat<'a> { + #[allow(dead_code)] + fn address_or_ip(&self) -> *mut c_void { + match self { + ResolveWhat::Address(a) => adjust_ip(*a), + ResolveWhat::Frame(f) => adjust_ip(f.ip()), + } + } +} + +// IP values from stack frames are typically (always?) the instruction +// *after* the call that's the actual stack trace. Symbolizing this on +// causes the filename/line number to be one ahead and perhaps into +// the void if it's near the end of the function. +// +// This appears to basically always be the case on all platforms, so we always +// subtract one from a resolved ip to resolve it to the previous call +// instruction instead of the instruction being returned to. +// +// Ideally we would not do this. Ideally we would require callers of the +// `resolve` APIs here to manually do the -1 and account that they want location +// information for the *previous* instruction, not the current. Ideally we'd +// also expose on `Frame` if we are indeed the address of the next instruction +// or the current. +// +// For now though this is a pretty niche concern so we just internally always +// subtract one. Consumers should keep working and getting pretty good results, +// so we should be good enough. 
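// Concretely: if the `call` instruction at 0x1000 is five bytes long, the frame
// records the return address 0x1005; symbolizing 0x1004 instead keeps the
// file/line result on the line containing the call rather than whatever code
// happens to follow it.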
+fn adjust_ip(a: *mut c_void) -> *mut c_void { + if a.is_null() { + a + } else { + (a as usize - 1) as *mut c_void + } +} + +/// Same as `resolve`, only unsafe as it's unsynchronized. +/// +/// This function does not have synchronization guarantees but is available when +/// the `std` feature of this crate isn't compiled in. See the `resolve` +/// function for more documentation and examples. +/// +/// # Panics +/// +/// See information on `resolve` for caveats on `cb` panicking. +pub unsafe fn resolve_unsynchronized<F>(addr: *mut c_void, mut cb: F) +where + F: FnMut(&Symbol), +{ + unsafe { imp::resolve(ResolveWhat::Address(addr), &mut cb) } +} + +/// Same as `resolve_frame`, only unsafe as it's unsynchronized. +/// +/// This function does not have synchronization guarantees but is available +/// when the `std` feature of this crate isn't compiled in. See the +/// `resolve_frame` function for more documentation and examples. +/// +/// # Panics +/// +/// See information on `resolve_frame` for caveats on `cb` panicking. +pub unsafe fn resolve_frame_unsynchronized<F>(frame: &Frame, mut cb: F) +where + F: FnMut(&Symbol), +{ + unsafe { imp::resolve(ResolveWhat::Frame(frame), &mut cb) } +} + +/// A trait representing the resolution of a symbol in a file. +/// +/// This trait is yielded as a trait object to the closure given to the +/// `backtrace::resolve` function, and it is virtually dispatched as it's +/// unknown which implementation is behind it. +/// +/// A symbol can give contextual information about a function, for example the +/// name, filename, line number, precise address, etc. Not all information is +/// always available in a symbol, however, so all methods return an `Option`. +pub struct Symbol { + // TODO: this lifetime bound needs to be persisted eventually to `Symbol`, + // but that's currently a breaking change. For now this is safe since + // `Symbol` is only ever handed out by reference and can't be cloned. + inner: imp::Symbol<'static>, +} + +impl Symbol { + /// Returns the name of this function. + /// + /// The returned structure can be used to query various properties about the + /// symbol name: + /// + /// * The `Display` implementation will print out the demangled symbol. + /// * The raw `str` value of the symbol can be accessed (if it's valid + /// utf-8). + /// * The raw bytes for the symbol name can be accessed. + pub fn name(&self) -> Option<SymbolName<'_>> { + self.inner.name() + } + + /// Returns the starting address of this function. + pub fn addr(&self) -> Option<*mut c_void> { + self.inner.addr() + } + + /// Returns the raw filename as a slice. This is mainly useful for `no_std` + /// environments. + pub fn filename_raw(&self) -> Option<BytesOrWideString<'_>> { + self.inner.filename_raw() + } + + /// Returns the column number for where this symbol is currently executing. + /// + /// Only gimli currently provides a value here and even then only if `filename` + /// returns `Some`, and so it is then consequently subject to similar caveats. + pub fn colno(&self) -> Option<u32> { + self.inner.colno() + } + + /// Returns the line number for where this symbol is currently executing. + /// + /// This return value is typically `Some` if `filename` returns `Some`, and + /// is consequently subject to similar caveats. + pub fn lineno(&self) -> Option<u32> { + self.inner.lineno() + } + + /// Returns the file name where this function was defined. + /// + /// This is currently only available when libbacktrace or gimli is being + /// used (e.g. 
unix platforms other) and when a binary is compiled with + /// debuginfo. If neither of these conditions is met then this will likely + /// return `None`. + /// + /// # Required features + /// + /// This function requires the `std` feature of the `backtrace` crate to be + /// enabled, and the `std` feature is enabled by default. + #[cfg(feature = "std")] + #[allow(unreachable_code)] + pub fn filename(&self) -> Option<&Path> { + self.inner.filename() + } +} + +impl fmt::Debug for Symbol { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut d = f.debug_struct("Symbol"); + if let Some(name) = self.name() { + d.field("name", &name); + } + if let Some(addr) = self.addr() { + d.field("addr", &addr); + } + + #[cfg(feature = "std")] + { + if let Some(filename) = self.filename() { + d.field("filename", &filename); + } + } + + if let Some(lineno) = self.lineno() { + d.field("lineno", &lineno); + } + d.finish() + } +} + +cfg_if::cfg_if! { + if #[cfg(feature = "cpp_demangle")] { + // Maybe a parsed C++ symbol, if parsing the mangled symbol as Rust + // failed. + struct OptionCppSymbol<'a>(Option<::cpp_demangle::BorrowedSymbol<'a>>); + + impl<'a> OptionCppSymbol<'a> { + fn parse(input: &'a [u8]) -> OptionCppSymbol<'a> { + OptionCppSymbol(::cpp_demangle::BorrowedSymbol::new(input).ok()) + } + + fn none() -> OptionCppSymbol<'a> { + OptionCppSymbol(None) + } + } + } +} + +/// A wrapper around a symbol name to provide ergonomic accessors to the +/// demangled name, the raw bytes, the raw string, etc. +pub struct SymbolName<'a> { + bytes: &'a [u8], + demangled: Option<Demangle<'a>>, + #[cfg(feature = "cpp_demangle")] + cpp_demangled: OptionCppSymbol<'a>, +} + +impl<'a> SymbolName<'a> { + /// Creates a new symbol name from the raw underlying bytes. + pub fn new(bytes: &'a [u8]) -> SymbolName<'a> { + let str_bytes = str::from_utf8(bytes).ok(); + let demangled = str_bytes.and_then(|s| try_demangle(s).ok()); + + #[cfg(feature = "cpp_demangle")] + let cpp = if demangled.is_none() { + OptionCppSymbol::parse(bytes) + } else { + OptionCppSymbol::none() + }; + + SymbolName { + bytes, + demangled, + #[cfg(feature = "cpp_demangle")] + cpp_demangled: cpp, + } + } + + /// Returns the raw (mangled) symbol name as a `str` if the symbol is valid utf-8. + /// + /// Use the `Display` implementation if you want the demangled version. 
+ pub fn as_str(&self) -> Option<&'a str> { + self.demangled + .as_ref() + .map(|s| s.as_str()) + .or_else(|| str::from_utf8(self.bytes).ok()) + } + + /// Returns the raw symbol name as a list of bytes + pub fn as_bytes(&self) -> &'a [u8] { + self.bytes + } +} + +fn format_symbol_name( + fmt: fn(&str, &mut fmt::Formatter<'_>) -> fmt::Result, + mut bytes: &[u8], + f: &mut fmt::Formatter<'_>, +) -> fmt::Result { + while bytes.len() > 0 { + match str::from_utf8(bytes) { + Ok(name) => { + fmt(name, f)?; + break; + } + Err(err) => { + fmt("\u{FFFD}", f)?; + + match err.error_len() { + Some(len) => bytes = &bytes[err.valid_up_to() + len..], + None => break, + } + } + } + } + Ok(()) +} + +impl<'a> fmt::Display for SymbolName<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if let Some(ref s) = self.demangled { + return s.fmt(f); + } + + #[cfg(feature = "cpp_demangle")] + { + if let Some(ref cpp) = self.cpp_demangled.0 { + return cpp.fmt(f); + } + } + + format_symbol_name(fmt::Display::fmt, self.bytes, f) + } +} + +impl<'a> fmt::Debug for SymbolName<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if let Some(ref s) = self.demangled { + return s.fmt(f); + } + + #[cfg(all(feature = "std", feature = "cpp_demangle"))] + { + use std::fmt::Write; + + // This may to print if the demangled symbol isn't actually + // valid, so handle the error here gracefully by not propagating + // it outwards. + if let Some(ref cpp) = self.cpp_demangled.0 { + let mut s = String::new(); + if write!(s, "{cpp}").is_ok() { + return s.fmt(f); + } + } + } + + format_symbol_name(fmt::Debug::fmt, self.bytes, f) + } +} + +/// Attempt to reclaim that cached memory used to symbolicate addresses. +/// +/// This method will attempt to release any global data structures that have +/// otherwise been cached globally or in the thread which typically represent +/// parsed DWARF information or similar. +/// +/// # Caveats +/// +/// While this function is always available it doesn't actually do anything on +/// most implementations. Libraries like dbghelp or libbacktrace do not provide +/// facilities to deallocate state and manage the allocated memory. For now the +/// `std` feature of this crate is the only feature where this +/// function has any effect. +#[cfg(feature = "std")] +pub fn clear_symbol_cache() { + let _guard = crate::lock::lock(); + unsafe { + imp::clear_symbol_cache(); + } +} + +cfg_if::cfg_if! { + if #[cfg(miri)] { + mod miri; + use miri as imp; + } else if #[cfg(all(windows, target_env = "msvc", not(target_vendor = "uwp")))] { + mod dbghelp; + use dbghelp as imp; + } else if #[cfg(all( + any(unix, all(windows, target_env = "gnu")), + not(target_vendor = "uwp"), + not(target_os = "emscripten"), + any(not(backtrace_in_libstd), feature = "backtrace"), + ))] { + mod gimli; + use gimli as imp; + } else { + mod noop; + use noop as imp; + } +} diff --git a/vendor/backtrace/src/symbolize/noop.rs b/vendor/backtrace/src/symbolize/noop.rs new file mode 100644 index 00000000..c5333653 --- /dev/null +++ b/vendor/backtrace/src/symbolize/noop.rs @@ -0,0 +1,41 @@ +//! Empty symbolication strategy used to compile for platforms that have no +//! support. 
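//!
//! Every accessor on `Symbol` below simply returns `None`, and `resolve` never
//! invokes its callback, so resolution quietly yields nothing on these targets.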
+ +use super::{BytesOrWideString, ResolveWhat, SymbolName}; +use core::ffi::c_void; +use core::marker; + +pub unsafe fn resolve(_addr: ResolveWhat<'_>, _cb: &mut dyn FnMut(&super::Symbol)) {} + +pub struct Symbol<'a> { + _marker: marker::PhantomData<&'a i32>, +} + +impl Symbol<'_> { + pub fn name(&self) -> Option<SymbolName<'_>> { + None + } + + pub fn addr(&self) -> Option<*mut c_void> { + None + } + + pub fn filename_raw(&self) -> Option<BytesOrWideString<'_>> { + None + } + + #[cfg(feature = "std")] + pub fn filename(&self) -> Option<&::std::path::Path> { + None + } + + pub fn lineno(&self) -> Option<u32> { + None + } + + pub fn colno(&self) -> Option<u32> { + None + } +} + +pub unsafe fn clear_symbol_cache() {} diff --git a/vendor/backtrace/src/types.rs b/vendor/backtrace/src/types.rs new file mode 100644 index 00000000..c419247a --- /dev/null +++ b/vendor/backtrace/src/types.rs @@ -0,0 +1,83 @@ +//! Platform dependent types. + +cfg_if::cfg_if! { + if #[cfg(feature = "std")] { + use std::borrow::Cow; + use std::fmt; + use std::path::PathBuf; + use std::prelude::v1::*; + use std::str; + } +} + +/// A platform independent representation of a string. When working with `std` +/// enabled it is recommended to the convenience methods for providing +/// conversions to `std` types. +#[derive(Debug)] +pub enum BytesOrWideString<'a> { + /// A slice, typically provided on Unix platforms. + Bytes(&'a [u8]), + /// Wide strings typically from Windows. + Wide(&'a [u16]), +} + +#[cfg(feature = "std")] +impl<'a> BytesOrWideString<'a> { + /// Lossy converts to a `Cow<str>`, will allocate if `Bytes` is not valid + /// UTF-8 or if `BytesOrWideString` is `Wide`. + /// + /// # Required features + /// + /// This function requires the `std` feature of the `backtrace` crate to be + /// enabled, and the `std` feature is enabled by default. + pub fn to_str_lossy(&self) -> Cow<'a, str> { + use self::BytesOrWideString::*; + + match *self { + Bytes(slice) => String::from_utf8_lossy(slice), + Wide(wide) => Cow::Owned(String::from_utf16_lossy(wide)), + } + } + + /// Provides a `Path` representation of `BytesOrWideString`. + /// + /// # Required features + /// + /// This function requires the `std` feature of the `backtrace` crate to be + /// enabled, and the `std` feature is enabled by default. 
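    ///
    /// In practice, `Bytes` values become an `OsStr` directly on Unix and
    /// `Wide` values go through `OsString::from_wide` on Windows; any remaining
    /// input must hold valid UTF-8 bytes.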
+ pub fn into_path_buf(self) -> PathBuf { + #[cfg(unix)] + { + use std::ffi::OsStr; + use std::os::unix::ffi::OsStrExt; + + if let BytesOrWideString::Bytes(slice) = self { + return PathBuf::from(OsStr::from_bytes(slice)); + } + } + + #[cfg(windows)] + { + use std::ffi::OsString; + use std::os::windows::ffi::OsStringExt; + + if let BytesOrWideString::Wide(slice) = self { + return PathBuf::from(OsString::from_wide(slice)); + } + } + + if let BytesOrWideString::Bytes(b) = self { + if let Ok(s) = str::from_utf8(b) { + return PathBuf::from(s); + } + } + unreachable!() + } +} + +#[cfg(feature = "std")] +impl<'a> fmt::Display for BytesOrWideString<'a> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.to_str_lossy().fmt(f) + } +} diff --git a/vendor/backtrace/src/windows_sys.rs b/vendor/backtrace/src/windows_sys.rs new file mode 100644 index 00000000..443a1696 --- /dev/null +++ b/vendor/backtrace/src/windows_sys.rs @@ -0,0 +1,668 @@ +// Bindings generated by `windows-bindgen` 0.58.0 + +#![allow( + non_snake_case, + non_upper_case_globals, + non_camel_case_types, + dead_code, + clippy::all +)] +windows_targets::link!("dbghelp.dll" "system" fn EnumerateLoadedModulesW64(hprocess : HANDLE, enumloadedmodulescallback : PENUMLOADED_MODULES_CALLBACKW64, usercontext : *const core::ffi::c_void) -> BOOL); +windows_targets::link!("dbghelp.dll" "system" fn StackWalk64(machinetype : u32, hprocess : HANDLE, hthread : HANDLE, stackframe : *mut STACKFRAME64, contextrecord : *mut core::ffi::c_void, readmemoryroutine : PREAD_PROCESS_MEMORY_ROUTINE64, functiontableaccessroutine : PFUNCTION_TABLE_ACCESS_ROUTINE64, getmodulebaseroutine : PGET_MODULE_BASE_ROUTINE64, translateaddress : PTRANSLATE_ADDRESS_ROUTINE64) -> BOOL); +windows_targets::link!("dbghelp.dll" "system" fn StackWalkEx(machinetype : u32, hprocess : HANDLE, hthread : HANDLE, stackframe : *mut STACKFRAME_EX, contextrecord : *mut core::ffi::c_void, readmemoryroutine : PREAD_PROCESS_MEMORY_ROUTINE64, functiontableaccessroutine : PFUNCTION_TABLE_ACCESS_ROUTINE64, getmodulebaseroutine : PGET_MODULE_BASE_ROUTINE64, translateaddress : PTRANSLATE_ADDRESS_ROUTINE64, flags : u32) -> BOOL); +windows_targets::link!("dbghelp.dll" "system" fn SymAddrIncludeInlineTrace(hprocess : HANDLE, address : u64) -> u32); +windows_targets::link!("dbghelp.dll" "system" fn SymFromAddrW(hprocess : HANDLE, address : u64, displacement : *mut u64, symbol : *mut SYMBOL_INFOW) -> BOOL); +windows_targets::link!("dbghelp.dll" "system" fn SymFromInlineContextW(hprocess : HANDLE, address : u64, inlinecontext : u32, displacement : *mut u64, symbol : *mut SYMBOL_INFOW) -> BOOL); +windows_targets::link!("dbghelp.dll" "system" fn SymFunctionTableAccess64(hprocess : HANDLE, addrbase : u64) -> *mut core::ffi::c_void); +windows_targets::link!("dbghelp.dll" "system" fn SymGetLineFromAddrW64(hprocess : HANDLE, dwaddr : u64, pdwdisplacement : *mut u32, line : *mut IMAGEHLP_LINEW64) -> BOOL); +windows_targets::link!("dbghelp.dll" "system" fn SymGetLineFromInlineContextW(hprocess : HANDLE, dwaddr : u64, inlinecontext : u32, qwmodulebaseaddress : u64, pdwdisplacement : *mut u32, line : *mut IMAGEHLP_LINEW64) -> BOOL); +windows_targets::link!("dbghelp.dll" "system" fn SymGetModuleBase64(hprocess : HANDLE, qwaddr : u64) -> u64); +windows_targets::link!("dbghelp.dll" "system" fn SymGetOptions() -> u32); +windows_targets::link!("dbghelp.dll" "system" fn SymGetSearchPathW(hprocess : HANDLE, searchpatha : PWSTR, searchpathlength : u32) -> BOOL); +windows_targets::link!("dbghelp.dll" "system" 
fn SymInitializeW(hprocess : HANDLE, usersearchpath : PCWSTR, finvadeprocess : BOOL) -> BOOL); +windows_targets::link!("dbghelp.dll" "system" fn SymQueryInlineTrace(hprocess : HANDLE, startaddress : u64, startcontext : u32, startretaddress : u64, curaddress : u64, curcontext : *mut u32, curframeindex : *mut u32) -> BOOL); +windows_targets::link!("dbghelp.dll" "system" fn SymSetOptions(symoptions : u32) -> u32); +windows_targets::link!("dbghelp.dll" "system" fn SymSetSearchPathW(hprocess : HANDLE, searchpatha : PCWSTR) -> BOOL); +windows_targets::link!("kernel32.dll" "system" fn CloseHandle(hobject : HANDLE) -> BOOL); +windows_targets::link!("kernel32.dll" "system" fn CreateFileMappingA(hfile : HANDLE, lpfilemappingattributes : *const SECURITY_ATTRIBUTES, flprotect : PAGE_PROTECTION_FLAGS, dwmaximumsizehigh : u32, dwmaximumsizelow : u32, lpname : PCSTR) -> HANDLE); +windows_targets::link!("kernel32.dll" "system" fn CreateMutexA(lpmutexattributes : *const SECURITY_ATTRIBUTES, binitialowner : BOOL, lpname : PCSTR) -> HANDLE); +windows_targets::link!("kernel32.dll" "system" fn CreateToolhelp32Snapshot(dwflags : CREATE_TOOLHELP_SNAPSHOT_FLAGS, th32processid : u32) -> HANDLE); +windows_targets::link!("kernel32.dll" "system" fn GetCurrentProcess() -> HANDLE); +windows_targets::link!("kernel32.dll" "system" fn GetCurrentProcessId() -> u32); +windows_targets::link!("kernel32.dll" "system" fn GetCurrentThread() -> HANDLE); +windows_targets::link!("kernel32.dll" "system" fn GetProcAddress(hmodule : HMODULE, lpprocname : PCSTR) -> FARPROC); +windows_targets::link!("kernel32.dll" "system" fn LoadLibraryA(lplibfilename : PCSTR) -> HMODULE); +windows_targets::link!("kernel32.dll" "system" fn MapViewOfFile(hfilemappingobject : HANDLE, dwdesiredaccess : FILE_MAP, dwfileoffsethigh : u32, dwfileoffsetlow : u32, dwnumberofbytestomap : usize) -> MEMORY_MAPPED_VIEW_ADDRESS); +windows_targets::link!("kernel32.dll" "system" fn Module32FirstW(hsnapshot : HANDLE, lpme : *mut MODULEENTRY32W) -> BOOL); +windows_targets::link!("kernel32.dll" "system" fn Module32NextW(hsnapshot : HANDLE, lpme : *mut MODULEENTRY32W) -> BOOL); +windows_targets::link!("kernel32.dll" "system" fn ReleaseMutex(hmutex : HANDLE) -> BOOL); +windows_targets::link!("kernel32.dll" "system" fn RtlCaptureContext(contextrecord : *mut CONTEXT)); +#[cfg(target_arch = "aarch64")] +windows_targets::link!("kernel32.dll" "system" fn RtlLookupFunctionEntry(controlpc : usize, imagebase : *mut usize, historytable : *mut UNWIND_HISTORY_TABLE) -> *mut IMAGE_ARM64_RUNTIME_FUNCTION_ENTRY); +#[cfg(any(target_arch = "arm64ec", target_arch = "x86_64"))] +windows_targets::link!("kernel32.dll" "system" fn RtlLookupFunctionEntry(controlpc : u64, imagebase : *mut u64, historytable : *mut UNWIND_HISTORY_TABLE) -> *mut IMAGE_RUNTIME_FUNCTION_ENTRY); +#[cfg(target_arch = "aarch64")] +windows_targets::link!("kernel32.dll" "system" fn RtlVirtualUnwind(handlertype : RTL_VIRTUAL_UNWIND_HANDLER_TYPE, imagebase : usize, controlpc : usize, functionentry : *const IMAGE_ARM64_RUNTIME_FUNCTION_ENTRY, contextrecord : *mut CONTEXT, handlerdata : *mut *mut core::ffi::c_void, establisherframe : *mut usize, contextpointers : *mut KNONVOLATILE_CONTEXT_POINTERS) -> EXCEPTION_ROUTINE); +#[cfg(any(target_arch = "arm64ec", target_arch = "x86_64"))] +windows_targets::link!("kernel32.dll" "system" fn RtlVirtualUnwind(handlertype : RTL_VIRTUAL_UNWIND_HANDLER_TYPE, imagebase : u64, controlpc : u64, functionentry : *const IMAGE_RUNTIME_FUNCTION_ENTRY, contextrecord : *mut CONTEXT, handlerdata : 
*mut *mut core::ffi::c_void, establisherframe : *mut u64, contextpointers : *mut KNONVOLATILE_CONTEXT_POINTERS) -> EXCEPTION_ROUTINE); +windows_targets::link!("kernel32.dll" "system" fn UnmapViewOfFile(lpbaseaddress : MEMORY_MAPPED_VIEW_ADDRESS) -> BOOL); +windows_targets::link!("kernel32.dll" "system" fn WaitForSingleObjectEx(hhandle : HANDLE, dwmilliseconds : u32, balertable : BOOL) -> WAIT_EVENT); +windows_targets::link!("kernel32.dll" "system" fn WideCharToMultiByte(codepage : u32, dwflags : u32, lpwidecharstr : PCWSTR, cchwidechar : i32, lpmultibytestr : PSTR, cbmultibyte : i32, lpdefaultchar : PCSTR, lpuseddefaultchar : *mut BOOL) -> i32); +windows_targets::link!("kernel32.dll" "system" fn lstrlenW(lpstring : PCWSTR) -> i32); +#[repr(C)] +#[derive(Clone, Copy)] +pub struct ADDRESS64 { + pub Offset: u64, + pub Segment: u16, + pub Mode: ADDRESS_MODE, +} +pub type ADDRESS_MODE = i32; +#[repr(C)] +#[derive(Clone, Copy)] +pub union ARM64_NT_NEON128 { + pub Anonymous: ARM64_NT_NEON128_0, + pub D: [f64; 2], + pub S: [f32; 4], + pub H: [u16; 8], + pub B: [u8; 16], +} +#[repr(C)] +#[derive(Clone, Copy)] +pub struct ARM64_NT_NEON128_0 { + pub Low: u64, + pub High: i64, +} +pub const AddrModeFlat: ADDRESS_MODE = 3i32; +pub type BOOL = i32; +#[repr(C)] +#[cfg(target_arch = "aarch64")] +#[derive(Clone, Copy)] +pub struct CONTEXT { + pub ContextFlags: CONTEXT_FLAGS, + pub Cpsr: u32, + pub Anonymous: CONTEXT_0, + pub Sp: u64, + pub Pc: u64, + pub V: [ARM64_NT_NEON128; 32], + pub Fpcr: u32, + pub Fpsr: u32, + pub Bcr: [u32; 8], + pub Bvr: [u64; 8], + pub Wcr: [u32; 2], + pub Wvr: [u64; 2], +} +#[repr(C)] +#[cfg(target_arch = "aarch64")] +#[derive(Clone, Copy)] +pub union CONTEXT_0 { + pub Anonymous: CONTEXT_0_0, + pub X: [u64; 31], +} +#[repr(C)] +#[cfg(target_arch = "aarch64")] +#[derive(Clone, Copy)] +pub struct CONTEXT_0_0 { + pub X0: u64, + pub X1: u64, + pub X2: u64, + pub X3: u64, + pub X4: u64, + pub X5: u64, + pub X6: u64, + pub X7: u64, + pub X8: u64, + pub X9: u64, + pub X10: u64, + pub X11: u64, + pub X12: u64, + pub X13: u64, + pub X14: u64, + pub X15: u64, + pub X16: u64, + pub X17: u64, + pub X18: u64, + pub X19: u64, + pub X20: u64, + pub X21: u64, + pub X22: u64, + pub X23: u64, + pub X24: u64, + pub X25: u64, + pub X26: u64, + pub X27: u64, + pub X28: u64, + pub Fp: u64, + pub Lr: u64, +} +#[repr(C)] +#[cfg(any(target_arch = "arm64ec", target_arch = "x86_64"))] +#[derive(Clone, Copy)] +pub struct CONTEXT { + pub P1Home: u64, + pub P2Home: u64, + pub P3Home: u64, + pub P4Home: u64, + pub P5Home: u64, + pub P6Home: u64, + pub ContextFlags: CONTEXT_FLAGS, + pub MxCsr: u32, + pub SegCs: u16, + pub SegDs: u16, + pub SegEs: u16, + pub SegFs: u16, + pub SegGs: u16, + pub SegSs: u16, + pub EFlags: u32, + pub Dr0: u64, + pub Dr1: u64, + pub Dr2: u64, + pub Dr3: u64, + pub Dr6: u64, + pub Dr7: u64, + pub Rax: u64, + pub Rcx: u64, + pub Rdx: u64, + pub Rbx: u64, + pub Rsp: u64, + pub Rbp: u64, + pub Rsi: u64, + pub Rdi: u64, + pub R8: u64, + pub R9: u64, + pub R10: u64, + pub R11: u64, + pub R12: u64, + pub R13: u64, + pub R14: u64, + pub R15: u64, + pub Rip: u64, + pub Anonymous: CONTEXT_0, + pub VectorRegister: [M128A; 26], + pub VectorControl: u64, + pub DebugControl: u64, + pub LastBranchToRip: u64, + pub LastBranchFromRip: u64, + pub LastExceptionToRip: u64, + pub LastExceptionFromRip: u64, +} +#[repr(C)] +#[cfg(any(target_arch = "arm64ec", target_arch = "x86_64"))] +#[derive(Clone, Copy)] +pub union CONTEXT_0 { + pub FltSave: XSAVE_FORMAT, + pub Anonymous: CONTEXT_0_0, +} +#[repr(C)] 
+#[cfg(any(target_arch = "arm64ec", target_arch = "x86_64"))] +#[derive(Clone, Copy)] +pub struct CONTEXT_0_0 { + pub Header: [M128A; 2], + pub Legacy: [M128A; 8], + pub Xmm0: M128A, + pub Xmm1: M128A, + pub Xmm2: M128A, + pub Xmm3: M128A, + pub Xmm4: M128A, + pub Xmm5: M128A, + pub Xmm6: M128A, + pub Xmm7: M128A, + pub Xmm8: M128A, + pub Xmm9: M128A, + pub Xmm10: M128A, + pub Xmm11: M128A, + pub Xmm12: M128A, + pub Xmm13: M128A, + pub Xmm14: M128A, + pub Xmm15: M128A, +} +#[repr(C)] +#[cfg(target_arch = "x86")] +#[derive(Clone, Copy)] +pub struct CONTEXT { + pub ContextFlags: CONTEXT_FLAGS, + pub Dr0: u32, + pub Dr1: u32, + pub Dr2: u32, + pub Dr3: u32, + pub Dr6: u32, + pub Dr7: u32, + pub FloatSave: FLOATING_SAVE_AREA, + pub SegGs: u32, + pub SegFs: u32, + pub SegEs: u32, + pub SegDs: u32, + pub Edi: u32, + pub Esi: u32, + pub Ebx: u32, + pub Edx: u32, + pub Ecx: u32, + pub Eax: u32, + pub Ebp: u32, + pub Eip: u32, + pub SegCs: u32, + pub EFlags: u32, + pub Esp: u32, + pub SegSs: u32, + pub ExtendedRegisters: [u8; 512], +} + +pub type CONTEXT_FLAGS = u32; +pub const CP_UTF8: u32 = 65001u32; +pub type CREATE_TOOLHELP_SNAPSHOT_FLAGS = u32; +pub type EXCEPTION_DISPOSITION = i32; +#[repr(C)] +#[derive(Clone, Copy)] +pub struct EXCEPTION_RECORD { + pub ExceptionCode: NTSTATUS, + pub ExceptionFlags: u32, + pub ExceptionRecord: *mut EXCEPTION_RECORD, + pub ExceptionAddress: *mut core::ffi::c_void, + pub NumberParameters: u32, + pub ExceptionInformation: [usize; 15], +} +pub type EXCEPTION_ROUTINE = Option< + unsafe extern "system" fn( + exceptionrecord: *mut EXCEPTION_RECORD, + establisherframe: *const core::ffi::c_void, + contextrecord: *mut CONTEXT, + dispatchercontext: *const core::ffi::c_void, + ) -> EXCEPTION_DISPOSITION, +>; +pub const FALSE: BOOL = 0i32; +pub type FARPROC = Option<unsafe extern "system" fn() -> isize>; +pub type FILE_MAP = u32; +pub const FILE_MAP_READ: FILE_MAP = 4u32; +#[repr(C)] +#[cfg(any( + target_arch = "aarch64", + target_arch = "arm64ec", + target_arch = "x86_64" +))] +#[derive(Clone, Copy)] +pub struct FLOATING_SAVE_AREA { + pub ControlWord: u32, + pub StatusWord: u32, + pub TagWord: u32, + pub ErrorOffset: u32, + pub ErrorSelector: u32, + pub DataOffset: u32, + pub DataSelector: u32, + pub RegisterArea: [u8; 80], + pub Cr0NpxState: u32, +} +#[repr(C)] +#[cfg(target_arch = "x86")] +#[derive(Clone, Copy)] +pub struct FLOATING_SAVE_AREA { + pub ControlWord: u32, + pub StatusWord: u32, + pub TagWord: u32, + pub ErrorOffset: u32, + pub ErrorSelector: u32, + pub DataOffset: u32, + pub DataSelector: u32, + pub RegisterArea: [u8; 80], + pub Spare0: u32, +} +pub type HANDLE = *mut core::ffi::c_void; +pub type HINSTANCE = *mut core::ffi::c_void; +pub type HMODULE = *mut core::ffi::c_void; +#[repr(C)] +#[derive(Clone, Copy)] +pub struct IMAGEHLP_LINEW64 { + pub SizeOfStruct: u32, + pub Key: *mut core::ffi::c_void, + pub LineNumber: u32, + pub FileName: PWSTR, + pub Address: u64, +} +#[repr(C)] +#[derive(Clone, Copy)] +pub struct IMAGE_ARM64_RUNTIME_FUNCTION_ENTRY { + pub BeginAddress: u32, + pub Anonymous: IMAGE_ARM64_RUNTIME_FUNCTION_ENTRY_0, +} +#[repr(C)] +#[derive(Clone, Copy)] +pub union IMAGE_ARM64_RUNTIME_FUNCTION_ENTRY_0 { + pub UnwindData: u32, + pub Anonymous: IMAGE_ARM64_RUNTIME_FUNCTION_ENTRY_0_0, +} +#[repr(C)] +#[derive(Clone, Copy)] +pub struct IMAGE_ARM64_RUNTIME_FUNCTION_ENTRY_0_0 { + pub _bitfield: u32, +} +pub type IMAGE_FILE_MACHINE = u16; +pub const IMAGE_FILE_MACHINE_I386: IMAGE_FILE_MACHINE = 332u16; +#[repr(C)] +#[derive(Clone, Copy)] +pub struct 
IMAGE_RUNTIME_FUNCTION_ENTRY { + pub BeginAddress: u32, + pub EndAddress: u32, + pub Anonymous: IMAGE_RUNTIME_FUNCTION_ENTRY_0, +} +#[repr(C)] +#[derive(Clone, Copy)] +pub union IMAGE_RUNTIME_FUNCTION_ENTRY_0 { + pub UnwindInfoAddress: u32, + pub UnwindData: u32, +} +pub const INFINITE: u32 = 4294967295u32; +pub const INVALID_HANDLE_VALUE: HANDLE = -1i32 as _; +#[repr(C)] +#[derive(Clone, Copy)] +pub struct KDHELP64 { + pub Thread: u64, + pub ThCallbackStack: u32, + pub ThCallbackBStore: u32, + pub NextCallback: u32, + pub FramePointer: u32, + pub KiCallUserMode: u64, + pub KeUserCallbackDispatcher: u64, + pub SystemRangeStart: u64, + pub KiUserExceptionDispatcher: u64, + pub StackBase: u64, + pub StackLimit: u64, + pub BuildVersion: u32, + pub RetpolineStubFunctionTableSize: u32, + pub RetpolineStubFunctionTable: u64, + pub RetpolineStubOffset: u32, + pub RetpolineStubSize: u32, + pub Reserved0: [u64; 2], +} +#[repr(C)] +#[cfg(target_arch = "aarch64")] +#[derive(Clone, Copy)] +pub struct KNONVOLATILE_CONTEXT_POINTERS { + pub X19: *mut u64, + pub X20: *mut u64, + pub X21: *mut u64, + pub X22: *mut u64, + pub X23: *mut u64, + pub X24: *mut u64, + pub X25: *mut u64, + pub X26: *mut u64, + pub X27: *mut u64, + pub X28: *mut u64, + pub Fp: *mut u64, + pub Lr: *mut u64, + pub D8: *mut u64, + pub D9: *mut u64, + pub D10: *mut u64, + pub D11: *mut u64, + pub D12: *mut u64, + pub D13: *mut u64, + pub D14: *mut u64, + pub D15: *mut u64, +} +#[repr(C)] +#[cfg(any(target_arch = "arm64ec", target_arch = "x86_64"))] +#[derive(Clone, Copy)] +pub struct KNONVOLATILE_CONTEXT_POINTERS { + pub Anonymous1: KNONVOLATILE_CONTEXT_POINTERS_0, + pub Anonymous2: KNONVOLATILE_CONTEXT_POINTERS_1, +} +#[repr(C)] +#[cfg(any(target_arch = "arm64ec", target_arch = "x86_64"))] +#[derive(Clone, Copy)] +pub union KNONVOLATILE_CONTEXT_POINTERS_0 { + pub FloatingContext: [*mut M128A; 16], + pub Anonymous: KNONVOLATILE_CONTEXT_POINTERS_0_0, +} +#[repr(C)] +#[cfg(any(target_arch = "arm64ec", target_arch = "x86_64"))] +#[derive(Clone, Copy)] +pub struct KNONVOLATILE_CONTEXT_POINTERS_0_0 { + pub Xmm0: *mut M128A, + pub Xmm1: *mut M128A, + pub Xmm2: *mut M128A, + pub Xmm3: *mut M128A, + pub Xmm4: *mut M128A, + pub Xmm5: *mut M128A, + pub Xmm6: *mut M128A, + pub Xmm7: *mut M128A, + pub Xmm8: *mut M128A, + pub Xmm9: *mut M128A, + pub Xmm10: *mut M128A, + pub Xmm11: *mut M128A, + pub Xmm12: *mut M128A, + pub Xmm13: *mut M128A, + pub Xmm14: *mut M128A, + pub Xmm15: *mut M128A, +} +#[repr(C)] +#[cfg(any(target_arch = "arm64ec", target_arch = "x86_64"))] +#[derive(Clone, Copy)] +pub union KNONVOLATILE_CONTEXT_POINTERS_1 { + pub IntegerContext: [*mut u64; 16], + pub Anonymous: KNONVOLATILE_CONTEXT_POINTERS_1_0, +} +#[repr(C)] +#[cfg(any(target_arch = "arm64ec", target_arch = "x86_64"))] +#[derive(Clone, Copy)] +pub struct KNONVOLATILE_CONTEXT_POINTERS_1_0 { + pub Rax: *mut u64, + pub Rcx: *mut u64, + pub Rdx: *mut u64, + pub Rbx: *mut u64, + pub Rsp: *mut u64, + pub Rbp: *mut u64, + pub Rsi: *mut u64, + pub Rdi: *mut u64, + pub R8: *mut u64, + pub R9: *mut u64, + pub R10: *mut u64, + pub R11: *mut u64, + pub R12: *mut u64, + pub R13: *mut u64, + pub R14: *mut u64, + pub R15: *mut u64, +} +#[repr(C)] +#[cfg(target_arch = "x86")] +#[derive(Clone, Copy)] +pub struct KNONVOLATILE_CONTEXT_POINTERS { + pub Dummy: u32, +} +#[repr(C)] +#[derive(Clone, Copy)] +pub struct M128A { + pub Low: u64, + pub High: i64, +} +pub const MAX_SYM_NAME: u32 = 2000u32; +#[repr(C)] +#[derive(Clone, Copy)] +pub struct MEMORY_MAPPED_VIEW_ADDRESS { + pub Value: 
*mut core::ffi::c_void, +} +#[repr(C)] +#[derive(Clone, Copy)] +pub struct MODULEENTRY32W { + pub dwSize: u32, + pub th32ModuleID: u32, + pub th32ProcessID: u32, + pub GlblcntUsage: u32, + pub ProccntUsage: u32, + pub modBaseAddr: *mut u8, + pub modBaseSize: u32, + pub hModule: HMODULE, + pub szModule: [u16; 256], + pub szExePath: [u16; 260], +} +pub type NTSTATUS = i32; +pub type PAGE_PROTECTION_FLAGS = u32; +pub const PAGE_READONLY: PAGE_PROTECTION_FLAGS = 2u32; +pub type PCSTR = *const u8; +pub type PCWSTR = *const u16; +pub type PENUMLOADED_MODULES_CALLBACKW64 = Option< + unsafe extern "system" fn( + modulename: PCWSTR, + modulebase: u64, + modulesize: u32, + usercontext: *const core::ffi::c_void, + ) -> BOOL, +>; +pub type PFUNCTION_TABLE_ACCESS_ROUTINE64 = + Option<unsafe extern "system" fn(ahprocess: HANDLE, addrbase: u64) -> *mut core::ffi::c_void>; +pub type PGET_MODULE_BASE_ROUTINE64 = + Option<unsafe extern "system" fn(hprocess: HANDLE, address: u64) -> u64>; +pub type PREAD_PROCESS_MEMORY_ROUTINE64 = Option< + unsafe extern "system" fn( + hprocess: HANDLE, + qwbaseaddress: u64, + lpbuffer: *mut core::ffi::c_void, + nsize: u32, + lpnumberofbytesread: *mut u32, + ) -> BOOL, +>; +pub type PSTR = *mut u8; +pub type PTRANSLATE_ADDRESS_ROUTINE64 = Option< + unsafe extern "system" fn(hprocess: HANDLE, hthread: HANDLE, lpaddr: *const ADDRESS64) -> u64, +>; +pub type PWSTR = *mut u16; +pub type RTL_VIRTUAL_UNWIND_HANDLER_TYPE = u32; +#[repr(C)] +#[derive(Clone, Copy)] +pub struct SECURITY_ATTRIBUTES { + pub nLength: u32, + pub lpSecurityDescriptor: *mut core::ffi::c_void, + pub bInheritHandle: BOOL, +} +#[repr(C)] +#[derive(Clone, Copy)] +pub struct STACKFRAME64 { + pub AddrPC: ADDRESS64, + pub AddrReturn: ADDRESS64, + pub AddrFrame: ADDRESS64, + pub AddrStack: ADDRESS64, + pub AddrBStore: ADDRESS64, + pub FuncTableEntry: *mut core::ffi::c_void, + pub Params: [u64; 4], + pub Far: BOOL, + pub Virtual: BOOL, + pub Reserved: [u64; 3], + pub KdHelp: KDHELP64, +} +#[repr(C)] +#[derive(Clone, Copy)] +pub struct STACKFRAME_EX { + pub AddrPC: ADDRESS64, + pub AddrReturn: ADDRESS64, + pub AddrFrame: ADDRESS64, + pub AddrStack: ADDRESS64, + pub AddrBStore: ADDRESS64, + pub FuncTableEntry: *mut core::ffi::c_void, + pub Params: [u64; 4], + pub Far: BOOL, + pub Virtual: BOOL, + pub Reserved: [u64; 3], + pub KdHelp: KDHELP64, + pub StackFrameSize: u32, + pub InlineFrameContext: u32, +} +#[repr(C)] +#[derive(Clone, Copy)] +pub struct SYMBOL_INFOW { + pub SizeOfStruct: u32, + pub TypeIndex: u32, + pub Reserved: [u64; 2], + pub Index: u32, + pub Size: u32, + pub ModBase: u64, + pub Flags: SYMBOL_INFO_FLAGS, + pub Value: u64, + pub Address: u64, + pub Register: u32, + pub Scope: u32, + pub Tag: u32, + pub NameLen: u32, + pub MaxNameLen: u32, + pub Name: [u16; 1], +} +pub type SYMBOL_INFO_FLAGS = u32; +pub const SYMOPT_DEFERRED_LOADS: u32 = 4u32; +pub const TH32CS_SNAPMODULE: CREATE_TOOLHELP_SNAPSHOT_FLAGS = 8u32; +pub const TRUE: BOOL = 1i32; +#[repr(C)] +#[cfg(any( + target_arch = "aarch64", + target_arch = "arm64ec", + target_arch = "x86_64" +))] +#[derive(Clone, Copy)] +pub struct UNWIND_HISTORY_TABLE { + pub Count: u32, + pub LocalHint: u8, + pub GlobalHint: u8, + pub Search: u8, + pub Once: u8, + pub LowAddress: usize, + pub HighAddress: usize, + pub Entry: [UNWIND_HISTORY_TABLE_ENTRY; 12], +} +#[repr(C)] +#[cfg(target_arch = "aarch64")] +#[derive(Clone, Copy)] +pub struct UNWIND_HISTORY_TABLE_ENTRY { + pub ImageBase: usize, + pub FunctionEntry: *mut IMAGE_ARM64_RUNTIME_FUNCTION_ENTRY, +} 
+#[repr(C)] +#[cfg(any(target_arch = "arm64ec", target_arch = "x86_64"))] +#[derive(Clone, Copy)] +pub struct UNWIND_HISTORY_TABLE_ENTRY { + pub ImageBase: usize, + pub FunctionEntry: *mut IMAGE_RUNTIME_FUNCTION_ENTRY, +} +pub type WAIT_EVENT = u32; +#[repr(C)] +#[cfg(any( + target_arch = "aarch64", + target_arch = "arm64ec", + target_arch = "x86_64" +))] +#[derive(Clone, Copy)] +pub struct XSAVE_FORMAT { + pub ControlWord: u16, + pub StatusWord: u16, + pub TagWord: u8, + pub Reserved1: u8, + pub ErrorOpcode: u16, + pub ErrorOffset: u32, + pub ErrorSelector: u16, + pub Reserved2: u16, + pub DataOffset: u32, + pub DataSelector: u16, + pub Reserved3: u16, + pub MxCsr: u32, + pub MxCsr_Mask: u32, + pub FloatRegisters: [M128A; 8], + pub XmmRegisters: [M128A; 16], + pub Reserved4: [u8; 96], +} +#[repr(C)] +#[cfg(target_arch = "x86")] +#[derive(Clone, Copy)] +pub struct XSAVE_FORMAT { + pub ControlWord: u16, + pub StatusWord: u16, + pub TagWord: u8, + pub Reserved1: u8, + pub ErrorOpcode: u16, + pub ErrorOffset: u32, + pub ErrorSelector: u16, + pub Reserved2: u16, + pub DataOffset: u32, + pub DataSelector: u16, + pub Reserved3: u16, + pub MxCsr: u32, + pub MxCsr_Mask: u32, + pub FloatRegisters: [M128A; 8], + pub XmmRegisters: [M128A; 8], + pub Reserved4: [u8; 224], +} + +#[cfg(target_arch = "arm")] +include!("./windows_sys_arm32_shim.rs"); diff --git a/vendor/backtrace/src/windows_sys_arm32_shim.rs b/vendor/backtrace/src/windows_sys_arm32_shim.rs new file mode 100644 index 00000000..4df9064c --- /dev/null +++ b/vendor/backtrace/src/windows_sys_arm32_shim.rs @@ -0,0 +1,53 @@ +pub const ARM_MAX_BREAKPOINTS: usize = 8; +pub const ARM_MAX_WATCHPOINTS: usize = 1; + +#[repr(C)] +#[derive(Clone, Copy)] +pub struct NEON128 { + pub Low: u64, + pub High: i64, +} + +#[repr(C)] +#[derive(Clone, Copy)] +pub union CONTEXT_FloatRegs { + pub Q: [NEON128; 16], + pub D: [u64; 32], + pub S: [u32; 32], +} + +#[repr(C)] +#[derive(Clone, Copy)] +pub struct CONTEXT { + pub ContextFlags: u32, + pub R0: u32, + pub R1: u32, + pub R2: u32, + pub R3: u32, + pub R4: u32, + pub R5: u32, + pub R6: u32, + pub R7: u32, + pub R8: u32, + pub R9: u32, + pub R10: u32, + pub R11: u32, + pub R12: u32, + // Control registers + pub Sp: u32, + pub Lr: u32, + pub Pc: u32, + pub Cpsr: u32, + // Floating-point registers + pub Fpsrc: u32, + pub Padding: u32, + pub u: CONTEXT_FloatRegs, + // Debug registers + pub Bvr: [u32; ARM_MAX_BREAKPOINTS], + pub Bcr: [u32; ARM_MAX_BREAKPOINTS], + pub Wvr: [u32; ARM_MAX_WATCHPOINTS], + pub Wcr: [u32; ARM_MAX_WATCHPOINTS], + pub Padding2: [u32; 2], +} + +pub const IMAGE_FILE_MACHINE_ARMNT: IMAGE_FILE_MACHINE = 0x01c4; diff --git a/vendor/backtrace/tests/accuracy/auxiliary.rs b/vendor/backtrace/tests/accuracy/auxiliary.rs new file mode 100644 index 00000000..9c8015d9 --- /dev/null +++ b/vendor/backtrace/tests/accuracy/auxiliary.rs @@ -0,0 +1,15 @@ +#[inline(never)] +pub fn callback<F>(f: F) +where + F: FnOnce((&'static str, u32)), +{ + f((file!(), line!())) +} + +#[inline(always)] +pub fn callback_inlined<F>(f: F) +where + F: FnOnce((&'static str, u32)), +{ + f((file!(), line!())) +} diff --git a/vendor/backtrace/tests/accuracy/main.rs b/vendor/backtrace/tests/accuracy/main.rs new file mode 100644 index 00000000..b50e4451 --- /dev/null +++ b/vendor/backtrace/tests/accuracy/main.rs @@ -0,0 +1,121 @@ +#![cfg(dbginfo = "collapsible")] +mod auxiliary; + +macro_rules! pos { + () => { + (file!(), line!()) + }; +} + +#[collapse_debuginfo(yes)] +macro_rules! 
check { + ($($pos:expr),*) => ({ + verify(&[$($pos,)* pos!()]); + }) +} + +type Pos = (&'static str, u32); + +#[test] +fn doit() { + if + // Skip musl which is by default statically linked and doesn't support + // dynamic libraries. + !cfg!(target_env = "musl") + // Skip Miri, since it doesn't support dynamic libraries. + && !cfg!(miri) + { + // TODO(#238) this shouldn't have to happen first in this function, but + // currently it does. + let mut dir = std::env::current_exe().unwrap(); + dir.pop(); + if cfg!(windows) { + dir.push("dylib_dep.dll"); + } else if cfg!(target_vendor = "apple") { + dir.push("libdylib_dep.dylib"); + } else if cfg!(target_os = "aix") { + dir.push("libdylib_dep.a"); + } else { + dir.push("libdylib_dep.so"); + } + unsafe { + let lib = libloading::Library::new(&dir).unwrap(); + let api = lib.get::<extern "C" fn(Pos, fn(Pos, Pos))>(b"foo").unwrap(); + api(pos!(), |a, b| { + check!(a, b); + }); + } + } + + outer(pos!()); +} + +#[inline(never)] +fn outer(main_pos: Pos) { + inner(main_pos, pos!()); + inner_inlined(main_pos, pos!()); +} + +#[inline(never)] +#[rustfmt::skip] +fn inner(main_pos: Pos, outer_pos: Pos) { + check!(main_pos, outer_pos); + check!(main_pos, outer_pos); + let inner_pos = pos!(); auxiliary::callback(|aux_pos| { + check!(main_pos, outer_pos, inner_pos, aux_pos); + }); + let inner_pos = pos!(); auxiliary::callback_inlined(|aux_pos| { + check!(main_pos, outer_pos, inner_pos, aux_pos); + }); +} + +#[inline(always)] +#[rustfmt::skip] +fn inner_inlined(main_pos: Pos, outer_pos: Pos) { + check!(main_pos, outer_pos); + check!(main_pos, outer_pos); + + #[inline(always)] + fn inner_further_inlined(main_pos: Pos, outer_pos: Pos, inner_pos: Pos) { + check!(main_pos, outer_pos, inner_pos); + } + inner_further_inlined(main_pos, outer_pos, pos!()); + + let inner_pos = pos!(); auxiliary::callback(|aux_pos| { + check!(main_pos, outer_pos, inner_pos, aux_pos); + }); + let inner_pos = pos!(); auxiliary::callback_inlined(|aux_pos| { + check!(main_pos, outer_pos, inner_pos, aux_pos); + }); + + // this tests a distinction between two independent calls to the inlined function. + // (un)fortunately, LLVM somehow merges two consecutive such calls into one node. + inner_further_inlined(main_pos, outer_pos, pos!()); +} + +fn verify(filelines: &[Pos]) { + let trace = backtrace::Backtrace::new(); + println!("-----------------------------------"); + println!("looking for:"); + for (file, line) in filelines.iter().rev() { + println!("\t{file}:{line}"); + } + println!("found:\n{trace:?}"); + let mut symbols = trace.frames().iter().flat_map(|frame| frame.symbols()); + let mut iter = filelines.iter().rev(); + while let Some((file, line)) = iter.next() { + loop { + let sym = match symbols.next() { + Some(sym) => sym, + None => panic!("failed to find {file}:{line}"), + }; + if let Some(filename) = sym.filename() { + if let Some(lineno) = sym.lineno() { + if filename.ends_with(file) && lineno == *line { + break; + } + } + } + } + } +} diff --git a/vendor/backtrace/tests/common/mod.rs b/vendor/backtrace/tests/common/mod.rs new file mode 100644 index 00000000..3c07934f --- /dev/null +++ b/vendor/backtrace/tests/common/mod.rs @@ -0,0 +1,14 @@ +/// Some tests only make sense in contexts where they can re-exec the test +/// itself. Not all contexts support this, so you can call this method to find +/// out which case you are in. +pub fn cannot_reexec_the_test() -> bool { + // These run in docker containers on CI where they can't re-exec the test, + // so just skip these for CI. 
No other reason this can't run on those + // platforms though. + // Miri does not have support for re-execing a file + cfg!(unix) + && (cfg!(target_arch = "arm") + || cfg!(target_arch = "aarch64") + || cfg!(target_arch = "s390x")) + || cfg!(miri) +} diff --git a/vendor/backtrace/tests/concurrent-panics.rs b/vendor/backtrace/tests/concurrent-panics.rs new file mode 100644 index 00000000..350c247d --- /dev/null +++ b/vendor/backtrace/tests/concurrent-panics.rs @@ -0,0 +1,72 @@ +use std::env; +use std::panic; +use std::process::Command; +use std::sync::atomic::{AtomicBool, Ordering::SeqCst}; +use std::sync::Arc; +use std::thread; + +const PANICS: usize = 100; +const THREADS: usize = 8; +const VAR: &str = "__THE_TEST_YOU_ARE_LUKE"; + +mod common; + +fn main() { + // If we cannot re-exec this test, there's no point in trying to do it. + if common::cannot_reexec_the_test() { + println!("test result: ok"); + return; + } + + if env::var(VAR).is_err() { + parent(); + } else { + child(); + } +} + +fn parent() { + let me = env::current_exe().unwrap(); + let result = Command::new(&me) + .env("RUST_BACKTRACE", "1") + .env(VAR, "1") + .output() + .unwrap(); + if result.status.success() { + println!("test result: ok"); + return; + } + println!("stdout:\n{}", String::from_utf8_lossy(&result.stdout)); + println!("stderr:\n{}", String::from_utf8_lossy(&result.stderr)); + println!("code: {}", result.status); + panic!(); +} + +fn child() { + let done = Arc::new(AtomicBool::new(false)); + let done2 = done.clone(); + let a = thread::spawn(move || loop { + if done2.load(SeqCst) { + break format!("{:?}", backtrace::Backtrace::new()); + } + }); + + let threads = (0..THREADS) + .map(|_| { + thread::spawn(|| { + for _ in 0..PANICS { + assert!(panic::catch_unwind(|| { + panic!(); + }) + .is_err()); + } + }) + }) + .collect::<Vec<_>>(); + for thread in threads { + thread.join().unwrap(); + } + + done.store(true, SeqCst); + a.join().unwrap(); +} diff --git a/vendor/backtrace/tests/current-exe-mismatch.rs b/vendor/backtrace/tests/current-exe-mismatch.rs new file mode 100644 index 00000000..d99c574a --- /dev/null +++ b/vendor/backtrace/tests/current-exe-mismatch.rs @@ -0,0 +1,139 @@ +// rust-lang/rust#101913: when you run your program explicitly via `ld.so`, +// `std::env::current_exe` will return the path of *that* program, and not +// the Rust program itself. + +// This behavior is only known to be supported on Linux and FreeBSD, see +// https://mail-index.netbsd.org/tech-toolchain/2024/07/27/msg004469.html + +use std::io::{BufRead, BufReader}; +use std::path::{Path, PathBuf}; +use std::process::Command; + +mod common; + +fn main() { + if cfg!(target_os = "netbsd") { + // NetBSD doesn't support this silliness, so because this is an fn main test, + // just pass it on there. If we used ui-test or something we'd use + //@ ignore-netbsd + return; + } + + if std::env::var(VAR).is_err() { + // the parent waits for the child; then we then handle either printing + // "test result: ok", "test result: ignored", or panicking. 
+ match parent() { + Ok(()) => { + println!("test result: ok"); + } + Err(EarlyExit::IgnoreTest) => { + println!("test result: ignored"); + } + Err(EarlyExit::IoError(e)) => { + println!("{} parent encountered IoError: {:?}", file!(), e); + panic!(); + } + } + } else { + // println!("{} running child", file!()); + child().unwrap(); + } +} + +const VAR: &str = "__THE_TEST_YOU_ARE_LUKE"; + +#[derive(Debug)] +enum EarlyExit { + IgnoreTest, + IoError(std::io::Error), +} + +impl From<std::io::Error> for EarlyExit { + fn from(e: std::io::Error) -> Self { + EarlyExit::IoError(e) + } +} + +fn parent() -> Result<(), EarlyExit> { + // If we cannot re-exec this test, there's no point in trying to do it. + if common::cannot_reexec_the_test() { + return Err(EarlyExit::IgnoreTest); + } + + let me = std::env::current_exe().unwrap(); + let ld_so = find_interpreter(&me)?; + + // use interp to invoke current exe, yielding child test. + // + // (if you're curious what you might compare this against, you can try + // swapping in the below definition for `result`, which is the easy case of + // not using the ld.so interpreter directly that Rust handled fine even + // prior to resolution of rust-lang/rust#101913.) + // + // let result = Command::new(me).env(VAR, "1").output()?; + let result = Command::new(ld_so).env(VAR, "1").arg(&me).output().unwrap(); + + if result.status.success() { + return Ok(()); + } + println!("stdout:\n{}", String::from_utf8_lossy(&result.stdout)); + println!("stderr:\n{}", String::from_utf8_lossy(&result.stderr)); + println!("code: {}", result.status); + panic!(); +} + +fn child() -> Result<(), EarlyExit> { + let bt = backtrace::Backtrace::new(); + println!("{bt:?}"); + + let mut found_my_name = false; + + let my_filename = file!(); + 'frames: for frame in bt.frames() { + let symbols = frame.symbols(); + if symbols.is_empty() { + continue; + } + + for sym in symbols { + if let Some(filename) = sym.filename() { + if filename.ends_with(my_filename) { + // huzzah! + found_my_name = true; + break 'frames; + } + } + } + } + + assert!(found_my_name); + + Ok(()) +} + +// we use the `readelf` command to extract the path to the interpreter requested +// by our binary. +// +// if we cannot `readelf` for some reason, or if we fail to parse its output, +// then we will just give up on this test (and not treat it as a test failure). 
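// A matching line in the `readelf -l` output typically looks like the following
// (the interpreter path is just an example and varies by platform):
//     [Requesting program interpreter: /lib64/ld-linux-x86-64.so.2]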
+fn find_interpreter(me: &Path) -> Result<PathBuf, EarlyExit> { + let result = Command::new("readelf") + .arg("-l") + .arg(me) + .output() + .map_err(|_| EarlyExit::IgnoreTest)?; + if result.status.success() { + let r = BufReader::new(&result.stdout[..]); + for line in r.lines() { + let line = line?; + let line = line.trim(); + let prefix = "[Requesting program interpreter: "; + if let Some((_, suffix)) = line.split_once(prefix) { + if let Some((found_path, _)) = suffix.rsplit_once("]") { + return Ok(found_path.into()); + } + } + } + } + Err(EarlyExit::IgnoreTest) +} diff --git a/vendor/backtrace/tests/long_fn_name.rs b/vendor/backtrace/tests/long_fn_name.rs new file mode 100644 index 00000000..4a03825b --- /dev/null +++ b/vendor/backtrace/tests/long_fn_name.rs @@ -0,0 +1,48 @@ +use backtrace::Backtrace; + +// 50-character module name +mod _234567890_234567890_234567890_234567890_234567890 { + // 50-character struct name + #[allow(non_camel_case_types)] + pub struct _234567890_234567890_234567890_234567890_234567890<T>(T); + impl<T> _234567890_234567890_234567890_234567890_234567890<T> { + #[allow(dead_code)] + pub fn new() -> crate::Backtrace { + crate::Backtrace::new() + } + } +} + +// Long function names must be truncated to (MAX_SYM_NAME - 1) characters. +// Only run this test for msvc, since gnu prints "<no info>" for all frames. +#[test] +#[cfg(all(windows, target_env = "msvc"))] +fn test_long_fn_name() { + use _234567890_234567890_234567890_234567890_234567890::_234567890_234567890_234567890_234567890_234567890 as S; + + // 10 repetitions of struct name, so fully qualified function name is + // atleast 10 * (50 + 50) * 2 = 2000 characters long. + // It's actually longer since it also includes `::`, `<>` and the + // name of the current module + let bt = S::<S<S<S<S<S<S<S<S<S<i32>>>>>>>>>>::new(); + println!("{bt:?}"); + + let mut found_long_name_frame = false; + + for frame in bt.frames() { + let symbols = frame.symbols(); + if symbols.is_empty() { + continue; + } + + if let Some(function_name) = symbols[0].name() { + let function_name = function_name.as_str().unwrap(); + if function_name.contains("::_234567890_234567890_234567890_234567890_234567890") { + found_long_name_frame = true; + assert!(function_name.len() > 200); + } + } + } + + assert!(found_long_name_frame); +} diff --git a/vendor/backtrace/tests/sgx-image-base.rs b/vendor/backtrace/tests/sgx-image-base.rs new file mode 100644 index 00000000..c29a8b67 --- /dev/null +++ b/vendor/backtrace/tests/sgx-image-base.rs @@ -0,0 +1,56 @@ +#![cfg(all(target_env = "sgx", target_vendor = "fortanix"))] +#![feature(sgx_platform)] + +#[cfg(feature = "std")] +#[test] +fn sgx_image_base_with_std() { + use backtrace::trace; + + let image_base = std::os::fortanix_sgx::mem::image_base(); + + let mut frame_ips = Vec::new(); + trace(|frame| { + frame_ips.push(frame.ip()); + true + }); + + assert!(frame_ips.len() > 0); + for ip in frame_ips { + let ip: u64 = ip as _; + assert!(ip < image_base); + } +} + +#[cfg(not(feature = "std"))] +#[test] +fn sgx_image_base_no_std() { + use backtrace::trace_unsynchronized; + + fn guess_image_base() -> u64 { + let mut top_frame_ip = None; + unsafe { + trace_unsynchronized(|frame| { + top_frame_ip = Some(frame.ip()); + false + }); + } + top_frame_ip.unwrap() as u64 & 0xFFFFFF000000 + } + + let image_base = guess_image_base(); + backtrace::set_image_base(image_base as _); + + let mut frame_ips = Vec::new(); + unsafe { + trace_unsynchronized(|frame| { + frame_ips.push(frame.ip()); + true + }); + } + + 
+    assert!(frame_ips.len() > 0);
+    for ip in frame_ips {
+        let ip: u64 = ip as _;
+        assert!(ip < image_base);
+    }
+}
diff --git a/vendor/backtrace/tests/skip_inner_frames.rs b/vendor/backtrace/tests/skip_inner_frames.rs
new file mode 100644
index 00000000..e62a1603
--- /dev/null
+++ b/vendor/backtrace/tests/skip_inner_frames.rs
@@ -0,0 +1,51 @@
+use backtrace::Backtrace;
+use core::ffi::c_void;
+
+// This test only works on platforms which have a working `symbol_address`
+// function for frames which reports the starting address of a symbol. As a
+// result it's only enabled on a few platforms.
+const ENABLED: bool = cfg!(all(
+    // Windows hasn't really been tested, and macOS doesn't support actually
+    // finding an enclosing frame, so disable this
+    target_os = "linux",
+    // On ARM finding the enclosing function is simply returning the ip itself.
+    not(target_arch = "arm"),
+));
+
+#[test]
+#[inline(never)]
+fn backtrace_new_unresolved_should_start_with_call_site_trace() {
+    if !ENABLED {
+        return;
+    }
+    let mut b = Backtrace::new_unresolved();
+    b.resolve();
+    println!("{b:?}");
+
+    assert!(!b.frames().is_empty());
+
+    let this_ip = backtrace_new_unresolved_should_start_with_call_site_trace as *mut c_void;
+    println!("this_ip: {:p}", this_ip);
+    let frame_ip = b.frames().first().unwrap().symbol_address();
+    assert_eq!(this_ip, frame_ip);
+}
+
+#[test]
+#[inline(never)]
+fn backtrace_new_should_start_with_call_site_trace() {
+    if !ENABLED {
+        return;
+    }
+    let b = Backtrace::new();
+    println!("{b:?}");
+
+    assert!(!b.frames().is_empty());
+
+    let this_ip = backtrace_new_should_start_with_call_site_trace as *mut c_void;
+    let frame_ip = b.frames().first().unwrap().symbol_address();
+    assert_eq!(this_ip, frame_ip);
+
+    let trace = format!("{b:?}");
+    // FIXME: need more stacktrace content tests
+    assert!(trace.ends_with("\n"));
+}
diff --git a/vendor/backtrace/tests/smoke.rs b/vendor/backtrace/tests/smoke.rs
new file mode 100644
index 00000000..fd5684f9
--- /dev/null
+++ b/vendor/backtrace/tests/smoke.rs
@@ -0,0 +1,325 @@
+use backtrace::Frame;
+use core::ffi::c_void;
+use std::ptr;
+use std::thread;
+
+fn get_actual_fn_pointer(fp: *mut c_void) -> *mut c_void {
+    // On AIX, the function name references a function descriptor.
+    // A function descriptor consists of (See https://reviews.llvm.org/D62532)
+    // * The address of the entry point of the function.
+    // * The TOC base address for the function.
+    // * The environment pointer.
+    // Deref `fp` directly so that we can get the address of `fp`'s
+    // entry point in text section.
+    //
+    // For TOC, one can find more information in
+    // https://www.ibm.com/docs/en/aix/7.2?topic=program-understanding-programming-toc
+    if cfg!(target_os = "aix") {
+        unsafe {
+            let actual_fn_entry = *(fp as *const *mut c_void);
+            actual_fn_entry
+        }
+    } else {
+        fp as *mut c_void
+    }
+}
+
+#[test]
+// FIXME: shouldn't ignore this test on i686-msvc, unsure why it's failing
+#[cfg_attr(all(target_arch = "x86", target_env = "msvc"), ignore)]
+#[inline(never)]
+#[rustfmt::skip] // we care about line numbers here
+fn smoke_test_frames() {
+    frame_1(line!());
+    #[inline(never)] fn frame_1(start_line: u32) { frame_2(start_line) }
+    #[inline(never)] fn frame_2(start_line: u32) { frame_3(start_line) }
+    #[inline(never)] fn frame_3(start_line: u32) { frame_4(start_line) }
+    #[inline(never)] fn frame_4(start_line: u32) {
+        let mut v = Vec::new();
+        backtrace::trace(|cx| {
+            v.push(cx.clone());
+            true
+        });
+
+        // Various platforms have various bits of weirdness about their
+        // backtraces. To find a good starting spot let's search through the
+        // frames
+        let target = get_actual_fn_pointer(frame_4 as *mut c_void);
+        let offset = v
+            .iter()
+            .map(|frame| frame.symbol_address())
+            .enumerate()
+            .filter_map(|(i, sym)| {
+                if sym >= target {
+                    Some((sym, i))
+                } else {
+                    None
+                }
+            })
+            .min()
+            .unwrap()
+            .1;
+        let mut frames = v[offset..].iter();
+
+        assert_frame(
+            frames.next().unwrap(),
+            get_actual_fn_pointer(frame_4 as *mut c_void) as usize,
+            "frame_4",
+            "tests/smoke.rs",
+            start_line + 6,
+            9,
+        );
+        assert_frame(
+            frames.next().unwrap(),
+            get_actual_fn_pointer(frame_3 as *mut c_void) as usize,
+            "frame_3",
+            "tests/smoke.rs",
+            start_line + 3,
+            52,
+        );
+        assert_frame(
+            frames.next().unwrap(),
+            get_actual_fn_pointer(frame_2 as *mut c_void) as usize,
+            "frame_2",
+            "tests/smoke.rs",
+            start_line + 2,
+            52,
+        );
+        assert_frame(
+            frames.next().unwrap(),
+            get_actual_fn_pointer(frame_1 as *mut c_void) as usize,
+            "frame_1",
+            "tests/smoke.rs",
+            start_line + 1,
+            52,
+        );
+        assert_frame(
+            frames.next().unwrap(),
+            get_actual_fn_pointer(smoke_test_frames as *mut c_void) as usize,
+            "smoke_test_frames",
+            "",
+            0,
+            0,
+        );
+    }
+
+    fn assert_frame(
+        frame: &Frame,
+        actual_fn_pointer: usize,
+        expected_name: &str,
+        expected_file: &str,
+        expected_line: u32,
+        expected_col: u32,
+    ) {
+        backtrace::resolve_frame(frame, |sym| {
+            print!("symbol ip:{:?} address:{:?} ", frame.ip(), frame.symbol_address());
+            if let Some(name) = sym.name() {
+                print!("name:{name} ");
+            }
+            if let Some(file) = sym.filename() {
+                print!("file:{} ", file.display());
+            }
+            if let Some(lineno) = sym.lineno() {
+                print!("lineno:{lineno} ");
+            }
+            if let Some(colno) = sym.colno() {
+                print!("colno:{colno} ");
+            }
+            println!();
+        });
+
+        let ip = frame.ip() as usize;
+        let sym = frame.symbol_address() as usize;
+        assert!(ip >= sym);
+        assert!(
+            sym >= actual_fn_pointer,
+            "{:?} < {:?} ({} {}:{}:{})",
+            sym as *const usize,
+            actual_fn_pointer as *const usize,
+            expected_name,
+            expected_file,
+            expected_line,
+            expected_col,
+        );
+
+        // windows dbghelp is *quite* liberal (and wrong) in many of its reports
+        // right now...
+        //
+        // This assertion can also fail for release builds, so skip it there
+        if cfg!(debug_assertions) {
+            assert!(sym - actual_fn_pointer < 1024);
+        }
+
+        let mut resolved = 0;
+
+        let mut name = None;
+        let mut addr = None;
+        let mut col = None;
+        let mut line = None;
+        let mut file = None;
+        backtrace::resolve_frame(frame, |sym| {
+            resolved += 1;
+            name = sym.name().map(|v| v.to_string());
+            addr = sym.addr();
+            col = sym.colno();
+            line = sym.lineno();
+            file = sym.filename().map(|v| v.to_path_buf());
+        });
+        assert!(resolved > 0);
+
+        let name = name.expect("didn't find a name");
+
+        // in release mode names get weird as functions can get merged
+        // together with `mergefunc`, so only assert this in debug mode
+        if cfg!(debug_assertions) {
+            assert!(
+                name.contains(expected_name),
+                "didn't find `{expected_name}` in `{name}`"
+            );
+        }
+
+        addr.expect("didn't find a symbol");
+
+        if cfg!(debug_assertions) {
+            let line = line.expect("didn't find a line number");
+            let file = file.expect("didn't find a file name");
+            if !expected_file.is_empty() {
+                assert!(
+                    file.ends_with(expected_file),
+                    "{file:?} didn't end with {expected_file:?}"
+                );
+            }
+            if expected_line != 0 {
+                assert_eq!(
+                    line,
+                    expected_line,
+                    "bad line number on frame for `{expected_name}`: {line} != {expected_line}");
+            }
+
+            // dbghelp on MSVC doesn't support column numbers
+            if !cfg!(target_env = "msvc") {
+                let col = col.expect("didn't find a column number");
+                if expected_col != 0 {
+                    assert_eq!(
+                        col,
+                        expected_col,
+                        "bad column number on frame for `{expected_name}`: {col} != {expected_col}");
+                }
+            }
+        }
+    }
+}
+
+#[test]
+fn many_threads() {
+    let threads = (0..16)
+        .map(|_| {
+            thread::spawn(|| {
+                for _ in 0..16 {
+                    backtrace::trace(|frame| {
+                        backtrace::resolve(frame.ip(), |symbol| {
+                            let _s = symbol.name().map(|s| s.to_string());
+                        });
+                        true
+                    });
+                }
+            })
+        })
+        .collect::<Vec<_>>();
+
+    for t in threads {
+        t.join().unwrap()
+    }
+}
+
+#[test]
+#[cfg(feature = "serde")]
+fn is_serde() {
+    extern crate serde;
+
+    fn is_serialize<T: serde::ser::Serialize>() {}
+    fn is_deserialize<T: serde::de::DeserializeOwned>() {}
+
+    is_serialize::<backtrace::Backtrace>();
+    is_deserialize::<backtrace::Backtrace>();
+}
+
+#[test]
+fn sp_smoke_test() {
+    let mut refs = vec![];
+    recursive_stack_references(&mut refs);
+    return;
+
+    #[inline(never)]
+    fn recursive_stack_references(refs: &mut Vec<*mut c_void>) {
+        assert!(refs.len() < 5);
+
+        let mut x = refs.len();
+        refs.push(ptr::addr_of_mut!(x).cast());
+
+        if refs.len() < 5 {
+            recursive_stack_references(refs);
+            eprintln!("exiting: {x}");
+            return;
+        }
+
+        backtrace::trace(make_trace_closure(refs));
+        eprintln!("exiting: {x}");
+    }
+
+    // NB: the following `make_*` functions are pulled out of line, rather than
+    // defining their results as inline closures at their call sites, so that
+    // the resulting closures don't have "recursive_stack_references" in their
+    // mangled names.
+
+    fn make_trace_closure<'a>(
+        refs: &'a mut Vec<*mut c_void>,
+    ) -> impl FnMut(&backtrace::Frame) -> bool + 'a {
+        let mut child_sp = None;
+        let mut child_ref = None;
+        move |frame| {
+            eprintln!("\n=== frame ===================================");
+
+            let mut is_recursive_stack_references = false;
+            backtrace::resolve(frame.ip(), |sym| {
+                is_recursive_stack_references |=
+                    sym.name()
+                        .and_then(|name| name.as_str())
+                        .map_or(false, |name| {
+                            eprintln!("name = {name}");
+                            name.contains("recursive_stack_references")
+                        })
+            });
+
+            let sp = frame.sp();
+            eprintln!("sp = {sp:p}");
+            if sp as usize == 0 {
+                // If the SP is null, then we don't have an implementation for
+                // getting the SP on this target. Just keep walking the stack,
+                // but don't make our assertions about the on-stack pointers and
+                // SP values.
+                return true;
+            }
+
+            // The stack grows down.
+            if let Some(child_sp) = child_sp {
+                assert!(child_sp <= sp);
+            }
+
+            if is_recursive_stack_references {
+                let r = refs.pop().unwrap();
+                eprintln!("ref = {:p}", r);
+                if sp as usize != 0 {
+                    assert!(r > sp);
+                    if let Some(child_ref) = child_ref {
+                        assert!(sp >= child_ref);
+                    }
+                }
+                child_ref = Some(r);
+            }
+
+            child_sp = Some(sp);
+            true
+        }
+    }
+}
