author     mo khan <mo@mokhan.ca>  2025-07-15 16:37:08 -0600
committer  mo khan <mo@mokhan.ca>  2025-07-17 16:30:22 -0600
commit     45df4d0d9b577fecee798d672695fe24ff57fb1b (patch)
tree       1b99bf645035b58e0d6db08c7a83521f41f7a75b /vendor/stacker/src
parent     f94f79608393d4ab127db63cc41668445ef6b243 (diff)
feat: migrate from Cedar to SpiceDB authorization system
This is a major architectural change that replaces the Cedar policy-based
authorization system with SpiceDB's relation-based authorization.

Key changes:

- Migrate from Rust to Go implementation
- Replace Cedar policies with SpiceDB schema and relationships
- Switch from Envoy `ext_authz` with Cedar to SpiceDB permission checks
- Update build system and dependencies for Go ecosystem
- Maintain Envoy integration for external authorization

This change enables more flexible permission modeling through SpiceDB's
Google Zanzibar-inspired relation-based system, supporting complex
hierarchical permissions that were difficult to express in Cedar.

Breaking change: existing Cedar policies and Rust-based configuration will
no longer work and need to be migrated to SpiceDB schema.
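The new SpiceDB schema is not part of this diff (it only removes vendored Rust sources), so as a rough, hypothetical illustration of what "relation-based" means, here is a self-contained Rust sketch of Zanzibar-style relation tuples. All names (`doc:readme`, `folder:root`, `viewer`, `parent`) are invented for the sketch and not taken from this repository; a real deployment would express this in SpiceDB's schema language and query it through a SpiceDB client.

    use std::collections::HashSet;

    /// A Zanzibar-style relation tuple: object # relation @ subject.
    #[derive(Clone, PartialEq, Eq, Hash)]
    struct Tuple {
        object: &'static str,
        relation: &'static str,
        subject: &'static str,
    }

    /// Check whether `subject` has `relation` on `object`, either directly or
    /// through a (hypothetical) `parent` relation. Hierarchies like this were
    /// the hard case in Cedar that relation-based modeling handles naturally.
    fn check(tuples: &HashSet<Tuple>, object: &str, relation: &str, subject: &str) -> bool {
        if tuples
            .iter()
            .any(|t| t.object == object && t.relation == relation && t.subject == subject)
        {
            return true;
        }
        // Walk up the hierarchy: a relation on a parent implies it on the child.
        tuples
            .iter()
            .filter(|t| t.object == object && t.relation == "parent")
            .any(|t| check(tuples, t.subject, relation, subject))
    }

    fn main() {
        let tuples: HashSet<Tuple> = [
            Tuple { object: "doc:readme", relation: "parent", subject: "folder:root" },
            Tuple { object: "folder:root", relation: "viewer", subject: "user:mo" },
        ]
        .into_iter()
        .collect();
        // user:mo can view doc:readme because they can view its parent folder.
        assert!(check(&tuples, "doc:readme", "viewer", "user:mo"));
    }

The point is that a permission check is a graph walk over stored relationships, so hierarchical rules such as "viewers of a folder can view the documents inside it" come from data rather than from policy text.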
Diffstat (limited to 'vendor/stacker/src')
-rw-r--r--  vendor/stacker/src/alloc_stack_restore_guard.rs | 47
-rw-r--r--  vendor/stacker/src/arch/asm.h | 5
-rw-r--r--  vendor/stacker/src/arch/windows.c | 5
-rw-r--r--  vendor/stacker/src/backends/fallback.rs | 4
-rw-r--r--  vendor/stacker/src/backends/macos.rs | 6
-rw-r--r--  vendor/stacker/src/backends/mod.rs | 28
-rw-r--r--  vendor/stacker/src/backends/openbsd.rs | 9
-rw-r--r--  vendor/stacker/src/backends/unix.rs | 40
-rw-r--r--  vendor/stacker/src/backends/windows.rs | 142
-rw-r--r--  vendor/stacker/src/lib.rs | 181
-rw-r--r--  vendor/stacker/src/mmap_stack_restore_guard.rs | 105
11 files changed, 0 insertions, 572 deletions
diff --git a/vendor/stacker/src/alloc_stack_restore_guard.rs b/vendor/stacker/src/alloc_stack_restore_guard.rs
deleted file mode 100644
index ef2babb7..00000000
--- a/vendor/stacker/src/alloc_stack_restore_guard.rs
+++ /dev/null
@@ -1,47 +0,0 @@
-use crate::{get_stack_limit, set_stack_limit};
-
-pub struct StackRestoreGuard {
- new_stack: *mut u8,
- stack_bytes: usize,
- old_stack_limit: Option<usize>,
-}
-
-const ALIGNMENT: usize = 16;
-
-impl StackRestoreGuard {
- pub fn new(stack_bytes: usize) -> StackRestoreGuard {
- // On these platforms we do not use stack guards. This is very unfortunate,
- // but there is not much we can do about it without OS support.
- // We simply allocate the requested size from the global allocator with a suitable
- // alignment.
- let stack_bytes = stack_bytes
- .checked_add(ALIGNMENT - 1)
- .expect("unreasonably large stack requested")
- / ALIGNMENT
- * ALIGNMENT;
- let layout = std::alloc::Layout::from_size_align(stack_bytes, ALIGNMENT).unwrap();
- let ptr = unsafe { std::alloc::alloc(layout) };
- assert!(!ptr.is_null(), "unable to allocate stack");
- StackRestoreGuard {
- new_stack: ptr,
- stack_bytes,
- old_stack_limit: get_stack_limit(),
- }
- }
-
- pub fn stack_area(&self) -> (*mut u8, usize) {
- (self.new_stack, self.stack_bytes)
- }
-}
-
-impl Drop for StackRestoreGuard {
- fn drop(&mut self) {
- unsafe {
- std::alloc::dealloc(
- self.new_stack,
- std::alloc::Layout::from_size_align_unchecked(self.stack_bytes, ALIGNMENT),
- );
- }
- set_stack_limit(self.old_stack_limit);
- }
-}
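The size computation in `StackRestoreGuard::new` above is the standard round-up-to-a-multiple idiom: add `ALIGNMENT - 1`, then let the truncating division snap the result to a multiple of `ALIGNMENT`. A minimal sketch of just that arithmetic, keeping the deleted code's overflow behavior (the helper name is invented):

    const ALIGNMENT: usize = 16;

    /// Round `n` up to the next multiple of ALIGNMENT, panicking on overflow
    /// via `checked_add` exactly as the deleted guard does.
    fn round_up_to_alignment(n: usize) -> usize {
        n.checked_add(ALIGNMENT - 1)
            .expect("unreasonably large stack requested")
            / ALIGNMENT
            * ALIGNMENT
    }

    fn main() {
        assert_eq!(round_up_to_alignment(1), 16);
        assert_eq!(round_up_to_alignment(16), 16);
        assert_eq!(round_up_to_alignment(17), 32);
    }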
diff --git a/vendor/stacker/src/arch/asm.h b/vendor/stacker/src/arch/asm.h
deleted file mode 100644
index 56c9d289..00000000
--- a/vendor/stacker/src/arch/asm.h
+++ /dev/null
@@ -1,5 +0,0 @@
-#if defined(APPLE) || (defined(WINDOWS) && defined(X86))
-#define GLOBAL(name) .globl _ ## name; _ ## name
-#else
-#define GLOBAL(name) .globl name; name
-#endif
diff --git a/vendor/stacker/src/arch/windows.c b/vendor/stacker/src/arch/windows.c
deleted file mode 100644
index 89485a0c..00000000
--- a/vendor/stacker/src/arch/windows.c
+++ /dev/null
@@ -1,5 +0,0 @@
-#include <windows.h>
-
-PVOID __stacker_get_current_fiber() {
- return GetCurrentFiber();
-}
diff --git a/vendor/stacker/src/backends/fallback.rs b/vendor/stacker/src/backends/fallback.rs
deleted file mode 100644
index a812cbc9..00000000
--- a/vendor/stacker/src/backends/fallback.rs
+++ /dev/null
@@ -1,4 +0,0 @@
-#[inline(always)]
-pub unsafe fn guess_os_stack_limit() -> Option<usize> {
- None
-}
diff --git a/vendor/stacker/src/backends/macos.rs b/vendor/stacker/src/backends/macos.rs
deleted file mode 100644
index 42b28516..00000000
--- a/vendor/stacker/src/backends/macos.rs
+++ /dev/null
@@ -1,6 +0,0 @@
-pub unsafe fn guess_os_stack_limit() -> Option<usize> {
- Some(
- libc::pthread_get_stackaddr_np(libc::pthread_self()) as usize
- - libc::pthread_get_stacksize_np(libc::pthread_self()) as usize,
- )
-}
diff --git a/vendor/stacker/src/backends/mod.rs b/vendor/stacker/src/backends/mod.rs
deleted file mode 100644
index 8da895d6..00000000
--- a/vendor/stacker/src/backends/mod.rs
+++ /dev/null
@@ -1,28 +0,0 @@
-cfg_if! {
- if #[cfg(miri)] {
- mod fallback;
- pub use fallback::guess_os_stack_limit;
- } else if #[cfg(windows)] {
- pub(crate) mod windows;
- pub use windows::guess_os_stack_limit;
- } else if #[cfg(any(
- target_os = "linux",
- target_os = "solaris",
- target_os = "netbsd",
- target_os = "freebsd",
- target_os = "dragonfly",
- target_os = "illumos"
- ))] {
- mod unix;
- pub use unix::guess_os_stack_limit;
- } else if #[cfg(target_os = "openbsd")] {
- mod openbsd;
- pub use openbsd::guess_os_stack_limit;
- } else if #[cfg(target_os = "macos")] {
- mod macos;
- pub use macos::guess_os_stack_limit;
- } else {
- mod fallback;
- pub use fallback::guess_os_stack_limit;
- }
-}
diff --git a/vendor/stacker/src/backends/openbsd.rs b/vendor/stacker/src/backends/openbsd.rs
deleted file mode 100644
index ee582d84..00000000
--- a/vendor/stacker/src/backends/openbsd.rs
+++ /dev/null
@@ -1,9 +0,0 @@
-pub unsafe fn guess_os_stack_limit() -> Option<usize> {
- let mut stackinfo = std::mem::MaybeUninit::<libc::stack_t>::uninit();
- let res = libc::pthread_stackseg_np(libc::pthread_self(), stackinfo.as_mut_ptr());
- if res != 0 {
- return None;
- }
- let stackinfo = stackinfo.assume_init();
- Some(stackinfo.ss_sp as usize - stackinfo.ss_size)
-}
diff --git a/vendor/stacker/src/backends/unix.rs b/vendor/stacker/src/backends/unix.rs
deleted file mode 100644
index 97ef209f..00000000
--- a/vendor/stacker/src/backends/unix.rs
+++ /dev/null
@@ -1,40 +0,0 @@
-#[cfg(any(target_os = "freebsd", target_os = "dragonfly", target_os = "illumos"))]
-use libc::pthread_attr_get_np as get_attr;
-#[cfg(any(target_os = "linux", target_os = "solaris", target_os = "netbsd"))]
-use libc::pthread_getattr_np as get_attr;
-
-pub unsafe fn guess_os_stack_limit() -> Option<usize> {
- let mut attr = PthreadAttr::new()?;
- (get_attr(libc::pthread_self(), &mut attr.0) == 0).then_some(())?;
- let mut stackaddr = std::ptr::null_mut();
- let mut stacksize = 0;
- (libc::pthread_attr_getstack(&attr.0, &mut stackaddr, &mut stacksize) == 0).then_some(())?;
- Some(stackaddr as usize)
-}
-
-struct PthreadAttr(libc::pthread_attr_t);
-
-impl Drop for PthreadAttr {
- fn drop(&mut self) {
- unsafe {
- let ret = libc::pthread_attr_destroy(&mut self.0);
- if ret != 0 {
- let err = std::io::Error::last_os_error();
- panic!(
- "pthread_attr_destroy failed with error code {}: {}",
- ret, err
- );
- }
- }
- }
-}
-
-impl PthreadAttr {
- unsafe fn new() -> Option<Self> {
- let mut attr = std::mem::MaybeUninit::<libc::pthread_attr_t>::uninit();
- if libc::pthread_attr_init(attr.as_mut_ptr()) != 0 {
- return None;
- }
- Some(PthreadAttr(attr.assume_init()))
- }
-}
diff --git a/vendor/stacker/src/backends/windows.rs b/vendor/stacker/src/backends/windows.rs
deleted file mode 100644
index 69c76133..00000000
--- a/vendor/stacker/src/backends/windows.rs
+++ /dev/null
@@ -1,142 +0,0 @@
-use libc::c_void;
-use std::io;
-use std::ptr;
-use windows_sys::Win32::Foundation::BOOL;
-use windows_sys::Win32::System::Memory::VirtualQuery;
-use windows_sys::Win32::System::Threading::{
- ConvertFiberToThread, ConvertThreadToFiber, CreateFiber, DeleteFiber, IsThreadAFiber,
- SetThreadStackGuarantee, SwitchToFiber,
-};
-
-// Make sure the libstacker.a (implemented in C) is linked.
-// See https://github.com/rust-lang/rust/issues/65610
-#[link(name = "stacker")]
-extern "C" {
- fn __stacker_get_current_fiber() -> *mut c_void;
-}
-
-struct FiberInfo<F> {
- callback: std::mem::MaybeUninit<F>,
- panic: Option<Box<dyn std::any::Any + Send + 'static>>,
- parent_fiber: *mut c_void,
-}
-
-unsafe extern "system" fn fiber_proc<F: FnOnce()>(data: *mut c_void) {
- // This function is the entry point to our inner fiber, and as argument we get an
- // instance of `FiberInfo`. We will set up the "runtime" for the callback and execute
- // it.
- let data = &mut *(data as *mut FiberInfo<F>);
- let old_stack_limit = crate::get_stack_limit();
- crate::set_stack_limit(guess_os_stack_limit());
- let callback = data.callback.as_ptr();
- data.panic = std::panic::catch_unwind(std::panic::AssertUnwindSafe(callback.read())).err();
-
- // Restore to the previous Fiber
- crate::set_stack_limit(old_stack_limit);
- SwitchToFiber(data.parent_fiber);
-}
-
-pub fn _grow(stack_size: usize, callback: &mut dyn FnMut()) {
- // Fibers (or stackful coroutines) are the only official way to create new stacks on the
- // same thread on Windows. So in order to extend the stack we create a fiber and switch
- // to it so we can use its stack. After running `callback` within our fiber, we switch
- // back to the current stack and destroy the fiber and its associated stack.
- unsafe {
- let was_fiber = IsThreadAFiber() == 1 as BOOL;
- let mut data = FiberInfo {
- callback: std::mem::MaybeUninit::new(callback),
- panic: None,
- parent_fiber: {
- if was_fiber {
- // Get a handle to the current fiber. We need to use a C implementation
- // for this as GetCurrentFiber is a header-only function.
- __stacker_get_current_fiber()
- } else {
- // Convert the current thread to a fiber, so we are able to switch back
- // to the current stack. Threads converted to fibers still act like
- // regular threads, but they have associated fiber data. We later
- // convert it back to a regular thread and free the fiber data.
- ConvertThreadToFiber(ptr::null_mut())
- }
- },
- };
-
- if data.parent_fiber.is_null() {
- panic!(
- "unable to convert thread to fiber: {}",
- io::Error::last_os_error()
- );
- }
-
- let fiber = CreateFiber(
- stack_size as usize,
- Some(fiber_proc::<&mut dyn FnMut()>),
- &mut data as *mut FiberInfo<&mut dyn FnMut()> as *mut _,
- );
- if fiber.is_null() {
- panic!("unable to allocate fiber: {}", io::Error::last_os_error());
- }
-
- // Switch to the fiber we created. This changes stacks and starts executing
- // fiber_proc on it. fiber_proc will run `callback` and then switch back to run the
- // next statement.
- SwitchToFiber(fiber);
- DeleteFiber(fiber);
-
- // Clean-up.
- if !was_fiber && ConvertFiberToThread() == 0 {
- // FIXME: Perhaps should not panic here?
- panic!(
- "unable to convert back to thread: {}",
- io::Error::last_os_error()
- );
- }
-
- if let Some(p) = data.panic {
- std::panic::resume_unwind(p);
- }
- }
-}
-
-#[inline(always)]
-fn get_thread_stack_guarantee() -> Option<usize> {
- let min_guarantee = if cfg!(target_pointer_width = "32") {
- 0x1000
- } else {
- 0x2000
- };
- let mut stack_guarantee = 0;
- unsafe {
- // Read the current thread stack guarantee
- // This is the stack reserved for stack overflow
- // exception handling.
- // This doesn't return the true value so we need
- // some further logic to calculate the real stack
- // guarantee. This logic is what is used on x86-32 and
- // x86-64 Windows 10. Other versions and platforms may differ.
- let ret = SetThreadStackGuarantee(&mut stack_guarantee);
- if ret == 0 {
- return None;
- }
- };
- Some(std::cmp::max(stack_guarantee, min_guarantee) as usize + 0x1000)
-}
-
-#[inline(always)]
-pub unsafe fn guess_os_stack_limit() -> Option<usize> {
- // Query the allocation which contains our stack pointer in order
- // to discover the size of the stack
- //
- // FIXME: we could read stack base from the TIB, specifically the 3rd element of it.
- type QueryT = windows_sys::Win32::System::Memory::MEMORY_BASIC_INFORMATION;
- let mut mi = std::mem::MaybeUninit::<QueryT>::uninit();
- let res = VirtualQuery(
- psm::stack_pointer() as *const _,
- mi.as_mut_ptr(),
- std::mem::size_of::<QueryT>() as usize,
- );
- if res == 0 {
- return None;
- }
- Some(mi.assume_init().AllocationBase as usize + get_thread_stack_guarantee()? + 0x1000)
-}
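Note how panics are shuttled across the fiber boundary above: `fiber_proc` catches the panic on the inner stack, stores the payload in `FiberInfo::panic`, and `_grow` re-raises it with `resume_unwind` only after switching back, since unwinding across the fiber switch itself must be avoided. A minimal, platform-independent sketch of the same pattern, with a hypothetical `run_elsewhere` standing in for the stack switch:

    /// Run `callback` as if on another stack: catch any panic as a value,
    /// "switch back", then resume the unwind on the original stack.
    fn run_elsewhere(callback: impl FnOnce()) {
        let panic = std::panic::catch_unwind(std::panic::AssertUnwindSafe(callback)).err();
        // ... in the real code, SwitchToFiber/psm::on_stack returns here ...
        if let Some(p) = panic {
            std::panic::resume_unwind(p);
        }
    }

    fn main() {
        let result = std::panic::catch_unwind(|| run_elsewhere(|| panic!("boom")));
        assert!(result.is_err()); // the panic crossed the boundary as a value
    }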
diff --git a/vendor/stacker/src/lib.rs b/vendor/stacker/src/lib.rs
deleted file mode 100644
index ee5803c3..00000000
--- a/vendor/stacker/src/lib.rs
+++ /dev/null
@@ -1,181 +0,0 @@
-//! A library to help grow the stack when it runs out of space.
-//!
-//! This is an implementation of manually instrumented segmented stacks where points in a program's
-//! control flow are annotated with "maybe grow the stack here". Each point of annotation indicates
-//! how far away from the end of the stack it's allowed to be, plus the amount of stack to allocate
-//! if it does reach the end.
-//!
-//! Once a program has reached the end of its stack, a temporary stack on the heap is allocated and
-//! is switched to for the duration of a closure.
-//!
-//! For a set of lower-level primitives, consider the `psm` crate.
-//!
-//! # Examples
-//!
-//! ```
-//! // Grow the stack if we are within the "red zone" of 32K, and if we allocate
-//! // a new stack allocate 1MB of stack space.
-//! //
-//! // If we're already in bounds, just run the provided closure on current stack.
-//! stacker::maybe_grow(32 * 1024, 1024 * 1024, || {
-//! // guaranteed to have at least 32K of stack
-//! });
-//! ```
-
-#![allow(improper_ctypes)]
-
-#[macro_use]
-extern crate cfg_if;
-extern crate libc;
-#[cfg(windows)]
-extern crate windows_sys;
-#[macro_use]
-extern crate psm;
-
-mod backends;
-
-use std::cell::Cell;
-
-/// Grows the call stack if necessary.
-///
-/// This function is intended to be called at manually instrumented points in a program where
-/// recursion is known to happen quite a bit. This function will check to see if we're within
-/// `red_zone` bytes of the end of the stack, and if so it will allocate a new stack of at least
-/// `stack_size` bytes.
-///
-/// The closure `f` is guaranteed to run on a stack with at least `red_zone` bytes, and it will be
-/// run on the current stack if there's space available.
-#[inline(always)]
-pub fn maybe_grow<R, F: FnOnce() -> R>(red_zone: usize, stack_size: usize, callback: F) -> R {
- // If we can't guess the remaining stack (unsupported on some platforms) we immediately grow
- // the stack and then cache the new stack size (which we do know now because we allocated it).
- let enough_space = match remaining_stack() {
- Some(remaining) => remaining >= red_zone,
- None => false,
- };
- if enough_space {
- callback()
- } else {
- grow(stack_size, callback)
- }
-}
-
-/// Always creates a new stack for the passed closure to run on.
-/// The closure will still be on the same thread as the caller of `grow`.
-/// This will allocate a new stack with at least `stack_size` bytes.
-pub fn grow<R, F: FnOnce() -> R>(stack_size: usize, callback: F) -> R {
- // To avoid monomorphizing `_grow()` and everything it calls,
- // we convert the generic callback to a dynamic one.
- let mut opt_callback = Some(callback);
- let mut ret = None;
- let ret_ref = &mut ret;
-
- // This wrapper around `callback` achieves two things:
- // * It converts the `impl FnOnce` to a `dyn FnMut`.
- // `dyn` because we want it to not be generic, and
- // `FnMut` because we can't pass a `dyn FnOnce` around without boxing it.
- // * It eliminates the generic return value, by writing it to the stack of this function.
- // Otherwise the closure would have to return an unsized value, which isn't possible.
- let dyn_callback: &mut dyn FnMut() = &mut || {
- let taken_callback = opt_callback.take().unwrap();
- *ret_ref = Some(taken_callback());
- };
-
- _grow(stack_size, dyn_callback);
- ret.unwrap()
-}
-
-/// Queries the amount of remaining stack as interpreted by this library.
-///
-/// This function will return the amount of stack space left which will be used
-/// to determine whether a stack switch should be made or not.
-pub fn remaining_stack() -> Option<usize> {
- let current_ptr = current_stack_ptr();
- get_stack_limit().map(|limit| current_ptr.saturating_sub(limit))
-}
-
-psm_stack_information!(
- yes {
- fn current_stack_ptr() -> usize {
- psm::stack_pointer() as usize
- }
- }
- no {
- #[inline(always)]
- fn current_stack_ptr() -> usize {
- unsafe {
- let mut x = std::mem::MaybeUninit::<u8>::uninit();
- // Unlikely to be ever exercised. As a fallback we execute a volatile read to a
- // local (to hopefully defeat the optimisations that would make this local a static
- // global) and take its address. This way we get a very approximate address of the
- // current frame.
- x.as_mut_ptr().write_volatile(42);
- x.as_ptr() as usize
- }
- }
- }
-);
-
-thread_local! {
- static STACK_LIMIT: Cell<Option<usize>> = Cell::new(unsafe {
- backends::guess_os_stack_limit()
- })
-}
-
-#[inline(always)]
-fn get_stack_limit() -> Option<usize> {
- STACK_LIMIT.with(|s| s.get())
-}
-
-#[inline(always)]
-#[allow(unused)]
-fn set_stack_limit(l: Option<usize>) {
- STACK_LIMIT.with(|s| s.set(l))
-}
-
-psm_stack_manipulation! {
- yes {
- #[cfg(not(any(target_arch = "wasm32",target_os = "hermit")))]
- #[path = "mmap_stack_restore_guard.rs"]
- mod stack_restore_guard;
-
- #[cfg(any(target_arch = "wasm32",target_os = "hermit"))]
- #[path = "alloc_stack_restore_guard.rs"]
- mod stack_restore_guard;
-
- use stack_restore_guard::StackRestoreGuard;
-
- fn _grow(requested_stack_size: usize, callback: &mut dyn FnMut()) {
- // Other than that this code has no meaningful gotchas.
- unsafe {
- // We use a guard pattern to ensure we deallocate the allocated stack when we leave
- // this function and also try to uphold various safety invariants required by `psm`
- // (such as not unwinding from the callback we pass to it).
- // `StackRestoreGuard` allocates a memory area with suitable size and alignment.
- // It also sets up stack guards if supported on target.
- let guard = StackRestoreGuard::new(requested_stack_size);
- let (stack_base, allocated_stack_size) = guard.stack_area();
- debug_assert!(allocated_stack_size >= requested_stack_size);
- set_stack_limit(Some(stack_base as usize));
- // TODO should we not pass `allocated_stack_size` here?
- let panic = psm::on_stack(stack_base, requested_stack_size, move || {
- std::panic::catch_unwind(std::panic::AssertUnwindSafe(callback)).err()
- });
- drop(guard);
- if let Some(p) = panic {
- std::panic::resume_unwind(p);
- }
- }
- }
- }
-
- no {
- #[cfg(not(windows))]
- fn _grow(stack_size: usize, callback: &mut dyn FnMut()) {
- let _ = stack_size;
- callback();
- }
- #[cfg(windows)]
- use backends::windows::_grow;
- }
-}
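The crate-level example shows a single call, but `maybe_grow` is meant to be placed inside a recursive function so that every level of recursion performs the red-zone check and only the levels that are actually short on stack pay for a switch. A sketch of that usage (assuming the `stacker` crate as a dependency; the `Tree`/`sum` names are invented for the example):

    // Depth-first sum over an arbitrarily deep structure. Each call checks for
    // 32 KiB of headroom and, if needed, switches to a fresh 1 MiB stack.
    enum Tree {
        Leaf(u64),
        Node(Vec<Tree>),
    }

    fn sum(t: &Tree) -> u64 {
        stacker::maybe_grow(32 * 1024, 1024 * 1024, || match t {
            Tree::Leaf(v) => *v,
            Tree::Node(children) => children.iter().map(sum).sum(),
        })
    }

    fn main() {
        // Deep enough that plain recursion would overflow a default thread stack.
        let mut t = Tree::Leaf(1);
        for _ in 0..1_000_000 {
            t = Tree::Node(vec![t]);
        }
        assert_eq!(sum(&t), 1);
        // Leak the tree: the derived recursive drop glue would itself overflow
        // the stack in this sketch.
        std::mem::forget(t);
    }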
diff --git a/vendor/stacker/src/mmap_stack_restore_guard.rs b/vendor/stacker/src/mmap_stack_restore_guard.rs
deleted file mode 100644
index 1e021c2d..00000000
--- a/vendor/stacker/src/mmap_stack_restore_guard.rs
+++ /dev/null
@@ -1,105 +0,0 @@
-use crate::{get_stack_limit, set_stack_limit};
-
-pub struct StackRestoreGuard {
- mapping: *mut u8,
- size_with_guard: usize,
- page_size: usize,
- old_stack_limit: Option<usize>,
-}
-
-impl StackRestoreGuard {
- pub fn new(requested_size: usize) -> StackRestoreGuard {
- // For maximum portability we want to produce a stack that is aligned to a page and has
- // a size that’s a multiple of page size. It is natural to use mmap to allocate
- // these pages. Furthermore, we want to allocate two extra pages for the stack guard.
- // To achieve that we do our calculations in number of pages and convert to bytes last.
- let page_size = page_size();
- let requested_pages = requested_size
- .checked_add(page_size - 1)
- .expect("unreasonably large stack requested")
- / page_size;
- let page_count_with_guard = std::cmp::max(1, requested_pages) + 2;
- let size_with_guard = page_count_with_guard
- .checked_mul(page_size)
- .expect("unreasonably large stack requested");
-
- unsafe {
- let new_stack = libc::mmap(
- std::ptr::null_mut(),
- size_with_guard,
- libc::PROT_NONE,
- libc::MAP_PRIVATE | libc::MAP_ANON,
- -1, // Some implementations assert fd = -1 if MAP_ANON is specified
- 0,
- );
- assert_ne!(
- new_stack,
- libc::MAP_FAILED,
- "mmap failed to allocate stack: {}",
- std::io::Error::last_os_error()
- );
- let guard = StackRestoreGuard {
- mapping: new_stack as *mut u8,
- page_size,
- size_with_guard,
- old_stack_limit: get_stack_limit(),
- };
- // We leave two guard pages without read/write access in our allocation.
- // There is one guard page below the stack and another above it.
- let above_guard_page = new_stack.add(page_size);
- #[cfg(not(target_os = "openbsd"))]
- let result = libc::mprotect(
- above_guard_page,
- size_with_guard - 2 * page_size,
- libc::PROT_READ | libc::PROT_WRITE,
- );
- #[cfg(target_os = "openbsd")]
- let result = if libc::mmap(
- above_guard_page,
- size_with_guard - 2 * page_size,
- libc::PROT_READ | libc::PROT_WRITE,
- libc::MAP_FIXED | libc::MAP_PRIVATE | libc::MAP_ANON | libc::MAP_STACK,
- -1,
- 0,
- ) == above_guard_page
- {
- 0
- } else {
- -1
- };
- assert_ne!(
- result,
- -1,
- "mprotect/mmap failed: {}",
- std::io::Error::last_os_error()
- );
- guard
- }
- }
-
- // TODO this should return a *mut [u8], but pointer slices only got proper support with Rust 1.79.
- pub fn stack_area(&self) -> (*mut u8, usize) {
- unsafe {
- (
- self.mapping.add(self.page_size),
- self.size_with_guard - self.page_size,
- )
- }
- }
-}
-
-impl Drop for StackRestoreGuard {
- fn drop(&mut self) {
- unsafe {
- // FIXME: check the error code and decide what to do with it.
- // Perhaps a debug_assertion?
- libc::munmap(self.mapping as *mut std::ffi::c_void, self.size_with_guard);
- }
- set_stack_limit(self.old_stack_limit);
- }
-}
-
-fn page_size() -> usize {
- // FIXME: consider caching the page size.
- unsafe { libc::sysconf(libc::_SC_PAGE_SIZE) as usize }
-}
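The page arithmetic in `StackRestoreGuard::new` above packs three rules into a few lines: round the request up to whole pages, never use fewer than one usable page, and reserve two extra pages for the guards below and above the stack. A minimal sketch of just that size computation (pure arithmetic, no `mmap`; the helper name is invented):

    /// Mirror the mapping-size computation from the deleted
    /// `StackRestoreGuard::new`: usable pages rounded up from the request
    /// (minimum one), plus one guard page at each end.
    fn mapping_size(requested_size: usize, page_size: usize) -> usize {
        let requested_pages = requested_size
            .checked_add(page_size - 1)
            .expect("unreasonably large stack requested")
            / page_size;
        let page_count_with_guard = std::cmp::max(1, requested_pages) + 2;
        page_count_with_guard
            .checked_mul(page_size)
            .expect("unreasonably large stack requested")
    }

    fn main() {
        let page = 4096;
        assert_eq!(mapping_size(0, page), 3 * page); // minimum one usable page
        assert_eq!(mapping_size(4096, page), 3 * page); // exactly one page
        assert_eq!(mapping_size(4097, page), 4 * page); // rounds up to two pages
    }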