path: root/vendor/stacker/src/alloc_stack_restore_guard.rs

use crate::{get_stack_limit, set_stack_limit};

/// Owns a heap-allocated region that serves as a replacement stack, along
/// with the stack limit that was in effect when the guard was created.
pub struct StackRestoreGuard {
    new_stack: *mut u8,
    stack_bytes: usize,
    old_stack_limit: Option<usize>,
}

/// Alignment of the allocated stack region, in bytes.
const ALIGNMENT: usize = 16;

impl StackRestoreGuard {
    pub fn new(stack_bytes: usize) -> StackRestoreGuard {
        // On platforms without OS support for guard pages we cannot protect
        // the new stack against overflow. This is unfortunate, but there is
        // not much we can do about it. We simply allocate the requested size
        // from the global allocator with a suitable alignment.
        //
        // Round the requested size up to the next multiple of ALIGNMENT
        // (e.g. a request for 1000 bytes becomes 1008); `checked_add` turns
        // an overflowing request into a panic rather than a silent wrap.
        let stack_bytes = stack_bytes
            .checked_add(ALIGNMENT - 1)
            .expect("unreasonably large stack requested")
            / ALIGNMENT
            * ALIGNMENT;
        let layout = std::alloc::Layout::from_size_align(stack_bytes, ALIGNMENT).unwrap();
        // `alloc` is undefined behaviour for zero-sized layouts, so callers
        // are expected to request a non-zero stack size.
        let ptr = unsafe { std::alloc::alloc(layout) };
        assert!(!ptr.is_null(), "unable to allocate stack");
        StackRestoreGuard {
            new_stack: ptr,
            stack_bytes,
            old_stack_limit: get_stack_limit(),
        }
    }

    /// Returns the base pointer and length in bytes of the allocated stack
    /// region.
    pub fn stack_area(&self) -> (*mut u8, usize) {
        (self.new_stack, self.stack_bytes)
    }
}

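// Dropping the guard both frees the replacement stack and reinstates the
// stack limit that was recorded when the guard was created (`None` when no
// limit had been set).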
impl Drop for StackRestoreGuard {
    fn drop(&mut self) {
        unsafe {
            // Reconstruct the same layout that was used for the allocation in
            // `new`; size and alignment were validated there, so the
            // unchecked constructor is sound.
            std::alloc::dealloc(
                self.new_stack,
                std::alloc::Layout::from_size_align_unchecked(self.stack_bytes, ALIGNMENT),
            );
        }
        set_stack_limit(self.old_stack_limit);
    }
}
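
// A minimal sketch exercising the guard's contract without switching
// execution onto the new stack (the rest of the `stacker` crate drives that
// part): the returned area must be non-null, aligned to `ALIGNMENT`, and
// rounded up to a multiple of it. The concrete sizes below are illustrative.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn stack_area_is_aligned_and_rounded_up() {
        // 1000 is not a multiple of 16, so `new` rounds it up to 1008.
        let guard = StackRestoreGuard::new(1000);
        let (ptr, len) = guard.stack_area();
        assert!(!ptr.is_null());
        assert_eq!(ptr as usize % ALIGNMENT, 0);
        assert_eq!(len, 1008);
        // Dropping the guard frees the allocation and restores the stack
        // limit recorded at construction time.
        drop(guard);
    }
}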