-
Notifications
You must be signed in to change notification settings - Fork 63
Expand file tree
/
Copy pathllff.rs
More file actions
127 lines (114 loc) · 3.75 KB
/
llff.rs
File metadata and controls
127 lines (114 loc) · 3.75 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
use core::alloc::{GlobalAlloc, Layout};
use core::cell::RefCell;
use core::ptr::{self, NonNull};
use critical_section::Mutex;
use linked_list_allocator::Heap as LLHeap;
/// A linked list first fit heap.
///
/// Every operation on the inner allocator happens inside a
/// `critical_section::Mutex` + `RefCell`, so concurrent access is
/// serialised through critical sections.
pub struct Heap {
    // (allocator, initialized): the flag records whether `init` has run,
    // so a second `init` call can be caught by an assertion.
    heap: Mutex<RefCell<(LLHeap, bool)>>,
}
impl Heap {
    /// Constructs an allocator whose heap region has NOT been set up yet.
    ///
    /// Call [`init`](Self::init) exactly once before performing any
    /// allocation through this instance.
    pub const fn empty() -> Heap {
        Heap {
            heap: Mutex::new(RefCell::new((LLHeap::empty(), false))),
        }
    }

    /// Hands the memory region starting at `start_addr` to the allocator.
    ///
    /// Must run before any code allocates through this heap.
    ///
    /// `start_addr` is where the heap begins; `size` is its length in
    /// bytes. The heap grows towards higher addresses: the smallest
    /// address used is `start_addr` and the largest is
    /// `start_addr + size - 1`. E.g. with `start_addr = 0x1000` and
    /// `size = 0x30000`, no memory at `0x31000` or above is touched.
    ///
    /// # Safety
    ///
    /// Sound only if:
    ///
    /// - `start_addr` points to valid memory.
    /// - `size` is correct.
    ///
    /// # Panics
    ///
    /// Panics when `size == 0`, or when this method is called more than
    /// once.
    pub unsafe fn init(&self, start_addr: usize, size: usize) {
        assert!(size > 0);
        critical_section::with(|cs| {
            let mut heap = self.heap.borrow_ref_mut(cs);
            // Reject a second initialization before touching the inner heap.
            assert!(!heap.1);
            heap.1 = true;
            heap.0.init(start_addr as *mut u8, size);
        });
    }

    /// Estimated number of bytes currently handed out.
    pub fn used(&self) -> usize {
        critical_section::with(|cs| {
            let heap = self.heap.borrow_ref_mut(cs);
            heap.0.used()
        })
    }

    /// Estimated number of bytes still available.
    pub fn free(&self) -> usize {
        critical_section::with(|cs| {
            let heap = self.heap.borrow_ref_mut(cs);
            heap.0.free()
        })
    }

    /// First-fit allocation; `None` when no suitable free region exists.
    fn alloc(&self, layout: Layout) -> Option<NonNull<u8>> {
        critical_section::with(|cs| {
            let mut heap = self.heap.borrow_ref_mut(cs);
            heap.0.allocate_first_fit(layout).ok()
        })
    }

    /// Returns a previously allocated block to the free list.
    ///
    /// The caller must pass a `ptr`/`layout` pair that was produced by
    /// [`alloc`](Self::alloc); in particular `ptr` must be non-null.
    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        critical_section::with(|cs| {
            let mut heap = self.heap.borrow_ref_mut(cs);
            // SAFETY: the caller guarantees `ptr` came from a successful
            // allocation, so it is non-null.
            heap.0.deallocate(NonNull::new_unchecked(ptr), layout)
        });
    }
}
unsafe impl GlobalAlloc for Heap {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // The global-allocator interface reports failure as a null pointer.
        match self.alloc(layout) {
            Some(allocation) => allocation.as_ptr(),
            None => ptr::null_mut(),
        }
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        self.dealloc(ptr, layout);
    }
}
#[cfg(feature = "allocator_api")]
mod allocator_api {
    use super::*;
    use core::alloc::{AllocError, Allocator};

    unsafe impl Allocator for Heap {
        /// Allocates per the `Allocator` contract: a zero-sized request
        /// succeeds without touching the heap and yields a dangling (but
        /// suitably aligned, non-null) pointer; any other size is served
        /// by the first-fit allocator, with `AllocError` on exhaustion.
        fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
            match layout.size() {
                // BUGFIX: `Layout` has no `dangling_ptr()` method (the
                // unstable API is `Layout::dangling`). Build the aligned
                // dangling pointer from `layout.align()` instead, which is
                // stable and satisfies the alignment requirement.
                0 => {
                    // SAFETY: `Layout::align()` is guaranteed non-zero, so
                    // the resulting pointer is non-null.
                    let dangling =
                        unsafe { NonNull::new_unchecked(layout.align() as *mut u8) };
                    Ok(NonNull::slice_from_raw_parts(dangling, 0))
                }
                size => self.alloc(layout).map_or(Err(AllocError), |allocation| {
                    Ok(NonNull::slice_from_raw_parts(allocation, size))
                }),
            }
        }

        /// Frees `ptr` unless it came from a zero-sized allocation, which
        /// owns no backing memory and must not reach the heap.
        unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
            if layout.size() != 0 {
                self.dealloc(ptr.as_ptr(), layout);
            }
        }
    }
}