-/*
- * Pool Allocator
- * allocator for fixed size(in future, maybe able to alloc several size...)
- * Allow nullptr for accessing Physical Address:0
- */
-
-/*TODO: think about mutex*/
-
-use core::marker::PhantomData;
-use core::mem::size_of;
+//!
+//! Pool Allocator
+//! An allocator for fixed size(in future, maybe able to alloc several size...)
+//! This allows nullptr for accessing Physical Address:0
+//!
 
 pub struct PoolAllocator<T> {
     linked_count: usize,
     object_size: usize,
     head: Option<*mut FreeList>,
-    phantom: PhantomData<T>,
+    phantom: core::marker::PhantomData<T>,
 }
 
 struct FreeList {
-    prev: Option<*mut FreeList>,
     next: Option<*mut FreeList>,
 }
 
+/// PoolAllocator
+///
+/// This allocator is FILO(First In Last Out) to increase the probability of cache-hit.
 impl<T> PoolAllocator<T> {
     const SIZE_CHECK_HOOK: () = Self::size_check();
 
     const fn size_check() {
-        if size_of::<T>() < size_of::<FreeList>() {
+        if core::mem::size_of::<T>() < core::mem::size_of::<FreeList>() {
             panic!("PoolAllocator can process the struct bigger than FreeList only.");
             //static_assert
         }
@@ -35,24 +32,22 @@ impl<T> PoolAllocator<T> {
         let _c = Self::SIZE_CHECK_HOOK;
         Self {
             linked_count: 0,
-            object_size: size_of::<T>(),
+            object_size: core::mem::size_of::<T>(),
             head: None,
-            phantom: PhantomData,
+            phantom: core::marker::PhantomData,
         }
     }
 
     pub unsafe fn set_initial_pool(&mut self, pool_address: usize, pool_size: usize) {
         assert_eq!(self.linked_count, 0);
         let mut address = pool_address;
         let mut prev_entry = address as *mut FreeList;
-        (*prev_entry).prev = None;
         (*prev_entry).next = None;
         self.head = Some(prev_entry.clone());
         self.linked_count = 1;
         address += self.object_size;
         for _i in 1..(pool_size / self.object_size) {
             let entry = address as *mut FreeList;
-            (*entry).prev = Some(prev_entry);
             (*entry).next = None;
             (*prev_entry).next = Some(entry.clone());
             self.linked_count += 1;
@@ -71,18 +66,11 @@ impl<T> PoolAllocator<T> {
 
     pub fn alloc_ptr(&mut self) -> Result<*mut T, ()> {
         if self.linked_count == 0 {
-            /*add: alloc page from manager*/
             return Err(());
         }
         //assert!(self.head.is_some());
         let e = self.head.unwrap().clone();
-        if let Some(next) = unsafe { &mut *e }.next.clone() {
-            unsafe { &mut *next }.prev = None;
-            self.head = Some(next);
-        } else {
-            assert_eq!(self.linked_count, 1);
-            self.head = None;
-        }
+        self.head = unsafe { (&mut *e).next };
         self.linked_count -= 1;
         Ok(e as usize as *mut T)
     }
@@ -93,23 +81,10 @@ impl<T> PoolAllocator<T> {
 
     pub fn free_ptr(&mut self, target: *mut T) {
         /*do not use target after free */
-        use core::usize;
-        assert!(self.linked_count < usize::MAX);
+        assert!(self.linked_count < core::usize::MAX);
         let e = target as usize as *mut FreeList;
-        if let Some(mut first_entry) = self.head {
-            unsafe {
-                (*e).next = Some(first_entry);
-                (*first_entry).prev = Some(e.clone());
-                self.head = Some(e);
-            }
-        } else {
-            assert_eq!(self.linked_count, 0);
-            unsafe {
-                (*e).prev = None;
-                (*e).next = None;
-            }
-            self.head = Some(e);
-        }
+        unsafe { (&mut *e).next = self.head };
+        self.head = Some(e);
         self.linked_count += 1;
     }
 }
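
Usage sketch (not part of the commit): with the prev pointer removed, alloc_ptr and free_ptr only ever pop and push at head, so a singly-linked free list suffices and both operations stay O(1). The snippet below is illustrative only; the constructor name `new`, the element type, and the pool size are assumptions, since the constructor itself is hidden outside the diff hunks.

// Illustrative only: a 4 KiB static pool carved into 64-byte objects.
static mut POOL: [u8; 4096] = [0u8; 4096];

fn example() -> Result<(), ()> {
    // `new` is assumed here; the constructor's signature is outside the diff hunks.
    let mut allocator: PoolAllocator<[u8; 64]> = PoolAllocator::new();
    unsafe { allocator.set_initial_pool(core::ptr::addr_of_mut!(POOL) as usize, 4096) };
    let obj = allocator.alloc_ptr()?;  // pops the current free-list head (FILO)
    // ... use `obj` as a *mut [u8; 64] ...
    allocator.free_ptr(obj);           // pushes the entry back as the new head
    Ok(())
}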