@@ -10,38 +10,19 @@ use std::{
     ptr::NonNull,
     sync::{
         atomic::{AtomicUsize, Ordering},
-        Weak,
+        Arc,
     },
 };
 use tracing::trace;
 
 /// Callback which releases a descriptor back into the free list
 pub(super) trait FreeList: 'static + Send + Sync {
-    fn free(&self, descriptor: Descriptor);
-}
-
-/// A handle to various parts for the descriptor group instance
-pub(super) struct Memory {
-    capacity: u16,
-    references: AtomicUsize,
-    free_list: Weak<dyn FreeList>,
-    #[allow(dead_code)]
-    region: Box<dyn 'static + Send + Sync>,
-}
-
-impl Memory {
-    pub(super) fn new<F: FreeList>(
-        capacity: u16,
-        free_list: Weak<F>,
-        region: Box<dyn 'static + Send + Sync>,
-    ) -> Box<Self> {
-        Box::new(Self {
-            capacity,
-            references: AtomicUsize::new(0),
-            free_list,
-            region,
-        })
-    }
+    /// Frees a descriptor back into the free list
+    ///
+    /// Once the free list has been closed and all descriptors returned, the `free` function
+    /// should return an object that can be dropped to release all of the memory associated
+    /// with the descriptor pool.
+    fn free(&self, descriptor: Descriptor) -> Option<Box<dyn 'static + Send>>;
 }
 
 /// A pointer to a single descriptor in a group
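Not part of the diff, but for orientation: under the new contract the pool's free list owns the backing storage and hands it back once the pool has been closed and the last descriptor returns. A minimal sketch of an implementor follows; `PoolFreeList`, its fields, and the closed/outstanding bookkeeping are all hypothetical, and it assumes `Descriptor` satisfies the `Send`/`Sync` bounds the pool requires.

```rust
use std::sync::Mutex;

/// Hypothetical free list, for illustration only; `Descriptor` and `FreeList`
/// are the items touched by this diff.
struct PoolFreeList {
    state: Mutex<PoolState>,
}

struct PoolState {
    /// descriptors that are ready to be handed out again
    free: Vec<Descriptor>,
    /// descriptors currently owned by the application
    outstanding: usize,
    /// set once the allocator shuts the pool down
    closed: bool,
    /// the region backing every descriptor payload
    region: Option<Box<dyn 'static + Send>>,
}

impl FreeList for PoolFreeList {
    fn free(&self, descriptor: Descriptor) -> Option<Box<dyn 'static + Send>> {
        let mut state = self.state.lock().unwrap();
        state.outstanding -= 1;

        if !state.closed {
            // the pool is still live: recycle the descriptor
            state.free.push(descriptor);
            return None;
        }

        // the pool is shutting down; the handle is discarded since all backing
        // memory lives in `region`
        let _ = descriptor;

        // once the last descriptor returns, hand the region to the caller so it
        // can be dropped
        if state.outstanding == 0 {
            state.region.take()
        } else {
            None
        }
    }
}
```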
@@ -64,8 +45,18 @@ impl Descriptor {
         }
     }
 
+    /// # Safety
+    ///
+    /// This should only be called once the caller can guarantee the descriptor is no longer
+    /// used.
+    #[inline]
+    pub(super) unsafe fn drop_in_place(&self) {
+        let inner = self.inner();
+        Arc::decrement_strong_count(Arc::as_ptr(&inner.free_list));
+    }
+
     #[inline]
-    pub(super) fn id(&self) -> u64 {
+    pub(super) fn id(&self) -> u32 {
         self.inner().id
     }
 
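`drop_in_place` works because each live descriptor is treated as holding one strong count on the shared `Arc<dyn FreeList>` without storing an owned `Arc` of its own. A standalone sketch of that raw strong-count pattern, using a stand-in type rather than this module's code:

```rust
use std::sync::Arc;

struct List; // stand-in for the shared free list

fn main() {
    let list = Arc::new(List);

    // take an extra strong count on behalf of a handle that only keeps a raw pointer
    let raw = Arc::as_ptr(&list);
    unsafe { Arc::increment_strong_count(raw) };
    assert_eq!(Arc::strong_count(&list), 2);

    // ... the handle is used ...

    // when the handle is destroyed, release the count it was holding
    unsafe { Arc::decrement_strong_count(raw) };
    assert_eq!(Arc::strong_count(&list), 1);
}
```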
@@ -88,14 +79,8 @@ impl Descriptor {
     fn upgrade(&self) {
         let inner = self.inner();
         trace!(upgrade = inner.id);
-        inner.references.fetch_add(1, Ordering::Relaxed);
-        unsafe {
-            inner
-                .memory
-                .as_ref()
-                .references
-                .fetch_add(1, Ordering::Relaxed);
-        }
+        // we can use relaxed since this only happens after it is filled, which was done by a single owner
+        inner.references.store(1, Ordering::Relaxed);
     }
 
     #[inline]
@@ -128,22 +113,18 @@ impl Descriptor {
 
         core::sync::atomic::fence(Ordering::Acquire);
 
-        let mem = inner.free(self);
-
+        let storage = inner.free(self);
         trace!(free_desc = inner.id, state = %"filled");
-
-        drop(mem);
+        drop(storage);
     }
 
     #[inline]
     pub(super) fn drop_unfilled(&self) {
         let inner = self.inner();
-        inner.references.store(0, Ordering::Release);
-        let mem = inner.free(self);
-
+        let storage = inner.free(self);
         trace!(free_desc = inner.id, state = %"unfilled");
-
-        drop(mem);
+        let _ = inner;
+        drop(storage);
     }
 }
 
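The `Acquire` fence above follows the usual last-reference pattern: every owner decrements the count with `Release`, and only the thread that observes the count reach zero issues an `Acquire` fence before the descriptor is torn down (the decrement itself sits above this hunk). A generic sketch of the pattern, independent of this module's types:

```rust
use std::sync::atomic::{fence, AtomicUsize, Ordering};

struct Shared {
    references: AtomicUsize,
    // ... payload ...
}

/// Returns true if the caller released the last reference and may tear down `Shared`
fn release(shared: &Shared) -> bool {
    // Release publishes this owner's writes to whichever thread ends up freeing the data
    if shared.references.fetch_sub(1, Ordering::Release) != 1 {
        return false;
    }
    // Acquire pairs with every other owner's Release decrement, so the freeing
    // thread observes all of their writes before the teardown begins
    fence(Ordering::Acquire);
    true
}
```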
@@ -155,7 +136,9 @@ pub(super) struct DescriptorInner {
     ///
     /// This can be used by the pool implementation to map the descriptor to an internal
     /// detail, e.g. in AF_XDP it passes around UMEM offsets.
-    id: u64,
+    id: u32,
+    /// The maximum capacity for this descriptor
+    capacity: u16,
     /// The pointer to the descriptor address
     address: NonNull<Addr>,
     /// The pointer to the descriptor payload
@@ -165,8 +148,8 @@ pub(super) struct DescriptorInner {
     /// This refcount allows for splitting descriptors into multiple segments
     /// and then correctly freeing the descriptor once the last segment is dropped.
     references: AtomicUsize,
-    /// A reference back to the memory region that owns the descriptor
-    memory: NonNull<Memory>,
+    /// A reference back to the free list
+    free_list: Arc<dyn FreeList>,
 }
 
 impl DescriptorInner {
@@ -179,59 +162,29 @@ impl DescriptorInner {
     ///
     /// `memory` must be initialized.
     pub(super) unsafe fn new(
-        id: u64,
+        id: u32,
+        capacity: u16,
         address: NonNull<Addr>,
         payload: NonNull<u8>,
-        memory: NonNull<Memory>,
+        free_list: Arc<dyn FreeList>,
     ) -> Self {
         Self {
             id,
+            capacity,
             address,
             payload,
             references: AtomicUsize::new(0),
-            memory,
+            free_list,
         }
     }
 
-    #[inline]
-    fn capacity(&self) -> u16 {
-        unsafe { self.memory.as_ref().capacity }
-    }
-
     /// Frees the descriptor back into the pool
     #[inline]
-    fn free(&self, desc: &Descriptor) -> Option<Box<Memory>> {
-        let memory = unsafe { self.memory.as_ref() };
-        let mem_refs = memory.references.fetch_sub(1, Ordering::Release);
-        debug_assert_ne!(mem_refs, 0, "reference count underflow");
-
-        // if the free_list is still active (the allocator hasn't dropped) then just push the id
-        // The `upgrade` acts as a lock for freeing the `Memory` instance, in the case that the
-        // free list has been dropped by the allocator.
-        if let Some(free_list) = memory.free_list.upgrade() {
-            free_list.free(Descriptor {
-                ptr: desc.ptr,
-                phantom: PhantomData,
-            });
-            return None;
-        }
-
-        // the free_list no longer active and we need to clean up the memory
-
-        // based on the implementation in:
-        // https://github.com/rust-lang/rust/blob/28b83ee59698ae069f5355b8e03f976406f410f5/library/alloc/src/sync.rs#L2551
-        if mem_refs != 1 {
-            trace!(memory_draining = mem_refs - 1, desc = self.id);
-            return None;
-        }
-
-        core::sync::atomic::fence(Ordering::Acquire);
-
-        trace!(memory_free = ?self.memory.as_ptr(), desc = self.id);
-
-        // return the boxed memory rather than free it here - this works around
-        // any stacked borrowing issues found by Miri
-        Some(unsafe { Box::from_raw(self.memory.as_ptr()) })
+    fn free(&self, desc: &Descriptor) -> Option<Box<dyn 'static + Send>> {
+        self.free_list.free(Descriptor {
+            ptr: desc.ptr,
+            phantom: PhantomData,
+        })
     }
 }
 
@@ -256,7 +209,6 @@ impl Unfilled {
     /// Creates an [`Unfilled`] descriptor from a raw [`Descriptor`].
     #[inline]
     pub(super) fn from_descriptor(desc: Descriptor) -> Self {
-        desc.upgrade();
         Self { desc: Some(desc) }
     }
 
@@ -269,7 +221,7 @@ impl Unfilled {
         let desc = self.desc.take().expect("invalid state");
         let inner = desc.inner();
         let addr = unsafe { &mut *inner.address.as_ptr() };
-        let capacity = inner.capacity() as usize;
+        let capacity = inner.capacity as usize;
         let data = unsafe {
             // SAFETY: a pool implementation is required to initialize all payload bytes
             core::slice::from_raw_parts_mut(inner.payload.as_ptr(), capacity)
@@ -290,6 +242,8 @@ impl Unfilled {
 
         let segment_len = cmsg.segment_len();
         let ecn = cmsg.ecn();
+        // increment the reference count since it is now filled and has a reference
+        desc.upgrade();
         let desc = Filled {
             desc,
             offset: 0,