
Commit 27bed3f

simplify memory ownership

1 parent 6a08a6c

3 files changed (+178 -158 lines)

dc/s2n-quic-dc/src/socket/recv/descriptor.rs (+43 -89)
```diff
@@ -10,38 +10,19 @@ use std::{
     ptr::NonNull,
     sync::{
         atomic::{AtomicUsize, Ordering},
-        Weak,
+        Arc,
     },
 };
 use tracing::trace;
 
 /// Callback which releases a descriptor back into the free list
 pub(super) trait FreeList: 'static + Send + Sync {
-    fn free(&self, descriptor: Descriptor);
-}
-
-/// A handle to various parts for the descriptor group instance
-pub(super) struct Memory {
-    capacity: u16,
-    references: AtomicUsize,
-    free_list: Weak<dyn FreeList>,
-    #[allow(dead_code)]
-    region: Box<dyn 'static + Send + Sync>,
-}
-
-impl Memory {
-    pub(super) fn new<F: FreeList>(
-        capacity: u16,
-        free_list: Weak<F>,
-        region: Box<dyn 'static + Send + Sync>,
-    ) -> Box<Self> {
-        Box::new(Self {
-            capacity,
-            references: AtomicUsize::new(0),
-            free_list,
-            region,
-        })
-    }
+    /// Frees a descriptor back into the free list
+    ///
+    /// Once the free list has been closed and all descriptors returned, the `free` function
+    /// should return an object that can be dropped to release all of the memory associated
+    /// with the descriptor pool.
+    fn free(&self, descriptor: Descriptor) -> Option<Box<dyn 'static + Send>>;
 }
 
 /// A pointer to a single descriptor in a group
```
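The new contract moves pool-shutdown bookkeeping into the `FreeList` implementation: while the pool is live, `free` recycles the descriptor and returns `None`; once the pool has been closed and the last outstanding descriptor comes back, it returns the backing storage so the caller can drop it. Below is a minimal sketch of an implementation honoring that contract; `PoolFreeList` and `PoolState` are hypothetical names (the commit's real pools live in the other two changed files and will differ), while `Descriptor` and `FreeList` are the types from this file.

```rust
use std::sync::Mutex;

// Hypothetical pool implementing the new `FreeList` contract.
struct PoolFreeList {
    state: Mutex<PoolState>,
}

struct PoolState {
    /// flipped to false when the allocator shuts the pool down
    open: bool,
    /// descriptors currently handed out
    outstanding: usize,
    /// recycled descriptors, ready to be filled again
    free: Vec<Descriptor>,
    /// backing memory, surrendered along with the last returned descriptor
    region: Option<Box<dyn 'static + Send>>,
}

impl FreeList for PoolFreeList {
    fn free(&self, descriptor: Descriptor) -> Option<Box<dyn 'static + Send>> {
        let mut state = self.state.lock().unwrap();
        state.outstanding -= 1;
        state.free.push(descriptor);
        if state.open || state.outstanding > 0 {
            // pool still live, or other descriptors still out: keep the memory
            return None;
        }
        // pool closed and every descriptor returned: hand the region back so
        // the caller drops it (a real pool must also tear down the descriptors
        // themselves, which is what `Descriptor::drop_in_place` below is for)
        state.region.take()
    }
}
```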
```diff
@@ -64,8 +45,18 @@ impl Descriptor {
         }
     }
 
+    /// # Safety
+    ///
+    /// This should only be called once the caller can guarantee the descriptor is no longer
+    /// used.
+    #[inline]
+    pub(super) unsafe fn drop_in_place(&self) {
+        let inner = self.inner();
+        Arc::decrement_strong_count(Arc::as_ptr(&inner.free_list));
+    }
+
     #[inline]
-    pub(super) fn id(&self) -> u64 {
+    pub(super) fn id(&self) -> u32 {
         self.inner().id
     }
 
```
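`drop_in_place` exists because a descriptor's strong reference to the free list is held implicitly: the `Arc` lives inside `DescriptorInner`, which is itself reached through a raw pointer, so the count has to be released by hand when a descriptor is torn down. The call pairs with an `Arc::increment_strong_count` (or a `clone`) on the allocation side. A standalone illustration of that std API, not taken from the commit:

```rust
use std::sync::Arc;

fn main() {
    let free_list: Arc<String> = Arc::new("free list".into());

    // Pretend a descriptor takes an owned reference through a raw pointer.
    // SAFETY: the pointer comes from a live `Arc`, so the count is >= 1.
    unsafe { Arc::increment_strong_count(Arc::as_ptr(&free_list)) };
    assert_eq!(Arc::strong_count(&free_list), 2);

    // `drop_in_place` releases that reference once the descriptor is dead.
    // SAFETY: pairs with the increment above; the reference is not used again.
    unsafe { Arc::decrement_strong_count(Arc::as_ptr(&free_list)) };
    assert_eq!(Arc::strong_count(&free_list), 1);
}
```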
```diff
@@ -88,14 +79,8 @@ impl Descriptor {
     fn upgrade(&self) {
         let inner = self.inner();
         trace!(upgrade = inner.id);
-        inner.references.fetch_add(1, Ordering::Relaxed);
-        unsafe {
-            inner
-                .memory
-                .as_ref()
-                .references
-                .fetch_add(1, Ordering::Relaxed);
-        }
+        // we can use relaxed since this only happens after it is filled, which was done by a single owner
+        inner.references.store(1, Ordering::Relaxed);
     }
 
     #[inline]
```
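`upgrade` no longer needs read-modify-write operations: it runs only on the fill path, where a single owner holds the descriptor and the count is known to be zero, so a plain relaxed store of 1 suffices. A sketch of the lifecycle this relies on; the `Release`/`Acquire` pairing mirrors the `fence(Ordering::Acquire)` visible in the next hunk, and the names are illustrative:

```rust
use std::sync::atomic::{fence, AtomicUsize, Ordering};

struct Refcount(AtomicUsize);

impl Refcount {
    /// single-owner fill path: no RMW needed, nobody else can observe the count yet
    fn upgrade(&self) {
        self.0.store(1, Ordering::Relaxed);
    }

    /// splitting into segments: later increments can race, so they stay RMW
    fn clone_segment(&self) {
        self.0.fetch_add(1, Ordering::Relaxed);
    }

    /// drop path: `Release` on the decrement, `Acquire` fence before freeing
    fn release(&self) -> bool {
        if self.0.fetch_sub(1, Ordering::Release) == 1 {
            fence(Ordering::Acquire);
            return true; // last reference: safe to free
        }
        false
    }
}
```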
```diff
@@ -128,22 +113,18 @@ impl Descriptor {
 
         core::sync::atomic::fence(Ordering::Acquire);
 
-        let mem = inner.free(self);
-
+        let storage = inner.free(self);
         trace!(free_desc = inner.id, state = %"filled");
-
-        drop(mem);
+        drop(storage);
     }
 
     #[inline]
     pub(super) fn drop_unfilled(&self) {
         let inner = self.inner();
-        inner.references.store(0, Ordering::Release);
-        let mem = inner.free(self);
-
+        let storage = inner.free(self);
         trace!(free_desc = inner.id, state = %"unfilled");
-
-        drop(mem);
+        let _ = inner;
+        drop(storage);
     }
 }
 
```
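Both drop paths now collapse to the same shape: delegate to `inner.free(...)` and drop whatever storage the free list hands back. The `store(0, Ordering::Release)` in `drop_unfilled` could be deleted because an unfilled descriptor's count is never raised in the first place once `upgrade` runs on fill (final hunk below) rather than in `Unfilled::from_descriptor`.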
```diff
@@ -155,7 +136,9 @@ pub(super) struct DescriptorInner {
     ///
     /// This can be used by the pool implementation to map the descriptor to an internal
     /// detail, e.g. in AF_XDP it passes around UMEM offsets.
-    id: u64,
+    id: u32,
+    /// The maximum capacity for this descriptor
+    capacity: u16,
     /// The pointer to the descriptor address
     address: NonNull<Addr>,
     /// The pointer to the descriptor payload
@@ -165,8 +148,8 @@ pub(super) struct DescriptorInner {
     /// This refcount allows for splitting descriptors into multiple segments
     /// and then correctly freeing the descriptor once the last segment is dropped.
     references: AtomicUsize,
-    /// A reference back to the memory region that owns the descriptor
-    memory: NonNull<Memory>,
+    /// A reference back to the free list
+    free_list: Arc<dyn FreeList>,
 }
 
 impl DescriptorInner {
```
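Taken together, the two struct hunks leave `DescriptorInner` self-contained: it carries its own `capacity` and a strong `free_list` reference instead of pointing back into a separate `Memory` header. Consolidated from the hunks above (doc comments abbreviated; `Addr` and `FreeList` are this module's types):

```rust
pub(super) struct DescriptorInner {
    /// Identifier the pool can map to internal details, e.g. AF_XDP UMEM offsets
    id: u32,
    /// The maximum capacity for this descriptor
    capacity: u16,
    /// The pointer to the descriptor address
    address: NonNull<Addr>,
    /// The pointer to the descriptor payload
    payload: NonNull<u8>,
    /// Refcount allowing descriptors to be split into multiple segments
    references: AtomicUsize,
    /// A reference back to the free list; this `Arc` keeps the pool's memory alive
    free_list: Arc<dyn FreeList>,
}
```

Reading `capacity` also becomes a plain field access instead of an `unsafe` dereference through `memory`, which is what the `fill` hunk further down switches to.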
```diff
@@ -179,59 +162,29 @@ impl DescriptorInner {
     ///
     /// `memory` must be initialized.
     pub(super) unsafe fn new(
-        id: u64,
+        id: u32,
+        capacity: u16,
         address: NonNull<Addr>,
         payload: NonNull<u8>,
-        memory: NonNull<Memory>,
+        free_list: Arc<dyn FreeList>,
     ) -> Self {
         Self {
             id,
+            capacity,
             address,
             payload,
             references: AtomicUsize::new(0),
-            memory,
+            free_list,
         }
     }
 
-    #[inline]
-    fn capacity(&self) -> u16 {
-        unsafe { self.memory.as_ref().capacity }
-    }
-
     /// Frees the descriptor back into the pool
     #[inline]
-    fn free(&self, desc: &Descriptor) -> Option<Box<Memory>> {
-        let memory = unsafe { self.memory.as_ref() };
-        let mem_refs = memory.references.fetch_sub(1, Ordering::Release);
-        debug_assert_ne!(mem_refs, 0, "reference count underflow");
-
-        // if the free_list is still active (the allocator hasn't dropped) then just push the id
-        // The `upgrade` acts as a lock for freeing the `Memory` instance, in the case that the
-        // free list has been dropped by the allocator.
-        if let Some(free_list) = memory.free_list.upgrade() {
-            free_list.free(Descriptor {
-                ptr: desc.ptr,
-                phantom: PhantomData,
-            });
-            return None;
-        }
-
-        // the free_list no longer active and we need to clean up the memory
-
-        // based on the implementation in:
-        // https://github.com/rust-lang/rust/blob/28b83ee59698ae069f5355b8e03f976406f410f5/library/alloc/src/sync.rs#L2551
-        if mem_refs != 1 {
-            trace!(memory_draining = mem_refs - 1, desc = self.id);
-            return None;
-        }
-
-        core::sync::atomic::fence(Ordering::Acquire);
-
-        trace!(memory_free = ?self.memory.as_ptr(), desc = self.id);
-
-        // return the boxed memory rather than free it here - this works around
-        // any stacked borrowing issues found by Miri
-        Some(unsafe { Box::from_raw(self.memory.as_ptr()) })
+    fn free(&self, desc: &Descriptor) -> Option<Box<dyn 'static + Send>> {
+        self.free_list.free(Descriptor {
+            ptr: desc.ptr,
+            phantom: PhantomData,
+        })
     }
 }
 
```
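The `Weak`-upgrade dance, the per-region reference count, and the Miri workaround all disappear because ownership now flows one way: descriptor → `Arc<dyn FreeList>` → backing region. A minimal model of that chain, with illustrative stand-in types rather than the commit's own:

```rust
use std::sync::Arc;

#[allow(dead_code)]
struct Region(Vec<u8>); // stand-in for the pool's memory region

#[allow(dead_code)]
struct List {
    region: Region, // the free list owns the region...
}

fn main() {
    let list = Arc::new(List { region: Region(vec![0u8; 4096]) });

    // ...and each descriptor holds a strong reference to the free list
    let descriptor = Arc::clone(&list);

    drop(list); // the allocator can go away first
    assert_eq!(Arc::strong_count(&descriptor), 1);
    drop(descriptor); // the region is freed together with the last descriptor
}
```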
```diff
@@ -256,7 +209,6 @@ impl Unfilled {
     /// Creates an [`Unfilled`] descriptor from a raw [`Descriptor`].
     #[inline]
     pub(super) fn from_descriptor(desc: Descriptor) -> Self {
-        desc.upgrade();
         Self { desc: Some(desc) }
     }
 
```
```diff
@@ -269,7 +221,7 @@ impl Unfilled {
         let desc = self.desc.take().expect("invalid state");
         let inner = desc.inner();
         let addr = unsafe { &mut *inner.address.as_ptr() };
-        let capacity = inner.capacity() as usize;
+        let capacity = inner.capacity as usize;
         let data = unsafe {
             // SAFETY: a pool implementation is required to initialize all payload bytes
             core::slice::from_raw_parts_mut(inner.payload.as_ptr(), capacity)
```
```diff
@@ -290,6 +242,8 @@ impl Unfilled {
 
         let segment_len = cmsg.segment_len();
         let ecn = cmsg.ecn();
+        // increment the reference count since it is now filled and has a reference
+        desc.upgrade();
         let desc = Filled {
             desc,
             offset: 0,
```
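Finally, `upgrade` moves from `Unfilled::from_descriptor` to the moment the descriptor actually becomes `Filled`. Only filled descriptors ever take a reference, which is what makes the relaxed `store(1)` in `upgrade` and the trimmed-down `drop_unfilled` earlier in the diff sound.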
