Skip to content

Commit 7e6b694

Browse files
committed
add more comments/docs
1 parent 8835f58 commit 7e6b694

File tree

4 files changed

+143
-50
lines changed

4 files changed

+143
-50
lines changed

dc/s2n-quic-dc/src/socket/recv/descriptor.rs

+59-10
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,11 @@ impl Memory {
4545
}
4646

4747
/// A pointer to a single descriptor in a group
48+
///
49+
/// Fundamentally, this is similar to something like `Arc<DescriptorInner>`. However,
50+
/// it doesn't use its own allocation for the Arc layout, and instead embeds the reference
51+
/// counts in the descriptor data. This avoids allocating a new `Arc` every time a packet
52+
/// is received and instead allows the descriptor to be reused.
4853
pub(super) struct Descriptor {
4954
ptr: NonNull<DescriptorInner>,
5055
phantom: PhantomData<DescriptorInner>,
@@ -76,7 +81,7 @@ impl Descriptor {
7681

7782
#[inline]
7883
fn data(&self) -> NonNull<u8> {
79-
self.inner().data
84+
self.inner().payload
8085
}
8186

8287
#[inline]
@@ -146,26 +151,43 @@ unsafe impl Send for Descriptor {}
146151
unsafe impl Sync for Descriptor {}
147152

148153
pub(super) struct DescriptorInner {
154+
/// An identifier for the descriptor
155+
///
156+
/// This can be used by the pool implementation to map the descriptor to an internal
157+
/// detail, e.g. in AF_XDP it passes around UMEM offsets.
149158
id: u64,
159+
/// The pointer to the descriptor address
150160
address: NonNull<Addr>,
151-
data: NonNull<u8>,
152-
161+
/// The pointer to the descriptor payload
162+
payload: NonNull<u8>,
163+
/// The number of active references for this descriptor.
164+
///
165+
/// This refcount allows for splitting descriptors into multiple segments
166+
/// and then correctly freeing the descriptor once the last segment is dropped.
153167
references: AtomicUsize,
154-
168+
/// A reference back to the memory region that owns the descriptor
155169
memory: NonNull<Memory>,
156170
}
157171

158172
impl DescriptorInner {
159-
pub(super) fn new(
173+
/// # Safety
174+
///
175+
/// `address` must be initialized.
176+
///
177+
/// `payload` must point to a valid region of memory that is at least `capacity` bytes
178+
/// long. Additionally, it must be initialized.
179+
///
180+
/// `memory` must be initialized.
181+
pub(super) unsafe fn new(
160182
id: u64,
161183
address: NonNull<Addr>,
162-
data: NonNull<u8>,
184+
payload: NonNull<u8>,
163185
memory: NonNull<Memory>,
164186
) -> Self {
165187
Self {
166188
id,
167189
address,
168-
data,
190+
payload,
169191
references: AtomicUsize::new(0),
170192
memory,
171193
}
@@ -184,7 +206,8 @@ impl DescriptorInner {
184206
debug_assert_ne!(mem_refs, 0, "reference count underflow");
185207

186208
// if the free_list is still active (the allocator hasn't dropped) then just push the id
187-
// TODO Weak::upgrade is a bit expensive since it clones the `Arc`, only to drop it again
209+
// The `upgrade` acts as a lock for freeing the `Memory` instance, in the case that the
210+
// free list has been dropped by the allocator.
188211
if let Some(free_list) = memory.free_list.upgrade() {
189212
free_list.free(Descriptor {
190213
ptr: desc.ptr,
@@ -214,6 +237,11 @@ impl DescriptorInner {
214237

215238
/// An unfilled packet
216239
pub struct Unfilled {
240+
/// The inner raw descriptor.
241+
///
242+
/// This needs to be an [`Option`] to allow for both consuming the descriptor
243+
/// into a [`Filled`] after receiving a packet or dropping the [`Unfilled`] and
244+
/// releasing it back into the packet pool.
217245
desc: Option<Descriptor>,
218246
}
219247

@@ -225,6 +253,7 @@ impl fmt::Debug for Unfilled {
225253
}
226254

227255
impl Unfilled {
256+
/// Creates an [`Unfilled`] descriptor from a raw [`Descriptor`].
228257
#[inline]
229258
pub(super) fn from_descriptor(desc: Descriptor) -> Self {
230259
desc.upgrade();
@@ -241,7 +270,10 @@ impl Unfilled {
241270
let inner = desc.inner();
242271
let addr = unsafe { &mut *inner.address.as_ptr() };
243272
let capacity = inner.capacity() as usize;
244-
let data = unsafe { core::slice::from_raw_parts_mut(inner.data.as_ptr(), capacity) };
273+
let data = unsafe {
274+
// SAFETY: a pool implementation is required to initialize all payload bytes
275+
core::slice::from_raw_parts_mut(inner.payload.as_ptr(), capacity)
276+
};
245277
let iov = IoSliceMut::new(data);
246278
let mut cmsg = cmsg::Receiver::default();
247279

@@ -282,10 +314,18 @@ impl Drop for Unfilled {
282314
}
283315
}
284316

317+
/// A filled packet
285318
pub struct Filled {
319+
/// The raw descriptor
286320
desc: Descriptor,
321+
/// The offset into the payload
322+
///
323+
/// This allows for splitting up a filled packet into multiple segments, while still ensuring
324+
/// exclusive access to a region.
287325
offset: u16,
326+
/// The filled length of the payload
288327
len: u16,
328+
/// The ECN marking of the packet
289329
ecn: ExplicitCongestionNotification,
290330
}
291331

@@ -338,6 +378,7 @@ impl Filled {
338378
#[inline]
339379
pub fn payload(&self) -> &[u8] {
340380
unsafe {
381+
// SAFETY: the descriptor has been filled through the [`Unfilled`] API
341382
let ptr = self.desc.data().as_ptr().add(self.offset as _);
342383
let len = self.len as usize;
343384
core::slice::from_raw_parts(ptr, len)
@@ -349,6 +390,8 @@ impl Filled {
349390
#[inline]
350391
pub fn payload_mut(&mut self) -> &mut [u8] {
351392
unsafe {
393+
// SAFETY: the descriptor has been filled through the [`Unfilled`] API
394+
// SAFETY: the `offset` + `len` are exclusively owned by this reference
352395
let ptr = self.desc.data().as_ptr().add(self.offset as _);
353396
let len = self.len as usize;
354397
core::slice::from_raw_parts_mut(ptr, len)
@@ -374,8 +417,12 @@ impl Filled {
374417
let ecn = self.ecn;
375418
self.offset += at;
376419
self.len -= at;
420+
421+
// update the reference counts for the descriptor
422+
let desc = self.desc.clone_filled();
423+
377424
Self {
378-
desc: self.desc.clone_filled(),
425+
desc,
379426
offset,
380427
len: at,
381428
ecn,
@@ -408,6 +455,8 @@ impl Filled {
408455
impl Drop for Filled {
409456
#[inline]
410457
fn drop(&mut self) {
458+
// decrement the reference count, which may put the descriptor back into the pool once
459+
// it reaches 0
411460
self.desc.drop_filled()
412461
}
413462
}

dc/s2n-quic-dc/src/socket/recv/pool.rs

+8-1
Original file line numberDiff line numberDiff line change
@@ -52,6 +52,7 @@ impl Pool {
5252
// initialize the address
5353
addr.write(Addr::default());
5454
// initialize the descriptor - note that it is self-referential to `addr`, `data`, and `memory`
55+
// SAFETY: address, payload, and memory are all initialized
5556
descriptor.write(DescriptorInner::new(
5657
idx as _,
5758
NonNull::new_unchecked(addr),
@@ -123,6 +124,7 @@ impl Region {
123124
let ptr = unsafe {
124125
// SAFETY: the layout is non-zero size
125126
debug_assert_ne!(packets.size(), 0);
127+
// ensure that the allocation is zeroed out so we don't have to worry about MaybeUninit
126128
std::alloc::alloc_zeroed(packets)
127129
};
128130
let ptr = NonNull::new(ptr).expect("failed to allocate memory");
@@ -152,6 +154,11 @@ impl Drop for Region {
152154
}
153155
}
154156

157+
/// A free list of unfilled descriptors
158+
///
159+
/// Note that this uses a [`Vec`] instead of [`std::collections::VecDeque`], which acts more
160+
/// like a stack than a queue. This is to prefer more recently used descriptors, which should
161+
/// hopefully reduce the number of cache misses.
155162
struct Free(Mutex<Vec<Unfilled>>);
156163

157164
impl Free {
@@ -371,7 +378,7 @@ mod tests {
371378
let expected_free_packets = 16;
372379
let mut model = Model::new(max_packet_size, expected_free_packets);
373380
for op in ops {
374-
model.apply(&op);
381+
model.apply(op);
375382
}
376383
});
377384
}

0 commit comments

Comments (0)