Skip to content

Commit 84b990b

Browse files
committed
Use FIOFFS instead of fsync (on supported platforms)
1 parent b629563 commit 84b990b

File tree

7 files changed

+298
-116
lines changed

7 files changed

+298
-116
lines changed

Cargo.toml

+1-1
Original file line numberDiff line numberDiff line change
@@ -61,7 +61,7 @@ itertools = "0.12.1"
6161
libc = "0.2"
6262
mime_guess = "2.0.4"
6363
nbd = "0.2.3"
64-
nix = { version = "0.28", features = [ "feature", "uio" ] }
64+
nix = { version = "0.28", features = [ "feature", "uio", "ioctl" ] }
6565
num_enum = "0.7"
6666
num-derive = "0.4"
6767
num-traits = "0.2"

downstairs/Cargo.toml

+1
Original file line numberDiff line numberDiff line change
@@ -71,3 +71,4 @@ asm = ["usdt/asm"]
7171
default = []
7272
zfs_snapshot = []
7373
integration-tests = [] # Enables creating SQLite volumes
74+
omicron-build = [] # Uses FIOFFS for flushes instead of fsync

downstairs/src/extent.rs

+72-6
Original file line numberDiff line numberDiff line change
@@ -34,13 +34,45 @@ pub(crate) trait ExtentInner: Send + Sync + Debug {
3434
fn flush_number(&self) -> Result<u64, CrucibleError>;
3535
fn dirty(&self) -> Result<bool, CrucibleError>;
3636

37-
fn flush(
37+
/// Performs any metadata updates needed before a flush
38+
fn pre_flush(
39+
&mut self,
40+
new_flush: u64,
41+
new_gen: u64,
42+
job_id: JobOrReconciliationId,
43+
) -> Result<(), CrucibleError>;
44+
45+
/// Syncs all relevant data to persistent storage
46+
fn flush_inner(
47+
&mut self,
48+
job_id: JobOrReconciliationId,
49+
) -> Result<(), CrucibleError>;
50+
51+
/// Performs any metadata updates after syncing data to persistent storage
52+
fn post_flush(
3853
&mut self,
3954
new_flush: u64,
4055
new_gen: u64,
4156
job_id: JobOrReconciliationId,
4257
) -> Result<(), CrucibleError>;
4358

59+
/// Performs a full flush (pre/inner/post)
60+
///
61+
/// This is only exposed for the sake of unit testing; normal code should
62+
/// use the fine-grained functions and be forced to consider performance.
63+
#[cfg(test)]
64+
fn flush(
65+
&mut self,
66+
new_flush: u64,
67+
new_gen: u64,
68+
job_id: JobOrReconciliationId,
69+
) -> Result<(), CrucibleError> {
70+
self.pre_flush(new_flush, new_gen, job_id)?;
71+
self.flush_inner(job_id)?;
72+
self.post_flush(new_flush, new_gen, job_id)?;
73+
Ok(())
74+
}
75+
4476
fn read(
4577
&mut self,
4678
job_id: JobId,
@@ -579,22 +611,25 @@ impl Extent {
579611
Ok(())
580612
}
581613

582-
#[instrument]
583-
pub(crate) async fn flush<I: Into<JobOrReconciliationId> + Debug>(
614+
/// Prepares for a flush
615+
///
616+
/// Returns `false` if we should skip the flush (because this extent is not
617+
/// dirty), or `true` if we should proceed.
618+
pub(crate) fn pre_flush<I: Into<JobOrReconciliationId> + Debug>(
584619
&mut self,
585620
new_flush: u64,
586621
new_gen: u64,
587622
id: I, // only used for logging
588623
log: &Logger,
589-
) -> Result<(), CrucibleError> {
624+
) -> Result<bool, CrucibleError> {
590625
let job_id: JobOrReconciliationId = id.into();
591626

592627
if !self.inner.dirty()? {
593628
/*
594629
* If we have made no writes to this extent since the last flush,
595630
* we do not need to update the extent on disk
596631
*/
597-
return Ok(());
632+
return Ok(false);
598633
}
599634

600635
// Read only extents should never have the dirty bit set. If they do,
@@ -605,7 +640,38 @@ impl Extent {
605640
crucible_bail!(ModifyingReadOnlyRegion);
606641
}
607642

608-
self.inner.flush(new_flush, new_gen, job_id)
643+
self.inner.pre_flush(new_flush, new_gen, job_id)?;
644+
Ok(true)
645+
}
646+
647+
/// Performs post-flush cleanup
648+
pub(crate) fn post_flush<I: Into<JobOrReconciliationId> + Debug>(
649+
&mut self,
650+
new_flush: u64,
651+
new_gen: u64,
652+
id: I, // only used for logging
653+
) -> Result<(), CrucibleError> {
654+
let job_id: JobOrReconciliationId = id.into();
655+
self.inner.post_flush(new_flush, new_gen, job_id)
656+
}
657+
658+
/// Flushes this extent if it is dirty
659+
#[instrument]
660+
pub(crate) async fn flush<
661+
I: Into<JobOrReconciliationId> + Debug + Copy + Clone,
662+
>(
663+
&mut self,
664+
new_flush: u64,
665+
new_gen: u64,
666+
id: I, // only used for logging
667+
log: &Logger,
668+
) -> Result<(), CrucibleError> {
669+
if !self.pre_flush(new_flush, new_gen, id, log)? {
670+
return Ok(());
671+
}
672+
self.inner.flush_inner(id.into())?;
673+
self.post_flush(new_flush, new_gen, id)?;
674+
Ok(())
609675
}
610676

611677
#[allow(clippy::unused_async)] // this will be async again in the future

downstairs/src/extent_inner_raw.rs

+19-14
Original file line numberDiff line numberDiff line change
@@ -381,20 +381,12 @@ impl ExtentInner for RawInner {
381381
Ok(ExtentReadResponse { data: buf, blocks })
382382
}
383383

384-
fn flush(
384+
fn pre_flush(
385385
&mut self,
386386
new_flush: u64,
387387
new_gen: u64,
388388
job_id: JobOrReconciliationId,
389389
) -> Result<(), CrucibleError> {
390-
if !self.dirty()? {
391-
/*
392-
* If we have made no writes to this extent since the last flush,
393-
* we do not need to update the extent on disk
394-
*/
395-
return Ok(());
396-
}
397-
398390
cdt::extent__flush__start!(|| {
399391
(job_id.get(), self.extent_number.0, 0)
400392
});
@@ -403,10 +395,17 @@ impl ExtentInner for RawInner {
403395
// operation atomic.
404396
self.set_flush_number(new_flush, new_gen)?;
405397

398+
Ok(())
399+
}
400+
401+
fn flush_inner(
402+
&mut self,
403+
job_id: JobOrReconciliationId,
404+
) -> Result<(), CrucibleError> {
406405
// Now, we fsync to ensure data is flushed to disk. It's okay to crash
407406
// before this point, because setting the flush number is atomic.
408407
cdt::extent__flush__file__start!(|| {
409-
(job_id.get(), self.extent_number.0, 0)
408+
(job_id.get(), self.extent_number.0)
410409
});
411410
if let Err(e) = self.file.sync_all() {
412411
/*
@@ -419,9 +418,17 @@ impl ExtentInner for RawInner {
419418
}
420419
self.context_slot_dirty.fill(0);
421420
cdt::extent__flush__file__done!(|| {
422-
(job_id.get(), self.extent_number.0, 0)
421+
(job_id.get(), self.extent_number.0)
423422
});
423+
Ok(())
424+
}
424425

426+
fn post_flush(
427+
&mut self,
428+
_new_flush: u64,
429+
_new_gen: u64,
430+
job_id: JobOrReconciliationId,
431+
) -> Result<(), CrucibleError> {
425432
// Check for fragmentation in the context slots leading to worse
426433
// performance, and defragment if that's the case.
427434
let extra_syscalls_per_rw = self
@@ -436,9 +443,7 @@ impl ExtentInner for RawInner {
436443
Ok(())
437444
};
438445

439-
cdt::extent__flush__done!(|| {
440-
(job_id.get(), self.extent_number.0, 0)
441-
});
446+
cdt::extent__flush__done!(|| { (job_id.get(), self.extent_number.0) });
442447

443448
r
444449
}

downstairs/src/extent_inner_sqlite.rs

+46-13
Original file line numberDiff line numberDiff line change
@@ -36,13 +36,32 @@ impl ExtentInner for SqliteInner {
3636
self.0.lock().unwrap().dirty()
3737
}
3838

39-
fn flush(
39+
fn pre_flush(
4040
&mut self,
4141
new_flush: u64,
4242
new_gen: u64,
4343
job_id: JobOrReconciliationId,
4444
) -> Result<(), CrucibleError> {
45-
self.0.lock().unwrap().flush(new_flush, new_gen, job_id)
45+
self.0.lock().unwrap().pre_flush(new_flush, new_gen, job_id)
46+
}
47+
48+
fn flush_inner(
49+
&mut self,
50+
job_id: JobOrReconciliationId,
51+
) -> Result<(), CrucibleError> {
52+
self.0.lock().unwrap().flush_inner(job_id)
53+
}
54+
55+
fn post_flush(
56+
&mut self,
57+
new_flush: u64,
58+
new_gen: u64,
59+
job_id: JobOrReconciliationId,
60+
) -> Result<(), CrucibleError> {
61+
self.0
62+
.lock()
63+
.unwrap()
64+
.post_flush(new_flush, new_gen, job_id)
4665
}
4766

4867
fn read(
@@ -194,10 +213,10 @@ impl SqliteMoreInner {
194213
Ok(self.dirty.get())
195214
}
196215

197-
fn flush(
216+
fn pre_flush(
198217
&mut self,
199-
new_flush: u64,
200-
new_gen: u64,
218+
_new_flush: u64,
219+
_new_gen: u64,
201220
job_id: JobOrReconciliationId,
202221
) -> Result<(), CrucibleError> {
203222
// Used for profiling
@@ -207,12 +226,19 @@ impl SqliteMoreInner {
207226
(job_id.get(), self.extent_number.0, n_dirty_blocks)
208227
});
209228

229+
Ok(())
230+
}
231+
232+
fn flush_inner(
233+
&mut self,
234+
job_id: JobOrReconciliationId,
235+
) -> Result<(), CrucibleError> {
210236
/*
211237
* We must first fsync to get any outstanding data written to disk.
212238
* This must be done before we update the flush number.
213239
*/
214240
cdt::extent__flush__file__start!(|| {
215-
(job_id.get(), self.extent_number.0, n_dirty_blocks)
241+
(job_id.get(), self.extent_number.0)
216242
});
217243
if let Err(e) = self.file.sync_all() {
218244
/*
@@ -225,9 +251,18 @@ impl SqliteMoreInner {
225251
);
226252
}
227253
cdt::extent__flush__file__done!(|| {
228-
(job_id.get(), self.extent_number.0, n_dirty_blocks)
254+
(job_id.get(), self.extent_number.0)
229255
});
230256

257+
Ok(())
258+
}
259+
260+
fn post_flush(
261+
&mut self,
262+
new_flush: u64,
263+
new_gen: u64,
264+
job_id: JobOrReconciliationId,
265+
) -> Result<(), CrucibleError> {
231266
// Clear old block contexts. In order to be crash consistent, only
232267
// perform this after the extent fsync is done. For each block
233268
// written since the last flush, remove all block context rows where
@@ -237,7 +272,7 @@ impl SqliteMoreInner {
237272
// file is rehashed, since in that case we don't have that luxury.
238273

239274
cdt::extent__flush__collect__hashes__start!(|| {
240-
(job_id.get(), self.extent_number.0, n_dirty_blocks)
275+
(job_id.get(), self.extent_number.0)
241276
});
242277

243278
// Rehash any parts of the file that we *may have written* data to since
@@ -250,7 +285,7 @@ impl SqliteMoreInner {
250285
});
251286

252287
cdt::extent__flush__sqlite__insert__start!(|| {
253-
(job_id.get(), self.extent_number.0, n_dirty_blocks)
288+
(job_id.get(), self.extent_number.0)
254289
});
255290

256291
// We put all of our metadb updates into a single transaction to
@@ -265,7 +300,7 @@ impl SqliteMoreInner {
265300
)?;
266301

267302
cdt::extent__flush__sqlite__insert__done!(|| {
268-
(job_id.get(), self.extent_number.0, n_dirty_blocks)
303+
(job_id.get(), self.extent_number.0)
269304
});
270305

271306
self.set_flush_number(new_flush, new_gen)?;
@@ -275,9 +310,7 @@ impl SqliteMoreInner {
275310
// Finally, reset the file's seek offset to 0
276311
self.file.seek(SeekFrom::Start(0))?;
277312

278-
cdt::extent__flush__done!(|| {
279-
(job_id.get(), self.extent_number.0, n_dirty_blocks)
280-
});
313+
cdt::extent__flush__done!(|| { (job_id.get(), self.extent_number.0) });
281314
Ok(())
282315
}
283316

downstairs/src/lib.rs

+8-28
Original file line numberDiff line numberDiff line change
@@ -744,44 +744,24 @@ pub mod cdt {
744744
fn submit__writeunwritten__done(_: u64) {}
745745
fn submit__write__done(_: u64) {}
746746
fn submit__flush__done(_: u64) {}
747-
fn extent__flush__start(job_id: u64, extent_id: u32, extent_size: u64) {}
748-
fn extent__flush__done(job_id: u64, extent_id: u32, extent_size: u64) {}
749-
fn extent__flush__file__start(
747+
fn extent__flush__start(
750748
job_id: u64,
751749
extent_id: u32,
752-
extent_size: u64,
753-
) {
754-
}
755-
fn extent__flush__file__done(
756-
job_id: u64,
757-
extent_id: u32,
758-
extent_size: u64,
759-
) {
760-
}
761-
fn extent__flush__collect__hashes__start(
762-
job_id: u64,
763-
extent_id: u32,
764-
num_dirty: u64,
750+
num_dirty_blocks: u64,
765751
) {
766752
}
753+
fn extent__flush__done(job_id: u64, extent_id: u32) {}
754+
fn extent__flush__file__start(job_id: u64, extent_id: u32) {}
755+
fn extent__flush__file__done(job_id: u64, extent_id: u32) {}
756+
fn extent__flush__collect__hashes__start(job_id: u64, extent_id: u32) {}
767757
fn extent__flush__collect__hashes__done(
768758
job_id: u64,
769759
extent_id: u32,
770760
num_rehashed: u64,
771761
) {
772762
}
773-
fn extent__flush__sqlite__insert__start(
774-
job_id: u64,
775-
extent_id: u32,
776-
extent_size: u64,
777-
) {
778-
}
779-
fn extent__flush__sqlite__insert__done(
780-
_job_id: u64,
781-
_extent_id: u32,
782-
extent_size: u64,
783-
) {
784-
}
763+
fn extent__flush__sqlite__insert__start(job_id: u64, extent_id: u32) {}
764+
fn extent__flush__sqlite__insert__done(job_id: u64, extent_id: u32) {}
785765
fn extent__write__start(job_id: u64, extent_id: u32, n_blocks: u64) {}
786766
fn extent__write__done(job_id: u64, extent_id: u32, n_blocks: u64) {}
787767
fn extent__write__get__hashes__start(

0 commit comments

Comments
 (0)