rt: todo tag for spelling check
IgorErin committed Feb 28, 2025
1 parent 1d24443 commit b5b17e7
Showing 4 changed files with 18 additions and 18 deletions.
tokio/src/runtime/builder.rs: 4 changes (2 additions, 2 deletions)
@@ -64,7 +64,7 @@ pub struct Builder {
/// Only used when not using the current-thread executor.
worker_threads: Option<usize>,

-/// TODO(i.erin)
+/// TODO(i.Erin)
worker_group: usize,

/// Cap on thread usage.
@@ -433,7 +433,7 @@ impl Builder {
self
}

-/// TODO(i.erin)
+/// TODO(i.Erin)
pub fn worker_group(&mut self, val: usize) -> &mut Self {
assert!(val > 0, "Worker groups cannot be set to 0");
self.worker_group = val;
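For context, here is a minimal usage sketch of the new builder knob, assuming the worker_group(val) method added above means "number of worker groups" as its assert suggests. The method exists only on this branch; the rest of the setup is ordinary tokio builder code.

```rust
use tokio::runtime::Builder;

fn main() {
    // Hypothetical usage of the worker_group knob introduced above; the
    // method exists only on this branch, and the "split workers into groups"
    // semantics are assumed from the surrounding TODOs.
    let rt = Builder::new_multi_thread()
        .worker_threads(8)
        .worker_group(2) // assumed: split the 8 workers into 2 groups of 4
        .enable_all()
        .build()
        .expect("failed to build runtime");

    rt.block_on(async {
        println!("runtime with grouped workers is running");
    });
}
```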
tokio/src/runtime/metrics/runtime.rs: 2 changes (1 addition, 1 deletion)
@@ -87,7 +87,7 @@ impl RuntimeMetrics {
/// #[tokio::main]
/// async fn main() {
/// let metrics = Handle::current().metrics();
-/// // TODO(i.erin)
+/// // TODO(i.Erin)
/// let n = metrics.global_queue_depth(0);
/// println!("{} tasks currently pending in the runtime's global queue", n);
/// }
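The doc example above suggests that on this branch global_queue_depth takes a group index. Below is a hedged sketch of inspecting every group's queue, assuming a known group count; note that released tokio's RuntimeMetrics::global_queue_depth takes no argument.

```rust
use tokio::runtime::Handle;

#[tokio::main]
async fn main() {
    let metrics = Handle::current().metrics();

    // Assumed: on this branch global_queue_depth takes a group index, as in
    // the doc example above. The group count here is made up and would have
    // to match whatever worker_group(...) was configured with.
    let assumed_groups = 2;
    for group in 0..assumed_groups {
        let n = metrics.global_queue_depth(group);
        println!("group {group}: {n} tasks pending in its global queue");
    }
}
```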
tokio/src/runtime/scheduler/multi_thread/worker.rs: 24 changes (12 additions, 12 deletions)
@@ -95,10 +95,10 @@ pub(super) struct Worker {
/// Index holding this worker's remote state
index: usize,

-/// TODO(i.erin) ask about locallity
+/// TODO(i.Erin) ask about locallity
group: usize,

-// TODO(i.erin) index within group (field or compute this?)
+// TODO(i.Erin) index within group (field or compute this?)
local_index: usize,

/// Used to hand-off a worker's core to another thread.
@@ -164,10 +164,10 @@ pub(crate) struct Shared {
/// 2. Submit work to the scheduler when a worker run queue is saturated
pub(super) injects: Box<[inject::Inject<Arc<Handle>>]>,

-/// TODO(i.erin) only for reading
+/// TODO(i.Erin) only for reading
group_size: usize,

-//TODO(i.erin) only for reading
+//TODO(i.Erin) only for reading
ngroup: usize,

/// Coordinates idle workers groups
@@ -564,7 +564,7 @@ impl Context {
// We consumed all work in the group queues and will start searching for work.
core.stats.end_processing_scheduled_tasks();
// There is no more **local** work to process, try to steal work
-// from other workers within group TODO(i.erin).
+// from other workers within group TODO(i.Erin).
if let Some(task) = core.steal_work(&self.worker) {
// Found work, switch back to processing
core.stats.start_processing_scheduled_tasks();
@@ -573,7 +573,7 @@
}

// Fallback for search over group bounderies.
-// Process like local stealing --- we should use local stealing practices TODO(i.erin)
+// Process like local stealing --- we should use local stealing practices TODO(i.Erin)
if let Some(task) = core.steal_remote_work(&self.worker) {
core.stats.start_processing_scheduled_tasks();
core = self.run_task(task, core)?;
@@ -856,10 +856,10 @@ impl Core {
fn steal_remote_work(&mut self, worker: &Worker) -> Option<Notified> {
for group in worker.handle.random_groups() {
let inject = &worker.handle.shared.injects[group];
-// TODO(i.erin) we may want to find bigger queue
+// TODO(i.Erin) we may want to find bigger queue
// or ask user for hot queue
if !inject.is_empty() {
-// TODO(i.erin) split somehow better
+// TODO(i.Erin) split somehow better
return self.take_n_from_inject(worker.handle.shared.ngroup, inject);
}
}
Expand Down Expand Up @@ -935,7 +935,7 @@ impl Core {
}
}

-// TODO(i.erin) which queu to check? why here?
+// TODO(i.Erin) which queu to check? why here?
// Fallback on checking the global queue
worker.handle.next_remote_task(Some(worker.group))
}
@@ -1158,7 +1158,7 @@ impl Handle {

if let Some(prev) = prev {
core.run_queue
-// TODO(i.erin) explicit group structure to specify overflow
+// TODO(i.Erin) explicit group structure to specify overflow
.push_back_or_overflow(prev, group, self, &mut core.stats);
}

@@ -1205,7 +1205,7 @@ impl Handle {
}

fn push_remote_task(&self, group: Option<usize>, task: Notified) {
-// TODO(i.erin) inc certain remote schedule
+// TODO(i.Erin) inc certain remote schedule
self.shared.scheduler_metrics.inc_remote_schedule_count();

let group = group.unwrap_or_else(|| self.random_groups().next().unwrap());
@@ -1290,7 +1290,7 @@ impl Handle {
// Drain the injection queue
//
// We already shut down every task, so we can simply drop the tasks.
-// TODO(i.erin) smarter choose tasks
+// TODO(i.Erin) smarter choose tasks
while let Some(task) = self.next_remote_task(None) {
drop(task);
}
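The comments in this file describe a two-tier search for work: a worker first steals within its own group, and only as a fallback crosses group boundaries via steal_remote_work, which scans groups in random order and takes a batch from a non-empty inject queue. The standalone sketch below illustrates that ordering with made-up types; it is not tokio's implementation.

```rust
use std::collections::VecDeque;

// Made-up, simplified types: tasks are plain u64 ids, queues are VecDeques.
struct Group {
    locals: Vec<VecDeque<u64>>, // per-worker run queues within the group
    inject: VecDeque<u64>,      // the group's shared injection queue
}

/// Search order suggested by the comments in worker.rs: siblings in the same
/// group first, then (as a fallback) other groups' injection queues.
fn find_work(groups: &mut [Group], my_group: usize, my_index: usize) -> Option<u64> {
    // 1. Steal from the run queues of other workers in the same group.
    for (i, queue) in groups[my_group].locals.iter_mut().enumerate() {
        if i != my_index {
            if let Some(task) = queue.pop_front() {
                return Some(task);
            }
        }
    }

    // 2. Check the group's own injection queue.
    if let Some(task) = groups[my_group].inject.pop_front() {
        return Some(task);
    }

    // 3. Cross the group boundary: scan the other groups' injection queues
    //    (the real code picks them in random order and takes a batch via
    //    take_n_from_inject).
    for (g, other) in groups.iter_mut().enumerate() {
        if g != my_group {
            if let Some(task) = other.inject.pop_front() {
                return Some(task);
            }
        }
    }

    None
}

fn main() {
    let mut groups = vec![
        Group { locals: vec![VecDeque::new(), VecDeque::from([1, 2])], inject: VecDeque::new() },
        Group { locals: vec![VecDeque::new()], inject: VecDeque::from([7]) },
    ];
    // Worker 0 of group 0 steals task 1 from its sibling before ever
    // touching group 1's injection queue.
    assert_eq!(find_work(&mut groups, 0, 0), Some(1));
}
```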
tokio/src/runtime/tests/queue.rs: 6 changes (3 additions, 3 deletions)
@@ -36,7 +36,7 @@ fn fits_256_one_at_a_time() {
let mut stats = new_stats();

for _ in 0..256 {
-// TODO(i.erin)
+// TODO(i.Erin)
let (task, _) = super::unowned(async {});
local.push_back_or_overflow(task, 0, &inject, &mut stats);
}
@@ -125,7 +125,7 @@ fn steal_batch() {

for _ in 0..4 {
let (task, _) = super::unowned(async {});
-// TODO(i.erin)
+// TODO(i.Erin)
local1.push_back_or_overflow(task, 0, &inject, &mut stats);
}

@@ -200,7 +200,7 @@ fn stress1() {
for nlocal in 0..NUM_LOCAL {
for npush in 0..NUM_PUSH {
let (task, _) = super::unowned(async {});
-// TODO(i.erin)
+// TODO(i.Erin)
let group = (nlocal * npush) % n_shards;
local.push_back_or_overflow(task, group, &inject, &mut stats);
}
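Finally, the test changes above thread a group index into push_back_or_overflow; the stress test spreads its pushes across groups with a simple modulo. A tiny illustration of that sharding, where everything except the formula is a placeholder rather than tokio's test harness:

```rust
// The group index is computed exactly as in the stress test above:
// (nlocal * npush) % n_shards.
fn target_group(nlocal: usize, npush: usize, n_shards: usize) -> usize {
    (nlocal * npush) % n_shards
}

fn main() {
    let n_shards = 4; // placeholder group count
    for nlocal in 0..2 {
        for npush in 0..5 {
            let group = target_group(nlocal, npush, n_shards);
            println!("iteration ({nlocal}, {npush}) pushes into group {group}");
        }
    }
}
```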
