
Commit 44efee7

committed Mar 1, 2024
graceful unwrap
1 parent 2b8549f commit 44efee7
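
Context for the change (not part of the commit itself): a `std::sync::Mutex` lock only fails when the mutex is poisoned, i.e. another thread panicked while holding the guard. The previous `.unwrap()` calls would turn one poisoned bucket into a panic inside the metrics pipeline; this commit reports the error and falls back instead. A minimal standalone sketch of poisoning and a graceful recovery:

use std::sync::{Arc, Mutex};
use std::thread;

fn main() {
    let shared = Arc::new(Mutex::new(0u64));

    // A thread that panics while holding the guard poisons the mutex.
    let poisoner = Arc::clone(&shared);
    let _ = thread::spawn(move || {
        let _guard = poisoner.lock().unwrap();
        panic!("panicked while holding the lock");
    })
    .join();

    // Every later lock() now returns Err(PoisonError). An unwrap() here would
    // propagate the crash; handling the Err keeps the caller alive.
    match shared.lock() {
        Ok(guard) => println!("value = {}", *guard),
        Err(poisoned) => {
            // The data is still reachable if we accept it may be mid-update.
            let guard = poisoned.into_inner();
            println!("lock poisoned, recovered value = {}", *guard);
        }
    }
}

The diff below applies the same idea with `global::handle_error` instead of printing, choosing a fallback appropriate to each call site: skip the bucket, default to 0, or use the current time.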

File tree

  • opentelemetry-sdk/src/metrics/internal/sum.rs

1 file changed: +108 -45 lines changed
 

‎opentelemetry-sdk/src/metrics/internal/sum.rs

+108 -45
@@ -47,7 +47,7 @@ impl<T: Number<T>> ValueMap<T> {
             .take(BUCKET_COUNT)
             .collect::<Vec<_>>()
             .try_into()
-            .unwrap();
+            .unwrap(); // this will never fail as Vec length is fixed
 
         ValueMap {
             buckets: Arc::new(buckets),
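
The one `.unwrap()` the commit keeps is this `Vec` to fixed-size array conversion, which cannot fail because the iterator yields exactly `BUCKET_COUNT` elements. A standalone sketch of that invariant (types simplified, and the `BUCKET_COUNT` value here is assumed for illustration):

use std::collections::HashMap;
use std::sync::Mutex;

// Assumed value for illustration; the SDK defines its own BUCKET_COUNT.
const BUCKET_COUNT: usize = 256;

type Bucket = Mutex<Option<HashMap<String, u64>>>;

fn make_buckets() -> [Bucket; BUCKET_COUNT] {
    // take(BUCKET_COUNT) guarantees the Vec has exactly BUCKET_COUNT elements,
    // so the TryInto<[Bucket; BUCKET_COUNT]> conversion cannot return Err here.
    std::iter::repeat_with(|| Mutex::new(None))
        .take(BUCKET_COUNT)
        .collect::<Vec<_>>()
        .try_into()
        .unwrap_or_else(|_| unreachable!("length is exactly BUCKET_COUNT"))
}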
@@ -71,12 +71,16 @@ impl<T: Number<T>> ValueMap<T> {
     // Calculate the total length of data points across all buckets.
     fn total_data_points_count(&self) -> usize {
         self.buckets
-            .iter()
-            .map(|bucket_mutex| {
-                let locked_bucket = bucket_mutex.lock().unwrap();
-                locked_bucket.as_ref().map_or(0, |bucket| bucket.len())
-            })
-            .sum::<usize>()
+            .iter()
+            .map(|bucket_mutex| {
+                bucket_mutex.lock()
+                    .map(|locked_bucket| locked_bucket.as_ref().map_or(0, |bucket| bucket.len()))
+                    .unwrap_or_else(|_| {
+                        global::handle_error(MetricsError::Other("Failed to acquire lock on a bucket. Using default `0` as total data points.".into()));
+                        0
+                    })
+            })
+            .sum::<usize>()
     }
 }
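
A standalone sketch of the counting pattern above, with a simplified bucket type and `eprintln!` standing in for `global::handle_error`: a poisoned bucket is reported and counted as empty rather than panicking.

use std::collections::HashMap;
use std::sync::Mutex;

// Simplified bucket type for illustration; the SDK stores attribute sets and
// generic number values rather than String/u64.
type Bucket = Mutex<Option<HashMap<String, u64>>>;

fn total_data_points_count(buckets: &[Bucket]) -> usize {
    buckets
        .iter()
        .map(|bucket_mutex| {
            bucket_mutex
                .lock()
                .map(|locked| locked.as_ref().map_or(0, |bucket| bucket.len()))
                .unwrap_or_else(|_| {
                    // Stand-in for global::handle_error in the SDK.
                    eprintln!("Failed to acquire lock on a bucket; counting it as 0.");
                    0
                })
        })
        .sum()
}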

@@ -87,12 +91,19 @@ impl<T: Number<T>> ValueMap<T> {
             self.has_no_value_attribute_value
                 .store(true, Ordering::Release);
         } else {
-            let bucket_index = Self::hash_to_bucket(&attrs) as usize; // Ensure index is usize for array indexing
+            let bucket_index = Self::hash_to_bucket(&attrs) as usize;
             let bucket_mutex = &self.buckets[bucket_index];
-            let mut bucket_guard = bucket_mutex.lock().unwrap();
+
+            let mut bucket_guard = match bucket_mutex.lock() {
+                Ok(guard) => guard,
+                Err(e) => {
+                    eprintln!("Failed to acquire lock due to: {:?}", e);
+                    return; // TBD - retry ?
+                }
+            };
 
             if bucket_guard.is_none() {
-                *bucket_guard = Some(HashMap::new()); // Initialize the bucket if it's None
+                *bucket_guard = Some(HashMap::new());
             }
 
             if let Some(ref mut values) = *bucket_guard {
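
The recording path bails out early when a bucket lock cannot be taken, dropping that single measurement. A minimal sketch of the same shape, with hypothetical names and simplified types:

use std::collections::HashMap;
use std::sync::Mutex;

type Bucket = Mutex<Option<HashMap<String, u64>>>;

// Hypothetical free-function version of the recording path: a poisoned bucket
// drops this one measurement instead of panicking in the hot path.
fn record(bucket_mutex: &Bucket, key: String, measurement: u64) {
    let mut bucket_guard = match bucket_mutex.lock() {
        Ok(guard) => guard,
        Err(e) => {
            eprintln!("Failed to acquire lock due to: {:?}", e);
            return; // the measurement is lost, but the caller keeps running
        }
    };

    // Lazily initialize the bucket, then accumulate into it.
    let values = bucket_guard.get_or_insert_with(HashMap::new);
    *values.entry(key).or_insert(0) += measurement;
}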
@@ -106,7 +117,6 @@ impl<T: Number<T>> ValueMap<T> {
                         if is_under_cardinality_limit(size) {
                             vacant_entry.insert(measurement);
                         } else {
-                            // TBD - Update total_count ??
                             values
                                 .entry(STREAM_OVERFLOW_ATTRIBUTE_SET.clone())
                                 .and_modify(|val| *val += measurement)
@@ -188,17 +198,31 @@ impl<T: Number<T>> Sum<T> {
         }
 
         for bucket_mutex in self.value_map.buckets.iter() {
-            if let Some(ref mut locked_bucket) = *bucket_mutex.lock().unwrap() {
-                for (attrs, value) in locked_bucket.drain() {
-                    s_data.data_points.push(DataPoint {
-                        attributes: attrs,
-                        start_time: Some(*self.start.lock().unwrap()),
-                        time: Some(t),
-                        value,
-                        exemplars: vec![],
-                    });
+            match bucket_mutex.lock() {
+                Ok(mut locked_bucket) => {
+                    if let Some(ref mut bucket) = *locked_bucket {
+                        for (attrs, value) in bucket.drain() {
+                            // Correctly handle lock acquisition on self.start
+                            let start_time = self.start.lock().map_or_else(
+                                |_| SystemTime::now(), // In case of an error, use SystemTime::now()
+                                |guard| *guard, // In case of success, dereference the guard to get the SystemTime
+                            );
+
+                            s_data.data_points.push(DataPoint {
+                                attributes: attrs,
+                                start_time: Some(start_time),
+                                time: Some(t),
+                                value,
+                                exemplars: vec![],
+                            });
+                        }
+                    }
+                }
+                Err(e) => {
+                    global::handle_error(MetricsError::Other(
+                        format!("Failed to acquire lock on bucket due to: {:?}", e).into(),
+                    ));
                 }
-                // The bucket is automatically cleared by the .drain() method
             }
         }
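
For the start timestamp, the fallback is a value rather than an early return: `map_or_else` substitutes `SystemTime::now()` when the lock on `self.start` is poisoned. A standalone sketch of just that helper (the function name is illustrative, not from the SDK):

use std::sync::Mutex;
use std::time::SystemTime;

// Illustrative helper: read the shared start time, falling back to "now" if
// the lock is poisoned.
fn start_time_or_now(start: &Mutex<SystemTime>) -> SystemTime {
    start.lock().map_or_else(
        |_| SystemTime::now(), // poisoned lock: approximate with the current time
        |guard| *guard,        // healthy lock: copy the stored SystemTime
    )
}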

@@ -261,16 +285,29 @@ impl<T: Number<T>> Sum<T> {
         // sets that become "stale" need to be forgotten so this will not
         // overload the system.
         for bucket_mutex in self.value_map.buckets.iter() {
-            if let Some(ref locked_bucket) = *bucket_mutex.lock().unwrap() {
-                for (attrs, value) in locked_bucket.iter() {
-                    s_data.data_points.push(DataPoint {
-                        attributes: attrs.clone(),
-                        start_time: Some(*self.start.lock().unwrap()), // Consider last reset time
-                        time: Some(t),
-                        value: *value,
-                        exemplars: vec![],
-                    });
+            // Handle potential lock failure gracefully
+            if let Ok(locked_bucket) = bucket_mutex.lock() {
+                if let Some(locked_bucket) = &*locked_bucket {
+                    for (attrs, value) in locked_bucket.iter() {
+                        // Handle potential lock failure on self.start and use current time as fallback
+                        let start_time = self.start.lock().map_or_else(
+                            |_| SystemTime::now(), // Use SystemTime::now() as fallback on error
+                            |guard| *guard, // Dereference the guard to get the SystemTime on success
+                        );
+
+                        s_data.data_points.push(DataPoint {
+                            attributes: attrs.clone(),
+                            start_time: Some(start_time),
+                            time: Some(t),
+                            value: *value,
+                            exemplars: vec![],
+                        });
+                    }
                 }
+            } else {
+                global::handle_error(MetricsError::Other(
+                    "Failed to acquire lock on a bucket".into(),
+                ));
             }
         }
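
The cumulative path reads buckets without draining them, using `if let Ok(..) .. else ..` so a poisoned bucket is reported and simply contributes nothing. A standalone sketch with simplified types, again with `eprintln!` in place of `global::handle_error`:

use std::collections::HashMap;
use std::sync::Mutex;

type Bucket = Mutex<Option<HashMap<String, u64>>>;

// Cumulative-style read: clone the bucket's contents when the lock is healthy,
// report and return nothing when it is poisoned.
fn snapshot(bucket_mutex: &Bucket) -> Vec<(String, u64)> {
    if let Ok(locked_bucket) = bucket_mutex.lock() {
        locked_bucket
            .as_ref()
            .map(|bucket| bucket.iter().map(|(k, v)| (k.clone(), *v)).collect())
            .unwrap_or_default()
    } else {
        // Stand-in for global::handle_error in the SDK.
        eprintln!("Failed to acquire lock on a bucket");
        Vec::new()
    }
}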

@@ -352,20 +389,32 @@ impl<T: Number<T>> PrecomputedSum<T> {
         }
 
         for bucket_mutex in self.value_map.buckets.iter() {
-            if let Some(ref mut locked_bucket) = *bucket_mutex.lock().unwrap() {
-                let default = T::default();
-                for (attrs, value) in locked_bucket.drain() {
-                    let delta = value - *reported.get(&attrs).unwrap_or(&default);
-                    if delta != default {
-                        new_reported.insert(attrs.clone(), value);
+            match bucket_mutex.lock() {
+                Ok(mut locked_bucket) => {
+                    if let Some(locked_bucket) = &mut *locked_bucket {
+                        let default = T::default();
+                        for (attrs, value) in locked_bucket.drain() {
+                            let delta = value - *reported.get(&attrs).unwrap_or(&default);
+                            if delta != default {
+                                new_reported.insert(attrs.clone(), value);
+                            }
+                            s_data.data_points.push(DataPoint {
+                                attributes: attrs.clone(),
+                                start_time: Some(prev_start),
+                                time: Some(t),
+                                value: delta,
+                                exemplars: vec![],
+                            });
+                        }
                     }
-                    s_data.data_points.push(DataPoint {
-                        attributes: attrs.clone(),
-                        start_time: Some(prev_start),
-                        time: Some(t),
-                        value: delta,
-                        exemplars: vec![],
-                    });
+                }
+                Err(e) => {
+                    // Log or handle the lock acquisition error if necessary
+                    global::handle_error(MetricsError::Other(
+                        format!("Failed to acquire lock on bucket due to: {:?}", e).into(),
+                    ));
+                    // Continue to the next bucket if the lock cannot be acquired
+                    continue;
                 }
             }
         }
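
In the delta path for `PrecomputedSum`, a bucket whose lock cannot be acquired is skipped with `continue`, so one poisoned bucket does not abort the whole collection. A standalone sketch of that skip-and-report loop (simplified types, `eprintln!` standing in for `global::handle_error`):

use std::collections::HashMap;
use std::sync::Mutex;

type Bucket = Mutex<Option<HashMap<String, u64>>>;

// Delta-style collection: drain every healthy bucket, skip poisoned ones so a
// single bad lock cannot abort the whole export.
fn drain_all(buckets: &[Bucket]) -> Vec<(String, u64)> {
    let mut points = Vec::new();
    for bucket_mutex in buckets {
        let mut locked_bucket = match bucket_mutex.lock() {
            Ok(guard) => guard,
            Err(e) => {
                // Stand-in for global::handle_error in the SDK.
                eprintln!("Failed to acquire lock on bucket due to: {:?}", e);
                continue; // keep collecting the remaining buckets
            }
        };
        if let Some(bucket) = locked_bucket.as_mut() {
            points.extend(bucket.drain());
        }
    }
    points
}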
@@ -434,17 +483,31 @@ impl<T: Number<T>> PrecomputedSum<T> {
 
         let default = T::default();
         for bucket_mutex in self.value_map.buckets.iter() {
-            if let Some(ref locked_bucket) = *bucket_mutex.lock().unwrap() {
+            // Safely attempt to acquire the lock, handling any potential error.
+            let locked_bucket = match bucket_mutex.lock() {
+                Ok(bucket) => bucket,
+                Err(e) => {
+                    // Log the error or handle it as needed.
+                    global::handle_error(MetricsError::Other(
+                        format!("Failed to acquire lock on bucket due to: {:?}", e).into(),
+                    ));
+                    continue; // Skip to the next bucket if the lock cannot be acquired.
+                }
+            };
+
+            // Proceed only if the bucket is not empty.
+            if let Some(locked_bucket) = &*locked_bucket {
                 for (attrs, value) in locked_bucket.iter() {
                     let delta = *value - *reported.get(attrs).unwrap_or(&default);
                     if delta != default {
                         new_reported.insert(attrs.clone(), *value);
                     }
+
                     s_data.data_points.push(DataPoint {
                         attributes: attrs.clone(),
                         start_time: Some(prev_start),
                         time: Some(t),
-                        value: *value, // For cumulative, we use the value directly without calculating delta
+                        value: *value, // For cumulative, directly use the value without calculating the delta.
                         exemplars: vec![],
                     });
                 }
