diff --git a/examples/logs-basic/Cargo.toml b/examples/logs-basic/Cargo.toml index 00321af4fc..275dbea8a3 100644 --- a/examples/logs-basic/Cargo.toml +++ b/examples/logs-basic/Cargo.toml @@ -10,4 +10,4 @@ opentelemetry_sdk = { path = "../../opentelemetry-sdk", features = ["logs"] } opentelemetry-stdout = { path = "../../opentelemetry-stdout", features = ["logs"]} opentelemetry-appender-tracing = { path = "../../opentelemetry-appender-tracing", default-features = false} tracing = { workspace = true, features = ["std"]} -tracing-subscriber = { workspace = true, features = ["registry", "std"] } +tracing-subscriber = { workspace = true, features = ["env-filter","registry", "std", "fmt"] } diff --git a/examples/logs-basic/src/main.rs b/examples/logs-basic/src/main.rs index 73ec252c39..fb75b8cb00 100644 --- a/examples/logs-basic/src/main.rs +++ b/examples/logs-basic/src/main.rs @@ -2,7 +2,7 @@ use opentelemetry_appender_tracing::layer; use opentelemetry_sdk::logs::SdkLoggerProvider; use opentelemetry_sdk::Resource; use tracing::error; -use tracing_subscriber::prelude::*; +use tracing_subscriber::{prelude::*, EnvFilter}; fn main() { let exporter = opentelemetry_stdout::LogExporter::default(); @@ -14,8 +14,37 @@ fn main() { ) .with_simple_exporter(exporter) .build(); - let layer = layer::OpenTelemetryTracingBridge::new(&provider); - tracing_subscriber::registry().with(layer).init(); + + // For the OpenTelemetry layer, add a tracing filter to filter events from + // OpenTelemetry and its dependent crates (opentelemetry-otlp uses crates + // like reqwest/tonic etc.) from being sent back to OTel itself, thus + // preventing infinite telemetry generation. The filter levels are set as + // follows: + // - Allow `info` level and above by default. + // - Restrict `opentelemetry`, `hyper`, `tonic`, and `reqwest` completely. + // Note: This will also drop events from crates like `tonic` etc. even when + // they are used outside the OTLP Exporter. For more details, see: + // https://github.com/open-telemetry/opentelemetry-rust/issues/761 + let filter_otel = EnvFilter::new("info") + .add_directive("hyper=off".parse().unwrap()) + .add_directive("opentelemetry=off".parse().unwrap()) + .add_directive("tonic=off".parse().unwrap()) + .add_directive("h2=off".parse().unwrap()) + .add_directive("reqwest=off".parse().unwrap()); + let otel_layer = layer::OpenTelemetryTracingBridge::new(&provider).with_filter(filter_otel); + + // Create a new tracing::Fmt layer to print the logs to stdout. It has a + // default filter of `info` level and above, and `debug` and above for logs + // from OpenTelemetry crates. The filter levels can be customized as needed. 
+    let filter_fmt = EnvFilter::new("info").add_directive("opentelemetry=debug".parse().unwrap());
+    let fmt_layer = tracing_subscriber::fmt::layer()
+        .with_thread_names(true)
+        .with_filter(filter_fmt);
+
+    tracing_subscriber::registry()
+        .with(otel_layer)
+        .with(fmt_layer)
+        .init();
     error!(name: "my-event-name", target: "my-system", event_id = 20, user_name = "otel", user_email = "otel@opentelemetry.io", message = "This is an example message");
     let _ = provider.shutdown();
diff --git a/examples/tracing-grpc/Cargo.toml b/examples/tracing-grpc/Cargo.toml
index c836904a37..1f3ade0c87 100644
--- a/examples/tracing-grpc/Cargo.toml
+++ b/examples/tracing-grpc/Cargo.toml
@@ -19,7 +19,7 @@ opentelemetry_sdk = { path = "../../opentelemetry-sdk", features = ["rt-tokio"]
 opentelemetry-stdout = { path = "../../opentelemetry-stdout", features = ["trace"] }
 prost = { workspace = true }
 tokio = { workspace = true, features = ["full"] }
-tonic = { workspace = true }
+tonic = { workspace = true, features = ["server"] }

 [build-dependencies]
 tonic-build = { workspace = true }
diff --git a/examples/tracing-jaeger/src/main.rs b/examples/tracing-jaeger/src/main.rs
index a47ebfa5d8..64b7429129 100644
--- a/examples/tracing-jaeger/src/main.rs
+++ b/examples/tracing-jaeger/src/main.rs
@@ -1,14 +1,16 @@
 use opentelemetry::{
     global,
-    trace::{TraceContextExt, TraceError, Tracer},
+    trace::{TraceContextExt, Tracer},
     KeyValue,
 };
+use opentelemetry_otlp::ExporterBuildError;
 use opentelemetry_sdk::trace::SdkTracerProvider;
 use opentelemetry_sdk::Resource;
 use std::error::Error;

-fn init_tracer_provider() -> Result<SdkTracerProvider, TraceError> {
+fn init_tracer_provider() -> Result<SdkTracerProvider, ExporterBuildError>
+{
     let exporter = opentelemetry_otlp::SpanExporter::builder()
         .with_tonic()
         .build()?;
diff --git a/opentelemetry-appender-tracing/CHANGELOG.md b/opentelemetry-appender-tracing/CHANGELOG.md
index d7898bf92e..ada8903385 100644
--- a/opentelemetry-appender-tracing/CHANGELOG.md
+++ b/opentelemetry-appender-tracing/CHANGELOG.md
@@ -2,13 +2,14 @@

 ## vNext

+- Fixes [1682](https://github.com/open-telemetry/opentelemetry-rust/issues/1682):
+  the `spec_unstable_logs_enabled` feature no longer suppresses logs for other layers.
+
 ## 0.28.1

 Released 2025-Feb-12

-- Bump `tracing-opentelemetry` to 0.29
-- New experimental feature to use trace\_id & span\_id from spans created through the [tracing](https://crates.io/crates/tracing) crate (experimental_use_tracing_span_context) [#2438](https://github.com/open-telemetry/opentelemetry-rust/pull/2438)
-
+- New *experimental* feature to use trace_id & span_id from spans created through the [tracing](https://crates.io/crates/tracing) crate (experimental_use_tracing_span_context) [#2438](https://github.com/open-telemetry/opentelemetry-rust/pull/2438)

 ## 0.28.0
@@ -27,6 +28,7 @@ Released 2024-Nov-11
 - **Breaking** [2291](https://github.com/open-telemetry/opentelemetry-rust/pull/2291) Rename `logs_level_enabled flag` to `spec_unstable_logs_enabled`. Please enable this updated flag if the feature is needed. This flag will be removed once the feature is stabilized in the specifications.

 ## v0.26.0
+
 Released 2024-Sep-30

 - Update `opentelemetry` dependency version to 0.26
@@ -45,7 +47,7 @@ Released 2024-Sep-30
   Exporters might use the target to override the instrumentation scope, which previously contained "opentelemetry-appender-tracing".
 - **Breaking** [1928](https://github.com/open-telemetry/opentelemetry-rust/pull/1928) Insert tracing event name into LogRecord::event_name instead of attributes.
- - If using a custom exporter, then they must serialize this field directly from LogRecord::event_name instead of iterating over the attributes. OTLP Exporter is modified to handle this. + - If using a custom exporter, then they must serialize this field directly from LogRecord::event_name instead of iterating over the attributes. OTLP Exporter is modified to handle this. - Update `opentelemetry` dependency version to 0.24 ## v0.4.0 diff --git a/opentelemetry-appender-tracing/Cargo.toml b/opentelemetry-appender-tracing/Cargo.toml index 0d9a0e9b9b..5a4002344d 100644 --- a/opentelemetry-appender-tracing/Cargo.toml +++ b/opentelemetry-appender-tracing/Cargo.toml @@ -23,7 +23,8 @@ tracing-opentelemetry = { version = "0.29", optional = true } log = { workspace = true } opentelemetry-stdout = { path = "../opentelemetry-stdout", features = ["logs"] } opentelemetry_sdk = { path = "../opentelemetry-sdk", features = ["logs", "testing"] } -tracing-subscriber = { workspace = true, features = ["registry", "std", "env-filter"] } +tracing = { workspace = true, features = ["std"]} +tracing-subscriber = { workspace = true, features = ["env-filter","registry", "std", "fmt"] } tracing-log = "0.2" criterion = { workspace = true } tokio = { workspace = true, features = ["full"]} @@ -35,7 +36,6 @@ pprof = { version = "0.14", features = ["flamegraph", "criterion"] } default = [] experimental_metadata_attributes = ["dep:tracing-log"] spec_unstable_logs_enabled = ["opentelemetry/spec_unstable_logs_enabled"] -# TODO - Enable this in 0.28.1 (once tracing-opentelemetry v0.29 is released) experimental_use_tracing_span_context = ["tracing-opentelemetry"] diff --git a/opentelemetry-appender-tracing/benches/logs.rs b/opentelemetry-appender-tracing/benches/logs.rs index 6a2c938177..0ec47c1863 100644 --- a/opentelemetry-appender-tracing/benches/logs.rs +++ b/opentelemetry-appender-tracing/benches/logs.rs @@ -17,7 +17,6 @@ use criterion::{criterion_group, criterion_main, Criterion}; use opentelemetry::InstrumentationScope; use opentelemetry_appender_tracing::layer as tracing_layer; use opentelemetry_sdk::error::OTelSdkResult; -use opentelemetry_sdk::logs::{LogBatch, LogExporter}; use opentelemetry_sdk::logs::{LogProcessor, SdkLogRecord, SdkLoggerProvider}; use opentelemetry_sdk::Resource; #[cfg(not(target_os = "windows"))] @@ -27,40 +26,19 @@ use tracing_subscriber::prelude::*; use tracing_subscriber::Layer; use tracing_subscriber::Registry; -#[derive(Debug, Clone)] -struct NoopExporter { - enabled: bool, -} - -impl LogExporter for NoopExporter { - #[allow(clippy::manual_async_fn)] - fn export( - &self, - _batch: LogBatch<'_>, - ) -> impl std::future::Future + Send { - async { OTelSdkResult::Ok(()) } - } - - fn event_enabled(&self, _: opentelemetry::logs::Severity, _: &str, _: &str) -> bool { - self.enabled - } -} - #[derive(Debug)] -struct NoopProcessor { - exporter: E, +struct NoopProcessor { + enabled: bool, } -impl NoopProcessor { - fn new(exporter: E) -> Self { - Self { exporter } +impl NoopProcessor { + fn new(enabled: bool) -> Self { + Self { enabled } } } -impl LogProcessor for NoopProcessor { - fn emit(&self, _: &mut SdkLogRecord, _: &InstrumentationScope) { - // no-op - } +impl LogProcessor for NoopProcessor { + fn emit(&self, _: &mut SdkLogRecord, _: &InstrumentationScope) {} fn force_flush(&self) -> OTelSdkResult { Ok(()) @@ -72,11 +50,11 @@ impl LogProcessor for NoopProcessor { fn event_enabled( &self, - level: opentelemetry::logs::Severity, - target: &str, - name: &str, + _level: 
opentelemetry::logs::Severity, + _target: &str, + _name: &str, ) -> bool { - self.exporter.event_enabled(level, target, name) + self.enabled } } @@ -126,8 +104,7 @@ fn benchmark_no_subscriber(c: &mut Criterion) { } fn benchmark_with_ot_layer(c: &mut Criterion, enabled: bool, bench_name: &str) { - let exporter = NoopExporter { enabled }; - let processor = NoopProcessor::new(exporter); + let processor = NoopProcessor::new(enabled); let provider = SdkLoggerProvider::builder() .with_resource( Resource::builder_empty() diff --git a/opentelemetry-appender-tracing/examples/basic.rs b/opentelemetry-appender-tracing/examples/basic.rs index bad63757cc..b50f575cde 100644 --- a/opentelemetry-appender-tracing/examples/basic.rs +++ b/opentelemetry-appender-tracing/examples/basic.rs @@ -3,7 +3,7 @@ use opentelemetry_appender_tracing::layer; use opentelemetry_sdk::{logs::SdkLoggerProvider, Resource}; use tracing::error; -use tracing_subscriber::prelude::*; +use tracing_subscriber::{prelude::*, EnvFilter}; fn main() { let exporter = opentelemetry_stdout::LogExporter::default(); @@ -15,8 +15,37 @@ fn main() { ) .with_simple_exporter(exporter) .build(); - let layer = layer::OpenTelemetryTracingBridge::new(&provider); - tracing_subscriber::registry().with(layer).init(); + + // For the OpenTelemetry layer, add a tracing filter to filter events from + // OpenTelemetry and its dependent crates (opentelemetry-otlp uses crates + // like reqwest/tonic etc.) from being sent back to OTel itself, thus + // preventing infinite telemetry generation. The filter levels are set as + // follows: + // - Allow `info` level and above by default. + // - Restrict `opentelemetry`, `hyper`, `tonic`, and `reqwest` completely. + // Note: This will also drop events from crates like `tonic` etc. even when + // they are used outside the OTLP Exporter. For more details, see: + // https://github.com/open-telemetry/opentelemetry-rust/issues/761 + let filter_otel = EnvFilter::new("info") + .add_directive("hyper=off".parse().unwrap()) + .add_directive("opentelemetry=off".parse().unwrap()) + .add_directive("tonic=off".parse().unwrap()) + .add_directive("h2=off".parse().unwrap()) + .add_directive("reqwest=off".parse().unwrap()); + let otel_layer = layer::OpenTelemetryTracingBridge::new(&provider).with_filter(filter_otel); + + // Create a new tracing::Fmt layer to print the logs to stdout. It has a + // default filter of `info` level and above, and `debug` and above for logs + // from OpenTelemetry crates. The filter levels can be customized as needed. 
+    let filter_fmt = EnvFilter::new("info").add_directive("opentelemetry=debug".parse().unwrap());
+    let fmt_layer = tracing_subscriber::fmt::layer()
+        .with_thread_names(true)
+        .with_filter(filter_fmt);
+
+    tracing_subscriber::registry()
+        .with(otel_layer)
+        .with(fmt_layer)
+        .init();
     error!(name: "my-event-name", target: "my-system", event_id = 20, user_name = "otel", user_email = "otel@opentelemetry.io", message = "This is an example message");
     let _ = provider.shutdown();
diff --git a/opentelemetry-appender-tracing/src/layer.rs b/opentelemetry-appender-tracing/src/layer.rs
index f883cf087a..ebacc89385 100644
--- a/opentelemetry-appender-tracing/src/layer.rs
+++ b/opentelemetry-appender-tracing/src/layer.rs
@@ -158,6 +158,14 @@ where
         event: &tracing::Event<'_>,
         _ctx: tracing_subscriber::layer::Context<'_, S>,
     ) {
+        let severity = severity_of_level(event.metadata().level());
+        let target = event.metadata().target();
+        #[cfg(feature = "spec_unstable_logs_enabled")]
+        if !self.logger.event_enabled(severity, target) {
+            // TODO: See if we need internal logs or track the count.
+            return;
+        }
+
         #[cfg(feature = "experimental_metadata_attributes")]
         let normalized_meta = event.normalized_metadata();
@@ -170,9 +178,9 @@ where
         let mut log_record = self.logger.create_log_record();

         // TODO: Fix heap allocation
-        log_record.set_target(meta.target().to_string());
+        log_record.set_target(target.to_string());
         log_record.set_event_name(meta.name());
-        log_record.set_severity_number(severity_of_level(meta.level()));
+        log_record.set_severity_number(severity);
         log_record.set_severity_text(meta.level().as_str());
         let mut visitor = EventVisitor::new(&mut log_record);
         #[cfg(feature = "experimental_metadata_attributes")]
@@ -203,17 +211,6 @@ where
         //emit record
         self.logger.emit(log_record);
     }
-
-    #[cfg(feature = "spec_unstable_logs_enabled")]
-    fn event_enabled(
-        &self,
-        _event: &tracing_core::Event<'_>,
-        _ctx: tracing_subscriber::layer::Context<'_, S>,
-    ) -> bool {
-        let severity = severity_of_level(_event.metadata().level());
-        self.logger
-            .event_enabled(severity, _event.metadata().target())
-    }
 }

 const fn severity_of_level(level: &Level) -> Severity {
@@ -266,17 +263,11 @@ mod tests {
     struct ReentrantLogExporter;

     impl LogExporter for ReentrantLogExporter {
-        #[allow(clippy::manual_async_fn)]
-        fn export(
-            &self,
-            _batch: LogBatch<'_>,
-        ) -> impl std::future::Future<Output = OTelSdkResult> + Send {
-            async {
-                // This will cause a deadlock as the export itself creates a log
-                // while still within the lock of the SimpleLogProcessor.
-                warn!(name: "my-event-name", target: "reentrant", event_id = 20, user_name = "otel", user_email = "otel@opentelemetry.io");
-                Ok(())
-            }
+        async fn export(&self, _batch: LogBatch<'_>) -> OTelSdkResult {
+            // This will cause a deadlock as the export itself creates a log
+            // while still within the lock of the SimpleLogProcessor.
+            warn!(name: "my-event-name", target: "reentrant", event_id = 20, user_name = "otel", user_email = "otel@opentelemetry.io");
+            Ok(())
         }
     }
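The layer.rs change above moves the enabled check out of the `Layer::event_enabled` callback (which suppressed the event for every layer in the subscriber) and into `on_event`, so only the OTel bridge skips the record. For custom `LogProcessor` authors, here is a minimal sketch of the contract the bridge relies on; the `SeverityGate` type and its threshold are illustrative, and `event_enabled` is only part of the trait when the SDK's `spec_unstable_logs_enabled` feature is enabled:

```rust
use opentelemetry::logs::Severity;
use opentelemetry::InstrumentationScope;
use opentelemetry_sdk::error::OTelSdkResult;
use opentelemetry_sdk::logs::{LogProcessor, SdkLogRecord};

#[derive(Debug)]
struct SeverityGate {
    min: Severity,
}

impl LogProcessor for SeverityGate {
    fn emit(&self, _record: &mut SdkLogRecord, _scope: &InstrumentationScope) {
        // A real processor would buffer or export the record here.
    }

    fn force_flush(&self) -> OTelSdkResult {
        Ok(())
    }

    fn shutdown(&self) -> OTelSdkResult {
        Ok(())
    }

    // Returning false makes the bridge skip record creation for this event
    // entirely (compiled only with `spec_unstable_logs_enabled`).
    fn event_enabled(&self, level: Severity, _target: &str, _name: &str) -> bool {
        level >= self.min // assumes Severity's derived ordering
    }
}
```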
diff --git a/opentelemetry-otlp/CHANGELOG.md b/opentelemetry-otlp/CHANGELOG.md
index bc8046f9ad..a2fa2e090e 100644
--- a/opentelemetry-otlp/CHANGELOG.md
+++ b/opentelemetry-otlp/CHANGELOG.md
@@ -2,6 +2,19 @@

 ## vNext

+- The `OTEL_EXPORTER_OTLP_TIMEOUT`, `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT`, `OTEL_EXPORTER_OTLP_METRICS_TIMEOUT` and `OTEL_EXPORTER_OTLP_LOGS_TIMEOUT` environment variables are now interpreted as milliseconds instead of seconds.
+
+- *Breaking*
+
+ExporterBuilder's build() method now returns a `Result` with `ExporterBuildError`
+as the error variant. Previously it returned signal-specific errors like `LogError`
+from the `opentelemetry_sdk`, which are no longer part of the SDK. No changes are
+required if you were using unwrap/expect. If you were matching on the returned
+error enum, match on `ExporterBuildError` instead. Unlike the previous
+`Error`, which contained many variants unrelated to building an exporter, the
+new enum has only variants applicable to building an exporter. Some
+variants may be applicable only with select features.
+
 ## 0.28.0

 Released 2025-Feb-10
diff --git a/opentelemetry-otlp/Cargo.toml b/opentelemetry-otlp/Cargo.toml
index 35f270c6ce..5d593a5b58 100644
--- a/opentelemetry-otlp/Cargo.toml
+++ b/opentelemetry-otlp/Cargo.toml
@@ -26,7 +26,6 @@ all-features = true
 rustdoc-args = ["--cfg", "docsrs"]

 [dependencies]
-async-trait = { workspace = true }
 futures-core = { workspace = true }
 opentelemetry = { version = "0.28", default-features = false, path = "../opentelemetry" }
 opentelemetry_sdk = { version = "0.28", default-features = false, path = "../opentelemetry-sdk" }
@@ -51,6 +50,7 @@ opentelemetry_sdk = { features = ["trace", "rt-tokio", "testing"], path = "../op
 tokio = { workspace = true, features = ["macros", "rt-multi-thread"] }
 futures-util = { workspace = true }
 temp-env = { workspace = true }
+tonic = { workspace = true, features = ["server"] }

 [features]
 # telemetry pillars and functions
diff --git a/opentelemetry-otlp/src/exporter/http/logs.rs b/opentelemetry-otlp/src/exporter/http/logs.rs
index c7c0f92a71..d108e59c5c 100644
--- a/opentelemetry-otlp/src/exporter/http/logs.rs
+++ b/opentelemetry-otlp/src/exporter/http/logs.rs
@@ -5,51 +5,45 @@ use opentelemetry_sdk::error::{OTelSdkError, OTelSdkResult};
 use opentelemetry_sdk::logs::{LogBatch, LogExporter};

 impl LogExporter for OtlpHttpClient {
-    #[allow(clippy::manual_async_fn)]
-    fn export(
-        &self,
-        batch: LogBatch<'_>,
-    ) -> impl std::future::Future<Output = OTelSdkResult> + Send {
-        async move {
-            let client = self
-                .client
-                .lock()
-                .map_err(|e| OTelSdkError::InternalFailure(format!("Mutex lock failed: {}", e)))?
+    async fn export(&self, batch: LogBatch<'_>) -> OTelSdkResult {
+        let client = self
+            .client
+            .lock()
+            .map_err(|e| OTelSdkError::InternalFailure(format!("Mutex lock failed: {}", e)))?
+ .clone() + .ok_or(OTelSdkError::AlreadyShutdown)?; - let (body, content_type) = self - .build_logs_export_body(batch) - .map_err(|e| OTelSdkError::InternalFailure(e.to_string()))?; + let (body, content_type) = self + .build_logs_export_body(batch) + .map_err(|e| OTelSdkError::InternalFailure(e.to_string()))?; - let mut request = http::Request::builder() - .method(Method::POST) - .uri(&self.collector_endpoint) - .header(CONTENT_TYPE, content_type) - .body(body.into()) - .map_err(|e| OTelSdkError::InternalFailure(e.to_string()))?; + let mut request = http::Request::builder() + .method(Method::POST) + .uri(&self.collector_endpoint) + .header(CONTENT_TYPE, content_type) + .body(body.into()) + .map_err(|e| OTelSdkError::InternalFailure(e.to_string()))?; - for (k, v) in &self.headers { - request.headers_mut().insert(k.clone(), v.clone()); - } + for (k, v) in &self.headers { + request.headers_mut().insert(k.clone(), v.clone()); + } - let request_uri = request.uri().to_string(); - otel_debug!(name: "HttpLogsClient.CallingExport"); - let response = client - .send_bytes(request) - .await - .map_err(|e| OTelSdkError::InternalFailure(format!("{e:?}")))?; - if !response.status().is_success() { - let error = format!( - "OpenTelemetry logs export failed. Url: {}, Status Code: {}, Response: {:?}", - request_uri, - response.status().as_u16(), - response.body() - ); - return Err(OTelSdkError::InternalFailure(error)); - } - Ok(()) + let request_uri = request.uri().to_string(); + otel_debug!(name: "HttpLogsClient.CallingExport"); + let response = client + .send_bytes(request) + .await + .map_err(|e| OTelSdkError::InternalFailure(format!("{e:?}")))?; + if !response.status().is_success() { + let error = format!( + "OpenTelemetry logs export failed. Url: {}, Status Code: {}, Response: {:?}", + request_uri, + response.status().as_u16(), + response.body() + ); + return Err(OTelSdkError::InternalFailure(error)); } + Ok(()) } fn shutdown(&mut self) -> OTelSdkResult { diff --git a/opentelemetry-otlp/src/exporter/http/metrics.rs b/opentelemetry-otlp/src/exporter/http/metrics.rs index f534b69a7f..759dd6a7d5 100644 --- a/opentelemetry-otlp/src/exporter/http/metrics.rs +++ b/opentelemetry-otlp/src/exporter/http/metrics.rs @@ -1,7 +1,6 @@ use std::sync::Arc; use crate::metric::MetricsClient; -use async_trait::async_trait; use http::{header::CONTENT_TYPE, Method}; use opentelemetry::otel_debug; use opentelemetry_sdk::error::{OTelSdkError, OTelSdkResult}; @@ -9,7 +8,6 @@ use opentelemetry_sdk::metrics::data::ResourceMetrics; use super::OtlpHttpClient; -#[async_trait] impl MetricsClient for OtlpHttpClient { async fn export(&self, metrics: &mut ResourceMetrics) -> OTelSdkResult { let client = self diff --git a/opentelemetry-otlp/src/exporter/http/mod.rs b/opentelemetry-otlp/src/exporter/http/mod.rs index 650358533e..923ff2823a 100644 --- a/opentelemetry-otlp/src/exporter/http/mod.rs +++ b/opentelemetry-otlp/src/exporter/http/mod.rs @@ -1,5 +1,5 @@ use super::{ - default_headers, default_protocol, parse_header_string, + default_headers, default_protocol, parse_header_string, ExporterBuildError, OTEL_EXPORTER_OTLP_HTTP_ENDPOINT_DEFAULT, }; use crate::{ @@ -61,7 +61,7 @@ pub struct HttpConfig { /// # #[cfg(feature="metrics")] /// use opentelemetry_sdk::metrics::Temporality; /// -/// # fn main() -> Result<(), Box> { +/// # fn main() -> Result<(), opentelemetry_otlp::ExporterBuildError> { /// // Create a span exporter you can use to when configuring tracer providers /// # #[cfg(feature="trace")] /// let span_exporter = 
opentelemetry_otlp::SpanExporter::builder().with_http().build()?; @@ -108,7 +108,7 @@ impl HttpExporterBuilder { signal_endpoint_path: &str, signal_timeout_var: &str, signal_http_headers_var: &str, - ) -> Result { + ) -> Result { let endpoint = resolve_http_endpoint( signal_endpoint_var, signal_endpoint_path, @@ -120,7 +120,7 @@ impl HttpExporterBuilder { .or(env::var(OTEL_EXPORTER_OTLP_TIMEOUT).ok()) { Some(val) => match val.parse() { - Ok(seconds) => Duration::from_secs(seconds), + Ok(seconds) => Duration::from_millis(seconds), Err(_) => self.exporter_config.timeout, }, None => self.exporter_config.timeout, @@ -173,7 +173,7 @@ impl HttpExporterBuilder { } } - let http_client = http_client.ok_or(crate::Error::NoHttpClient)?; + let http_client = http_client.ok_or(ExporterBuildError::NoHttpClient)?; #[allow(clippy::mutable_key_type)] // http headers are not mutated let mut headers: HashMap = self @@ -208,9 +208,7 @@ impl HttpExporterBuilder { /// Create a log exporter with the current configuration #[cfg(feature = "trace")] - pub fn build_span_exporter( - mut self, - ) -> Result { + pub fn build_span_exporter(mut self) -> Result { use crate::{ OTEL_EXPORTER_OTLP_TRACES_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_HEADERS, OTEL_EXPORTER_OTLP_TRACES_TIMEOUT, @@ -228,7 +226,7 @@ impl HttpExporterBuilder { /// Create a log exporter with the current configuration #[cfg(feature = "logs")] - pub fn build_log_exporter(mut self) -> opentelemetry_sdk::logs::LogResult { + pub fn build_log_exporter(mut self) -> Result { use crate::{ OTEL_EXPORTER_OTLP_LOGS_ENDPOINT, OTEL_EXPORTER_OTLP_LOGS_HEADERS, OTEL_EXPORTER_OTLP_LOGS_TIMEOUT, @@ -249,7 +247,7 @@ impl HttpExporterBuilder { pub fn build_metrics_exporter( mut self, temporality: opentelemetry_sdk::metrics::Temporality, - ) -> opentelemetry_sdk::metrics::MetricResult { + ) -> Result { use crate::{ OTEL_EXPORTER_OTLP_METRICS_ENDPOINT, OTEL_EXPORTER_OTLP_METRICS_HEADERS, OTEL_EXPORTER_OTLP_METRICS_TIMEOUT, @@ -262,7 +260,7 @@ impl HttpExporterBuilder { OTEL_EXPORTER_OTLP_METRICS_HEADERS, )?; - Ok(crate::MetricExporter::new(client, temporality)) + Ok(crate::MetricExporter::from_http(client, temporality)) } } @@ -301,17 +299,17 @@ impl OtlpHttpClient { fn build_trace_export_body( &self, spans: Vec, - ) -> opentelemetry::trace::TraceResult<(Vec, &'static str)> { + ) -> Result<(Vec, &'static str), Box> { use opentelemetry_proto::tonic::collector::trace::v1::ExportTraceServiceRequest; let resource_spans = group_spans_by_resource_and_scope(spans, &self.resource); let req = ExportTraceServiceRequest { resource_spans }; match self.protocol { #[cfg(feature = "http-json")] - Protocol::HttpJson => match serde_json::to_string_pretty(&req) { - Ok(json) => Ok((json.into(), "application/json")), - Err(e) => Err(opentelemetry::trace::TraceError::from(e.to_string())), - }, + Protocol::HttpJson => { + let json = serde_json::to_string_pretty(&req)?; + Ok((json.into(), "application/json")) + } _ => Ok((req.encode_to_vec(), "application/x-protobuf")), } } @@ -320,17 +318,17 @@ impl OtlpHttpClient { fn build_logs_export_body( &self, logs: LogBatch<'_>, - ) -> opentelemetry_sdk::logs::LogResult<(Vec, &'static str)> { + ) -> Result<(Vec, &'static str), Box> { use opentelemetry_proto::tonic::collector::logs::v1::ExportLogsServiceRequest; let resource_logs = group_logs_by_resource_and_scope(logs, &self.resource); let req = ExportLogsServiceRequest { resource_logs }; match self.protocol { #[cfg(feature = "http-json")] - Protocol::HttpJson => match serde_json::to_string_pretty(&req) { - 
Ok(json) => Ok((json.into(), "application/json")), - Err(e) => Err(opentelemetry_sdk::logs::LogError::from(e.to_string())), - }, + Protocol::HttpJson => { + let json = serde_json::to_string_pretty(&req)?; + Ok((json.into(), "application/json")) + } _ => Ok((req.encode_to_vec(), "application/x-protobuf")), } } @@ -339,19 +337,17 @@ impl OtlpHttpClient { fn build_metrics_export_body( &self, metrics: &mut ResourceMetrics, - ) -> opentelemetry_sdk::metrics::MetricResult<(Vec, &'static str)> { + ) -> Result<(Vec, &'static str), Box> { use opentelemetry_proto::tonic::collector::metrics::v1::ExportMetricsServiceRequest; let req: ExportMetricsServiceRequest = (&*metrics).into(); match self.protocol { #[cfg(feature = "http-json")] - Protocol::HttpJson => match serde_json::to_string_pretty(&req) { - Ok(json) => Ok((json.into(), "application/json")), - Err(e) => Err(opentelemetry_sdk::metrics::MetricError::Other( - e.to_string(), - )), - }, + Protocol::HttpJson => { + let json = serde_json::to_string_pretty(&req)?; + Ok((json.into(), "application/json")) + } _ => Ok((req.encode_to_vec(), "application/x-protobuf")), } } @@ -371,7 +367,7 @@ fn resolve_http_endpoint( signal_endpoint_var: &str, signal_endpoint_path: &str, provided_endpoint: Option, -) -> Result { +) -> Result { // per signal env var is not modified if let Some(endpoint) = env::var(signal_endpoint_var) .ok() @@ -389,12 +385,23 @@ fn resolve_http_endpoint( } provided_endpoint - .map(|e| e.parse().map_err(From::from)) + .as_ref() + .map(|endpoint| { + endpoint.parse().map_err(|err: http::uri::InvalidUri| { + ExporterBuildError::InvalidUri(endpoint.to_string(), err.to_string()) + }) + }) .unwrap_or_else(|| { build_endpoint_uri( OTEL_EXPORTER_OTLP_HTTP_ENDPOINT_DEFAULT, signal_endpoint_path, ) + .map_err(|e| { + ExporterBuildError::InvalidUri( + format!("{OTEL_EXPORTER_OTLP_HTTP_ENDPOINT_DEFAULT}{signal_endpoint_path}"), + e.to_string(), + ) + }) }) } diff --git a/opentelemetry-otlp/src/exporter/mod.rs b/opentelemetry-otlp/src/exporter/mod.rs index bec1c809bb..0b7e2a3b87 100644 --- a/opentelemetry-otlp/src/exporter/mod.rs +++ b/opentelemetry-otlp/src/exporter/mod.rs @@ -6,12 +6,13 @@ use crate::exporter::http::HttpExporterBuilder; #[cfg(feature = "grpc-tonic")] use crate::exporter::tonic::TonicExporterBuilder; -use crate::{Error, Protocol}; +use crate::Protocol; #[cfg(feature = "serialize")] use serde::{Deserialize, Serialize}; use std::fmt::{Display, Formatter}; use std::str::FromStr; use std::time::Duration; +use thiserror::Error; /// Target to which the exporter is going to send signals, defaults to https://localhost:4317. /// Learn about the relationship between this constant and metrics/spans/logs at @@ -52,7 +53,7 @@ const OTEL_EXPORTER_OTLP_PROTOCOL_HTTP_JSON: &str = "http/json"; /// Max waiting time for the backend to process each signal batch, defaults to 10 seconds. pub const OTEL_EXPORTER_OTLP_TIMEOUT: &str = "OTEL_EXPORTER_OTLP_TIMEOUT"; /// Default max waiting time for the backend to process each signal batch. 
-pub const OTEL_EXPORTER_OTLP_TIMEOUT_DEFAULT: u64 = 10; +pub const OTEL_EXPORTER_OTLP_TIMEOUT_DEFAULT: u64 = 10000; // Endpoints per protocol https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md #[cfg(feature = "grpc-tonic")] @@ -87,11 +88,47 @@ impl Default for ExportConfig { // don't use default_endpoint(protocol) here otherwise we // won't know if user provided a value protocol, - timeout: Duration::from_secs(OTEL_EXPORTER_OTLP_TIMEOUT_DEFAULT), + timeout: Duration::from_millis(OTEL_EXPORTER_OTLP_TIMEOUT_DEFAULT), } } } +#[derive(Error, Debug)] +/// Errors that can occur while building an exporter. +// TODO: Refine and polish this. +pub enum ExporterBuildError { + /// Spawning a new thread failed. + #[error("Spawning a new thread failed. Unable to create Reqwest-Blocking client.")] + ThreadSpawnFailed, + + /// Feature required to use the specified compression algorithm. + #[cfg(any(not(feature = "gzip-tonic"), not(feature = "zstd-tonic")))] + #[error("feature '{0}' is required to use the compression algorithm '{1}'")] + FeatureRequiredForCompressionAlgorithm(&'static str, Compression), + + /// No Http client specified. + #[error("no http client specified")] + NoHttpClient, + + /// Unsupported compression algorithm. + #[error("unsupported compression algorithm '{0}'")] + UnsupportedCompressionAlgorithm(String), + + /// Invalid URI. + #[cfg(any(feature = "grpc-tonic", feature = "http-proto", feature = "http-json"))] + #[error("invalid URI {0}. Reason {1}")] + InvalidUri(String, String), + + /// Failed due to an internal error. + /// + /// The error message is intended for logging purposes only and should not + /// be used to make programmatic decisions. It is implementation-specific + /// and subject to change without notice. Consumers of this error should not + /// rely on its content beyond logging. + #[error("Reason: {0}")] + InternalFailure(String), +} + /// The compression algorithm to use when sending data. #[cfg_attr(feature = "serialize", derive(Deserialize, Serialize))] #[derive(Clone, Copy, Debug, Eq, PartialEq)] @@ -112,13 +149,15 @@ impl Display for Compression { } impl FromStr for Compression { - type Err = Error; + type Err = ExporterBuildError; fn from_str(s: &str) -> Result { match s { "gzip" => Ok(Compression::Gzip), "zstd" => Ok(Compression::Zstd), - _ => Err(Error::UnsupportedCompressionAlgorithm(s.to_string())), + _ => Err(ExporterBuildError::UnsupportedCompressionAlgorithm( + s.to_string(), + )), } } } diff --git a/opentelemetry-otlp/src/exporter/tonic/logs.rs b/opentelemetry-otlp/src/exporter/tonic/logs.rs index d7841c2679..cdbed23be2 100644 --- a/opentelemetry-otlp/src/exporter/tonic/logs.rs +++ b/opentelemetry-otlp/src/exporter/tonic/logs.rs @@ -56,40 +56,34 @@ impl TonicLogsClient { } impl LogExporter for TonicLogsClient { - #[allow(clippy::manual_async_fn)] - fn export( - &self, - batch: LogBatch<'_>, - ) -> impl std::future::Future + Send { - async move { - let (mut client, metadata, extensions) = match &self.inner { - Some(inner) => { - let (m, e, _) = inner - .interceptor - .lock() - .await // tokio::sync::Mutex doesn't return a poisoned error, so we can safely use the interceptor here - .call(Request::new(())) - .map_err(|e| OTelSdkError::InternalFailure(format!("error: {:?}", e)))? 
- .into_parts(); - (inner.client.clone(), m, e) - } - None => return Err(OTelSdkError::AlreadyShutdown), - }; + async fn export(&self, batch: LogBatch<'_>) -> OTelSdkResult { + let (mut client, metadata, extensions) = match &self.inner { + Some(inner) => { + let (m, e, _) = inner + .interceptor + .lock() + .await // tokio::sync::Mutex doesn't return a poisoned error, so we can safely use the interceptor here + .call(Request::new(())) + .map_err(|e| OTelSdkError::InternalFailure(format!("error: {:?}", e)))? + .into_parts(); + (inner.client.clone(), m, e) + } + None => return Err(OTelSdkError::AlreadyShutdown), + }; - let resource_logs = group_logs_by_resource_and_scope(batch, &self.resource); + let resource_logs = group_logs_by_resource_and_scope(batch, &self.resource); - otel_debug!(name: "TonicsLogsClient.CallingExport"); + otel_debug!(name: "TonicsLogsClient.CallingExport"); - client - .export(Request::from_parts( - metadata, - extensions, - ExportLogsServiceRequest { resource_logs }, - )) - .await - .map_err(|e| OTelSdkError::InternalFailure(format!("export error: {:?}", e)))?; - Ok(()) - } + client + .export(Request::from_parts( + metadata, + extensions, + ExportLogsServiceRequest { resource_logs }, + )) + .await + .map_err(|e| OTelSdkError::InternalFailure(format!("export error: {:?}", e)))?; + Ok(()) } fn shutdown(&mut self) -> OTelSdkResult { diff --git a/opentelemetry-otlp/src/exporter/tonic/metrics.rs b/opentelemetry-otlp/src/exporter/tonic/metrics.rs index c101829a34..2e5eb9df41 100644 --- a/opentelemetry-otlp/src/exporter/tonic/metrics.rs +++ b/opentelemetry-otlp/src/exporter/tonic/metrics.rs @@ -1,7 +1,6 @@ use core::fmt; use std::sync::Mutex; -use async_trait::async_trait; use opentelemetry::otel_debug; use opentelemetry_proto::tonic::collector::metrics::v1::{ metrics_service_client::MetricsServiceClient, ExportMetricsServiceRequest, @@ -52,7 +51,6 @@ impl TonicMetricsClient { } } -#[async_trait] impl MetricsClient for TonicMetricsClient { async fn export(&self, metrics: &mut ResourceMetrics) -> OTelSdkResult { let (mut client, metadata, extensions) = self diff --git a/opentelemetry-otlp/src/exporter/tonic/mod.rs b/opentelemetry-otlp/src/exporter/tonic/mod.rs index 9e2b54c631..f2aba85c77 100644 --- a/opentelemetry-otlp/src/exporter/tonic/mod.rs +++ b/opentelemetry-otlp/src/exporter/tonic/mod.rs @@ -12,7 +12,10 @@ use tonic::transport::Channel; #[cfg(feature = "tls")] use tonic::transport::ClientTlsConfig; -use super::{default_headers, parse_header_string, OTEL_EXPORTER_OTLP_GRPC_ENDPOINT_DEFAULT}; +use super::{ + default_headers, parse_header_string, ExporterBuildError, + OTEL_EXPORTER_OTLP_GRPC_ENDPOINT_DEFAULT, +}; use crate::exporter::Compression; use crate::{ ExportConfig, OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_ENDPOINT, @@ -23,7 +26,7 @@ use crate::{ pub(crate) mod logs; #[cfg(feature = "metrics")] -mod metrics; +pub(crate) mod metrics; #[cfg(feature = "trace")] pub(crate) mod trace; @@ -46,21 +49,21 @@ pub struct TonicConfig { } impl TryFrom for tonic::codec::CompressionEncoding { - type Error = crate::Error; + type Error = ExporterBuildError; - fn try_from(value: Compression) -> Result { + fn try_from(value: Compression) -> Result { match value { #[cfg(feature = "gzip-tonic")] Compression::Gzip => Ok(tonic::codec::CompressionEncoding::Gzip), #[cfg(not(feature = "gzip-tonic"))] - Compression::Gzip => Err(crate::Error::FeatureRequiredForCompressionAlgorithm( + Compression::Gzip => Err(ExporterBuildError::FeatureRequiredForCompressionAlgorithm( "gzip-tonic", 
Compression::Gzip, )), #[cfg(feature = "zstd-tonic")] Compression::Zstd => Ok(tonic::codec::CompressionEncoding::Zstd), #[cfg(not(feature = "zstd-tonic"))] - Compression::Zstd => Err(crate::Error::FeatureRequiredForCompressionAlgorithm( + Compression::Zstd => Err(ExporterBuildError::FeatureRequiredForCompressionAlgorithm( "zstd-tonic", Compression::Zstd, )), @@ -151,7 +154,8 @@ impl TonicExporterBuilder { signal_timeout_var: &str, signal_compression_var: &str, signal_headers_var: &str, - ) -> Result<(Channel, BoxInterceptor, Option), crate::Error> { + ) -> Result<(Channel, BoxInterceptor, Option), super::ExporterBuildError> + { let compression = self.resolve_compression(signal_compression_var)?; let (headers_from_env, headers_for_logging) = parse_headers_from_env(signal_headers_var); @@ -194,13 +198,15 @@ impl TonicExporterBuilder { // Used for logging the endpoint let endpoint_clone = endpoint.clone(); - let endpoint = Channel::from_shared(endpoint).map_err(crate::Error::from)?; + let endpoint = Channel::from_shared(endpoint.clone()).map_err(|op| { + super::ExporterBuildError::InvalidUri(endpoint.to_string(), op.to_string()) + })?; let timeout = match env::var(signal_timeout_var) .ok() .or(env::var(OTEL_EXPORTER_OTLP_TIMEOUT).ok()) { Some(val) => match val.parse() { - Ok(seconds) => Duration::from_secs(seconds), + Ok(seconds) => Duration::from_millis(seconds), Err(_) => config.timeout, }, None => config.timeout, @@ -210,7 +216,7 @@ impl TonicExporterBuilder { let channel = match self.tonic_config.tls_config { Some(tls_config) => endpoint .tls_config(tls_config) - .map_err(crate::Error::from)?, + .map_err(|er| super::ExporterBuildError::InternalFailure(er.to_string()))?, None => endpoint, } .timeout(timeout) @@ -243,7 +249,7 @@ impl TonicExporterBuilder { fn resolve_compression( &self, env_override: &str, - ) -> Result, crate::Error> { + ) -> Result, ExporterBuildError> { if let Some(compression) = self.tonic_config.compression { Ok(Some(compression.try_into()?)) } else if let Ok(compression) = env::var(env_override) { @@ -259,7 +265,7 @@ impl TonicExporterBuilder { #[cfg(feature = "logs")] pub(crate) fn build_log_exporter( self, - ) -> Result { + ) -> Result { use crate::exporter::tonic::logs::TonicLogsClient; otel_debug!(name: "LogsTonicChannelBuilding"); @@ -281,7 +287,7 @@ impl TonicExporterBuilder { pub(crate) fn build_metrics_exporter( self, temporality: opentelemetry_sdk::metrics::Temporality, - ) -> opentelemetry_sdk::metrics::MetricResult { + ) -> Result { use crate::MetricExporter; use metrics::TonicMetricsClient; @@ -296,14 +302,14 @@ impl TonicExporterBuilder { let client = TonicMetricsClient::new(channel, interceptor, compression); - Ok(MetricExporter::new(client, temporality)) + Ok(MetricExporter::from_tonic(client, temporality)) } /// Build a new tonic span exporter #[cfg(feature = "trace")] pub(crate) fn build_span_exporter( self, - ) -> Result { + ) -> Result { use crate::exporter::tonic::trace::TonicTracesClient; otel_debug!(name: "TracesTonicChannelBuilding"); diff --git a/opentelemetry-otlp/src/lib.rs b/opentelemetry-otlp/src/lib.rs index 21a0cbbf27..953fd66c5f 100644 --- a/opentelemetry-otlp/src/lib.rs +++ b/opentelemetry-otlp/src/lib.rs @@ -223,6 +223,7 @@ mod span; pub use crate::exporter::Compression; pub use crate::exporter::ExportConfig; +pub use crate::exporter::ExporterBuildError; #[cfg(feature = "trace")] #[cfg(any(feature = "http-proto", feature = "http-json", feature = "grpc-tonic"))] pub use crate::span::{ diff --git a/opentelemetry-otlp/src/logs.rs 
b/opentelemetry-otlp/src/logs.rs index 1a85baeda8..abe6e92aa2 100644 --- a/opentelemetry-otlp/src/logs.rs +++ b/opentelemetry-otlp/src/logs.rs @@ -8,7 +8,7 @@ use std::fmt::Debug; use opentelemetry_sdk::{error::OTelSdkResult, logs::LogBatch}; -use crate::{HasExportConfig, NoExporterBuilderSet}; +use crate::{exporter::ExporterBuildError, HasExportConfig, NoExporterBuilderSet}; #[cfg(feature = "grpc-tonic")] use crate::{HasTonicConfig, TonicExporterBuilder, TonicExporterBuilderSet}; @@ -61,7 +61,7 @@ impl LogExporterBuilder { #[cfg(feature = "grpc-tonic")] impl LogExporterBuilder { - pub fn build(self) -> Result { + pub fn build(self) -> Result { let result = self.client.0.build_log_exporter(); otel_debug!(name: "LogExporterBuilt", result = format!("{:?}", &result)); result @@ -70,7 +70,7 @@ impl LogExporterBuilder { #[cfg(any(feature = "http-proto", feature = "http-json"))] impl LogExporterBuilder { - pub fn build(self) -> Result { + pub fn build(self) -> Result { self.client.0.build_log_exporter() } } @@ -139,18 +139,12 @@ impl LogExporter { } impl opentelemetry_sdk::logs::LogExporter for LogExporter { - #[allow(clippy::manual_async_fn)] - fn export( - &self, - batch: LogBatch<'_>, - ) -> impl std::future::Future + Send { - async move { - match &self.client { - #[cfg(feature = "grpc-tonic")] - SupportedTransportClient::Tonic(client) => client.export(batch).await, - #[cfg(any(feature = "http-proto", feature = "http-json"))] - SupportedTransportClient::Http(client) => client.export(batch).await, - } + async fn export(&self, batch: LogBatch<'_>) -> OTelSdkResult { + match &self.client { + #[cfg(feature = "grpc-tonic")] + SupportedTransportClient::Tonic(client) => client.export(batch).await, + #[cfg(any(feature = "http-proto", feature = "http-json"))] + SupportedTransportClient::Http(client) => client.export(batch).await, } } diff --git a/opentelemetry-otlp/src/metric.rs b/opentelemetry-otlp/src/metric.rs index caca5a0af7..e7f25f76fb 100644 --- a/opentelemetry-otlp/src/metric.rs +++ b/opentelemetry-otlp/src/metric.rs @@ -3,6 +3,7 @@ //! Defines a [MetricExporter] to send metric data to backend via OTLP protocol. //! 
+use crate::exporter::ExporterBuildError; #[cfg(any(feature = "http-proto", feature = "http-json", feature = "grpc-tonic"))] use crate::HasExportConfig; @@ -14,11 +15,8 @@ use crate::{exporter::tonic::TonicExporterBuilder, HasTonicConfig, TonicExporter use crate::NoExporterBuilderSet; -use async_trait::async_trait; use core::fmt; use opentelemetry_sdk::error::OTelSdkResult; -use opentelemetry_sdk::metrics::MetricResult; - use opentelemetry_sdk::metrics::{ data::ResourceMetrics, exporter::PushMetricExporter, Temporality, }; @@ -77,7 +75,7 @@ impl MetricExporterBuilder { #[cfg(feature = "grpc-tonic")] impl MetricExporterBuilder { - pub fn build(self) -> MetricResult { + pub fn build(self) -> Result { let exporter = self.client.0.build_metrics_exporter(self.temporality)?; opentelemetry::otel_debug!(name: "MetricExporterBuilt"); Ok(exporter) @@ -86,7 +84,7 @@ impl MetricExporterBuilder { #[cfg(any(feature = "http-proto", feature = "http-json"))] impl MetricExporterBuilder { - pub fn build(self) -> MetricResult { + pub fn build(self) -> Result { let exporter = self.client.0.build_metrics_exporter(self.temporality)?; Ok(exporter) } @@ -121,37 +119,56 @@ impl HasHttpConfig for MetricExporterBuilder { } /// An interface for OTLP metrics clients -#[async_trait] pub(crate) trait MetricsClient: fmt::Debug + Send + Sync + 'static { - async fn export(&self, metrics: &mut ResourceMetrics) -> OTelSdkResult; + fn export( + &self, + metrics: &mut ResourceMetrics, + ) -> impl std::future::Future + Send; fn shutdown(&self) -> OTelSdkResult; } /// Export metrics in OTEL format. pub struct MetricExporter { - client: Box, + client: SupportedTransportClient, temporality: Temporality, } +#[derive(Debug)] +enum SupportedTransportClient { + #[cfg(feature = "grpc-tonic")] + Tonic(crate::exporter::tonic::metrics::TonicMetricsClient), + #[cfg(any(feature = "http-proto", feature = "http-json"))] + Http(crate::exporter::http::OtlpHttpClient), +} + impl Debug for MetricExporter { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { f.debug_struct("MetricExporter").finish() } } -#[async_trait] impl PushMetricExporter for MetricExporter { async fn export(&self, metrics: &mut ResourceMetrics) -> OTelSdkResult { - self.client.export(metrics).await + match &self.client { + #[cfg(feature = "grpc-tonic")] + SupportedTransportClient::Tonic(client) => client.export(metrics).await, + #[cfg(any(feature = "http-proto", feature = "http-json"))] + SupportedTransportClient::Http(client) => client.export(metrics).await, + } } - async fn force_flush(&self) -> OTelSdkResult { + fn force_flush(&self) -> OTelSdkResult { // this component is stateless Ok(()) } fn shutdown(&self) -> OTelSdkResult { - self.client.shutdown() + match &self.client { + #[cfg(feature = "grpc-tonic")] + SupportedTransportClient::Tonic(client) => client.shutdown(), + #[cfg(any(feature = "http-proto", feature = "http-json"))] + SupportedTransportClient::Http(client) => client.shutdown(), + } } fn temporality(&self) -> Temporality { @@ -165,10 +182,24 @@ impl MetricExporter { MetricExporterBuilder::default() } - /// Create a new metrics exporter - pub(crate) fn new(client: impl MetricsClient, temporality: Temporality) -> MetricExporter { - MetricExporter { - client: Box::new(client), + #[cfg(feature = "grpc-tonic")] + pub(crate) fn from_tonic( + client: crate::exporter::tonic::metrics::TonicMetricsClient, + temporality: Temporality, + ) -> Self { + Self { + client: SupportedTransportClient::Tonic(client), + temporality, + } + } + + #[cfg(any(feature = 
"http-proto", feature = "http-json"))] + pub(crate) fn from_http( + client: crate::exporter::http::OtlpHttpClient, + temporality: Temporality, + ) -> Self { + Self { + client: SupportedTransportClient::Http(client), temporality, } } diff --git a/opentelemetry-otlp/src/span.rs b/opentelemetry-otlp/src/span.rs index e6a8b7fd71..c6af377874 100644 --- a/opentelemetry-otlp/src/span.rs +++ b/opentelemetry-otlp/src/span.rs @@ -20,7 +20,10 @@ use crate::{ HttpExporterBuilderSet, }; -use crate::{exporter::HasExportConfig, NoExporterBuilderSet}; +use crate::{ + exporter::{ExporterBuildError, HasExportConfig}, + NoExporterBuilderSet, +}; /// Target to which the exporter is going to send spans, defaults to https://localhost:4317/v1/traces. /// Learn about the relationship between this constant and default/metrics/logs at @@ -63,7 +66,7 @@ impl SpanExporterBuilder { #[cfg(feature = "grpc-tonic")] impl SpanExporterBuilder { - pub fn build(self) -> Result { + pub fn build(self) -> Result { let span_exporter = self.client.0.build_span_exporter()?; opentelemetry::otel_debug!(name: "SpanExporterBuilt"); Ok(SpanExporter::new(span_exporter)) @@ -72,7 +75,7 @@ impl SpanExporterBuilder { #[cfg(any(feature = "http-proto", feature = "http-json"))] impl SpanExporterBuilder { - pub fn build(self) -> Result { + pub fn build(self) -> Result { let span_exporter = self.client.0.build_span_exporter()?; Ok(SpanExporter::new(span_exporter)) } diff --git a/opentelemetry-otlp/tests/integration_test/src/metric_helpers.rs b/opentelemetry-otlp/tests/integration_test/src/metric_helpers.rs index 7368fa4f21..047ae81f69 100644 --- a/opentelemetry-otlp/tests/integration_test/src/metric_helpers.rs +++ b/opentelemetry-otlp/tests/integration_test/src/metric_helpers.rs @@ -3,7 +3,7 @@ use crate::test_utils; use anyhow::Result; use anyhow::{Context, Ok}; use opentelemetry_otlp::MetricExporter; -use opentelemetry_sdk::metrics::{MeterProviderBuilder, PeriodicReader, SdkMeterProvider}; +use opentelemetry_sdk::metrics::{MeterProviderBuilder, SdkMeterProvider}; use opentelemetry_sdk::Resource; use serde_json::Value; use std::fs; diff --git a/opentelemetry-otlp/tests/integration_test/tests/traces.rs b/opentelemetry-otlp/tests/integration_test/tests/traces.rs index 623894eeaa..dd2a4b0562 100644 --- a/opentelemetry-otlp/tests/integration_test/tests/traces.rs +++ b/opentelemetry-otlp/tests/integration_test/tests/traces.rs @@ -3,7 +3,7 @@ use std::{fs::File, os::unix::fs::MetadataExt}; use integration_test_runner::trace_asserter::{read_spans_from_json, TraceAsserter}; -use opentelemetry::trace::TraceError; +use opentelemetry_otlp::ExporterBuildError; use opentelemetry_otlp::SpanExporter; use anyhow::Result; @@ -11,7 +11,7 @@ use ctor::dtor; use integration_test_runner::test_utils; use opentelemetry_sdk::{trace as sdktrace, Resource}; -fn init_tracer_provider() -> Result { +fn init_tracer_provider() -> Result { let exporter_builder = SpanExporter::builder(); #[cfg(feature = "tonic-client")] let exporter_builder = exporter_builder.with_tonic(); diff --git a/opentelemetry-proto/Cargo.toml b/opentelemetry-proto/Cargo.toml index 1213d48a5b..64ebf81d7d 100644 --- a/opentelemetry-proto/Cargo.toml +++ b/opentelemetry-proto/Cargo.toml @@ -33,7 +33,7 @@ default = ["full"] full = ["gen-tonic", "trace", "logs", "metrics", "zpages", "with-serde", "internal-logs"] # crates used to generate rs files -gen-tonic = ["gen-tonic-messages", "tonic/transport"] +gen-tonic = ["gen-tonic-messages", "tonic/channel"] gen-tonic-messages = ["tonic", "prost"] # telemetry 
pillars and functions
diff --git a/opentelemetry-sdk/CHANGELOG.md b/opentelemetry-sdk/CHANGELOG.md
index 67e6143d25..2612ebb5f6 100644
--- a/opentelemetry-sdk/CHANGELOG.md
+++ b/opentelemetry-sdk/CHANGELOG.md
@@ -2,6 +2,16 @@

 ## vNext

+- *Breaking* Make `force_flush()` in `PushMetricExporter` synchronous.
+
+- *Breaking (Affects custom Exporter/Processor/MetricReader authors only)* Removed
+`opentelemetry_sdk::logs::error::{LogError, LogResult}`. These were not
+intended to be public. If you are authoring custom processors/exporters, use
+`opentelemetry_sdk::error::OTelSdkError` and
+`opentelemetry_sdk::error::OTelSdkResult`.
+// PLACEHOLDER to fill in when the similar change is done for traces, metrics.
+// PLACEHOLDER to put all the PR links together.
+
 ## 0.28.0

 Released 2025-Feb-10
diff --git a/opentelemetry-sdk/Cargo.toml b/opentelemetry-sdk/Cargo.toml
index 2d2e55ed7e..861638526d 100644
--- a/opentelemetry-sdk/Cargo.toml
+++ b/opentelemetry-sdk/Cargo.toml
@@ -13,7 +13,6 @@ rust-version = "1.75.0"
 opentelemetry = { version = "0.28", path = "../opentelemetry/" }
 opentelemetry-http = { version = "0.28", path = "../opentelemetry-http", optional = true }
 async-std = { workspace = true, features = ["unstable"], optional = true }
-async-trait = { workspace = true, optional = true }
 futures-channel = "0.3"
 futures-executor = { workspace = true }
 futures-util = { workspace = true, features = ["std", "sink", "async-await-macro"] }
@@ -47,7 +46,7 @@ trace = ["opentelemetry/trace", "rand", "percent-encoding"]
 jaeger_remote_sampler = ["trace", "opentelemetry-http", "http", "serde", "serde_json", "url"]
 logs = ["opentelemetry/logs", "serde_json"]
 spec_unstable_logs_enabled = ["logs", "opentelemetry/spec_unstable_logs_enabled"]
-metrics = ["opentelemetry/metrics", "glob", "async-trait"]
+metrics = ["opentelemetry/metrics", "glob"]
 testing = ["opentelemetry/testing", "trace", "metrics", "logs", "rt-async-std", "rt-tokio", "rt-tokio-current-thread", "tokio/macros", "tokio/rt-multi-thread"]
 experimental_async_runtime = []
 rt-tokio = ["tokio", "tokio-stream", "experimental_async_runtime"]
diff --git a/opentelemetry-sdk/benches/log_exporter.rs b/opentelemetry-sdk/benches/log_exporter.rs
index 0e2272d49d..5fd4ac40dc 100644
--- a/opentelemetry-sdk/benches/log_exporter.rs
+++ b/opentelemetry-sdk/benches/log_exporter.rs
@@ -14,7 +14,6 @@ use opentelemetry::time::now;
 use opentelemetry_sdk::error::OTelSdkResult;

 use std::sync::Mutex;
-use async_trait::async_trait;
 use criterion::{criterion_group, criterion_main, Criterion};

 use opentelemetry::logs::{LogRecord as _, Logger, LoggerProvider, Severity};
@@ -29,9 +28,8 @@ use std::fmt::Debug;
 // Run this benchmark with:
 // cargo bench --bench log_exporter

-#[async_trait]
 pub trait LogExporterWithFuture: Send + Sync + Debug {
-    async fn export(&mut self, batch: LogBatch<'_>);
+    fn export(&mut self, batch: LogBatch<'_>) -> impl std::future::Future<Output = ()> + Send;
 }

 pub trait LogExporterWithoutFuture: Send + Sync + Debug {
@@ -41,7 +39,6 @@ pub trait LogExporterWithoutFuture: Send + Sync + Debug {
 #[derive(Debug)]
 struct NoOpExporterWithFuture {}

-#[async_trait]
 impl LogExporterWithFuture for NoOpExporterWithFuture {
     async fn export(&mut self, _batch: LogBatch<'_>) {}
 }
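The `async-trait` removals throughout this PR rely on native async-fn-in-trait and return-position `impl Trait` in traits (RPITIT), stable since Rust 1.75, which matches the `rust-version` pinned above. A minimal sketch of the pattern with illustrative names; the trait declares the desugared form so it can add the `Send` bound, and implementations can still write plain `async fn`:

```rust
use std::future::Future;

trait Exporter: Send + Sync {
    // Desugared form of `async fn export(...)`; spelling out the return type
    // lets the trait require the future to be `Send`, with no boxing and no
    // #[async_trait] proc macro.
    fn export(&self, payload: &[u8]) -> impl Future<Output = Result<(), String>> + Send;
}

struct Noop;

impl Exporter for Noop {
    // `async fn` in the impl satisfies the RPITIT declaration above.
    async fn export(&self, _payload: &[u8]) -> Result<(), String> {
        Ok(())
    }
}
```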
diff --git a/opentelemetry-sdk/src/logs/batch_log_processor.rs b/opentelemetry-sdk/src/logs/batch_log_processor.rs
new file mode 100644
index 0000000000..77f2a82c4d
--- /dev/null
+++ b/opentelemetry-sdk/src/logs/batch_log_processor.rs
@@ -0,0 +1,971 @@
+//! # OpenTelemetry Batch Log Processor
+//!
+//! The `BatchLogProcessor` is one implementation of the `LogProcessor` interface.
+//!
+//! It buffers log records and sends them to the exporter
+//! in batches. This processor is designed for **production use** in high-throughput
+//! applications and reduces the overhead of frequent exports by using a background
+//! thread for batch processing.
+//!
+//! ## Diagram
+//!
+//! ```ascii
+//!   +-----+---------------+   +-----------------------+   +-------------------+
+//!   |     |               |   |                       |   |                   |
+//!   | SDK | Logger.emit() +--->  (Batch)LogProcessor  +--->  (OTLPExporter)   |
+//!   +-----+---------------+   +-----------------------+   +-------------------+
+//! ```
+
+use crate::error::{OTelSdkError, OTelSdkResult};
+use crate::logs::log_processor::LogProcessor;
+use crate::{
+    logs::{LogBatch, LogExporter, SdkLogRecord},
+    Resource,
+};
+use std::sync::mpsc::{self, RecvTimeoutError, SyncSender};
+
+use opentelemetry::{otel_debug, otel_error, otel_info, otel_warn, InstrumentationScope};
+
+use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
+use std::{cmp::min, env, sync::Mutex};
+use std::{
+    fmt::{self, Debug, Formatter},
+    str::FromStr,
+    sync::Arc,
+    thread,
+    time::Duration,
+    time::Instant,
+};
+
+/// Delay interval between two consecutive exports.
+pub(crate) const OTEL_BLRP_SCHEDULE_DELAY: &str = "OTEL_BLRP_SCHEDULE_DELAY";
+/// Default delay interval between two consecutive exports.
+pub(crate) const OTEL_BLRP_SCHEDULE_DELAY_DEFAULT: u64 = 1_000;
+/// Maximum allowed time to export data.
+#[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")]
+pub(crate) const OTEL_BLRP_EXPORT_TIMEOUT: &str = "OTEL_BLRP_EXPORT_TIMEOUT";
+/// Default maximum allowed time to export data.
+#[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")]
+pub(crate) const OTEL_BLRP_EXPORT_TIMEOUT_DEFAULT: u64 = 30_000;
+/// Maximum queue size.
+pub(crate) const OTEL_BLRP_MAX_QUEUE_SIZE: &str = "OTEL_BLRP_MAX_QUEUE_SIZE";
+/// Default maximum queue size.
+pub(crate) const OTEL_BLRP_MAX_QUEUE_SIZE_DEFAULT: usize = 2_048;
+/// Maximum batch size, must be less than or equal to OTEL_BLRP_MAX_QUEUE_SIZE.
+pub(crate) const OTEL_BLRP_MAX_EXPORT_BATCH_SIZE: &str = "OTEL_BLRP_MAX_EXPORT_BATCH_SIZE";
+/// Default maximum batch size.
+pub(crate) const OTEL_BLRP_MAX_EXPORT_BATCH_SIZE_DEFAULT: usize = 512;
+
+/// Messages sent between the application thread and the batch log processor's worker thread.
+#[allow(clippy::large_enum_variant)]
+#[derive(Debug)]
+enum BatchMessage {
+    /// This is ONLY sent when the number of log records in the data channel has reached `max_export_batch_size`.
+    ExportLog(Arc<AtomicBool>),
+    /// ForceFlush flushes the current buffer to the exporter.
+    ForceFlush(mpsc::SyncSender<OTelSdkResult>),
+    /// Shut down the worker thread, pushing all logs in the buffer to the exporter.
+    Shutdown(mpsc::SyncSender<OTelSdkResult>),
+    /// Set the resource for the exporter.
+    SetResource(Arc<Resource>),
+}
+
+type LogsData = Box<(SdkLogRecord, InstrumentationScope)>;
+
+/// The `BatchLogProcessor` collects finished logs in a buffer and exports them
+/// in batches to the configured `LogExporter`. This processor is ideal for
+/// high-throughput environments, as it minimizes the overhead of exporting logs
+/// individually. It uses a **dedicated background thread** to manage and export logs
+/// asynchronously, ensuring that the application's main execution flow is not blocked.
+///
+/// This processor supports the following configurations:
+/// - **Queue size**: Maximum number of log records that can be buffered.
+/// - **Batch size**: Maximum number of log records to include in a single export.
+/// - **Scheduled delay**: Frequency at which the batch is exported.
+///
+/// When using this processor with the OTLP Exporter, the following exporter
+/// features are supported:
+/// - `grpc-tonic`: Requires `LoggerProvider` to be created within a tokio runtime.
+/// - `reqwest-blocking-client`: Works with a regular `main` or `tokio::main`.
+///
+/// In other words, other clients like `reqwest` and `hyper` are not supported.
+///
+/// `BatchLogProcessor` buffers logs in memory and exports them in batches. An
+/// export is triggered when `max_export_batch_size` is reached or every
+/// `scheduled_delay` milliseconds. Users can explicitly trigger an export using
+/// the `force_flush` method. Shutdown also triggers an export of all buffered
+/// logs and is recommended to be called before the application exits to ensure
+/// all buffered logs are exported.
+///
+/// **Warning**: When using tokio's current-thread runtime, `shutdown()`, which
+/// is a blocking call, should not be called from your main thread. This can
+/// cause a deadlock. Instead, call `shutdown()` from a separate thread or use
+/// tokio's `spawn_blocking`.
+///
+/// [`shutdown()`]: crate::logs::LoggerProvider::shutdown
+/// [`force_flush()`]: crate::logs::LoggerProvider::force_flush
+///
+/// ### Using a BatchLogProcessor:
+///
+/// ```rust
+/// use opentelemetry_sdk::logs::{BatchLogProcessor, BatchConfigBuilder, LoggerProvider};
+/// use opentelemetry::global;
+/// use std::time::Duration;
+/// use opentelemetry_sdk::logs::InMemoryLogExporter;
+///
+/// let exporter = InMemoryLogExporter::default(); // Replace with an actual exporter
+/// let processor = BatchLogProcessor::builder(exporter)
+///     .with_batch_config(
+///         BatchConfigBuilder::default()
+///             .with_max_queue_size(2048)
+///             .with_max_export_batch_size(512)
+///             .with_scheduled_delay(Duration::from_secs(5))
+///             .build(),
+///     )
+///     .build();
+///
+/// let provider = LoggerProvider::builder()
+///     .with_log_processor(processor)
+///     .build();
+/// ```
+pub struct BatchLogProcessor {
+    logs_sender: SyncSender<LogsData>, // Data channel to store log records and instrumentation scopes
+    message_sender: SyncSender<BatchMessage>, // Control channel to store control messages for the worker thread
+    handle: Mutex<Option<thread::JoinHandle<()>>>,
+    forceflush_timeout: Duration,
+    shutdown_timeout: Duration,
+    export_log_message_sent: Arc<AtomicBool>,
+    current_batch_size: Arc<AtomicUsize>,
+    max_export_batch_size: usize,
+
+    // Track dropped logs - we'll log this at shutdown
+    dropped_logs_count: AtomicUsize,
+
+    // Track the maximum queue size that was configured for this processor
+    max_queue_size: usize,
+}
+
+impl Debug for BatchLogProcessor {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+        f.debug_struct("BatchLogProcessor")
+            .field("message_sender", &self.message_sender)
+            .finish()
+    }
+}
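The processor above is built around two bounded std `mpsc` channels: a data channel for records and a control channel for flush/shutdown/resource messages, both drained by a single background thread. A standalone sketch of that shape, with illustrative names and a string payload standing in for `LogsData`:

```rust
use std::sync::mpsc;
use std::thread;
use std::time::Duration;

enum Control {
    Flush(mpsc::SyncSender<()>),
    Shutdown(mpsc::SyncSender<()>),
}

fn spawn_worker(
    data_rx: mpsc::Receiver<String>,
    ctrl_rx: mpsc::Receiver<Control>,
) -> thread::JoinHandle<()> {
    thread::spawn(move || loop {
        // Block on the control channel with a timeout; the timeout doubles as
        // the scheduled-delay export tick.
        match ctrl_rx.recv_timeout(Duration::from_millis(1_000)) {
            Ok(Control::Flush(ack)) => {
                let batch: Vec<String> = data_rx.try_iter().collect();
                // A real worker would call exporter.export(batch) here.
                drop(batch);
                let _ = ack.send(());
            }
            Ok(Control::Shutdown(ack)) => {
                let _ = ack.send(());
                break;
            }
            Err(mpsc::RecvTimeoutError::Timeout) => {
                // Periodic export of whatever has accumulated.
                let _batch: Vec<String> = data_rx.try_iter().collect();
            }
            Err(mpsc::RecvTimeoutError::Disconnected) => break,
        }
    })
}
```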
+impl LogProcessor for BatchLogProcessor {
+    fn emit(&self, record: &mut SdkLogRecord, instrumentation: &InstrumentationScope) {
+        let result = self
+            .logs_sender
+            .try_send(Box::new((record.clone(), instrumentation.clone())));
+
+        // Match on the result and handle each case separately
+        match result {
+            Ok(_) => {
+                // Successfully sent the log record to the data channel.
+                // Increment the current batch size and check if it has reached
+                // the max export batch size.
+                if self.current_batch_size.fetch_add(1, Ordering::Relaxed) + 1
+                    >= self.max_export_batch_size
+                {
+                    // Check if a control message for exporting logs has
+                    // already been sent to the worker thread. If not, send a
+                    // control message to export logs. `export_log_message_sent`
+                    // is set to false ONLY when the worker thread has processed
+                    // the control message.
+
+                    if !self.export_log_message_sent.load(Ordering::Relaxed) {
+                        // This is a cost-efficient check as atomic load
+                        // operations do not require exclusive access to the
+                        // cache line. Perform the atomic swap on
+                        // `export_log_message_sent` ONLY when the atomic load
+                        // operation above returns false. Atomic
+                        // swap/compare_exchange operations require exclusive
+                        // access to the cache line on most processor architectures.
+                        // We could have used compare_exchange as well here, but
+                        // it's more verbose than swap.
+                        if !self.export_log_message_sent.swap(true, Ordering::Relaxed) {
+                            match self.message_sender.try_send(BatchMessage::ExportLog(
+                                self.export_log_message_sent.clone(),
+                            )) {
+                                Ok(_) => {
+                                    // Control message sent successfully.
+                                }
+                                Err(_err) => {
+                                    // TODO: Log error. If the control message
+                                    // could not be sent, reset the
+                                    // `export_log_message_sent` flag.
+                                    self.export_log_message_sent.store(false, Ordering::Relaxed);
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+            Err(mpsc::TrySendError::Full(_)) => {
+                // Increment dropped logs count. The first time we have to drop
+                // a log, emit a warning.
+                if self.dropped_logs_count.fetch_add(1, Ordering::Relaxed) == 0 {
+                    otel_warn!(name: "BatchLogProcessor.LogDroppingStarted",
+                        message = "BatchLogProcessor dropped a LogRecord due to queue full. No further log will be emitted for further drops until Shutdown. During Shutdown time, a log will be emitted with exact count of total logs dropped.");
+                }
+            }
+            Err(mpsc::TrySendError::Disconnected(_)) => {
+                // Given the background thread is the only receiver, and it's
+                // disconnected, it indicates the thread is shut down
+                otel_warn!(
+                    name: "BatchLogProcessor.Emit.AfterShutdown",
+                    message = "Logs are being emitted even after Shutdown. This indicates incorrect lifecycle management of OTelLoggerProvider in application. Logs will not be exported."
+                );
+            }
+        }
+    }
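The load-then-swap gate in `emit` above can be isolated into a few lines. This sketch (illustrative names) shows why the relaxed load keeps the hot path cheap while the swap still guarantees a single winner:

```rust
use std::sync::atomic::{AtomicBool, Ordering};

/// Returns true for exactly one caller while the flag is unset.
fn try_claim(flag: &AtomicBool) -> bool {
    // Relaxed load: a read-only probe that does not need exclusive ownership
    // of the cache line, so concurrent callers stay cheap.
    if !flag.load(Ordering::Relaxed) {
        // Swap only in the (rare) unset case; it returns the previous value,
        // so exactly one caller observes `false` and wins the claim.
        return !flag.swap(true, Ordering::Relaxed);
    }
    false
}
```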
+                    // control message to export logs. `export_log_message_sent`
+                    // is set to false ONLY when the worker thread has processed
+                    // the control message.
+
+                    if !self.export_log_message_sent.load(Ordering::Relaxed) {
+                        // This is a cost-efficient check as atomic load
+                        // operations do not require exclusive access to the
+                        // cache line. Perform an atomic swap on
+                        // `export_log_message_sent` ONLY when the atomic load
+                        // operation above returns false. Atomic
+                        // swap/compare_exchange operations require exclusive
+                        // access to the cache line on most processor
+                        // architectures. We could have used compare_exchange
+                        // here as well, but it's more verbose than swap.
+                        if !self.export_log_message_sent.swap(true, Ordering::Relaxed) {
+                            match self.message_sender.try_send(BatchMessage::ExportLog(
+                                self.export_log_message_sent.clone(),
+                            )) {
+                                Ok(_) => {
+                                    // Control message sent successfully.
+                                }
+                                Err(_err) => {
+                                    // TODO: Log the error. If the control
+                                    // message could not be sent, reset the
+                                    // `export_log_message_sent` flag.
+                                    self.export_log_message_sent.store(false, Ordering::Relaxed);
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+            Err(mpsc::TrySendError::Full(_)) => {
+                // Increment the dropped logs count. The first time we have to
+                // drop a log, emit a warning.
+                if self.dropped_logs_count.fetch_add(1, Ordering::Relaxed) == 0 {
+                    otel_warn!(name: "BatchLogProcessor.LogDroppingStarted",
+                        message = "BatchLogProcessor dropped a LogRecord due to the queue being full. No further internal log will be emitted for subsequent drops until Shutdown. At Shutdown, a log will be emitted with the exact count of total logs dropped.");
+                }
+            }
+            Err(mpsc::TrySendError::Disconnected(_)) => {
+                // Given the background thread is the only receiver, and it's
+                // disconnected, it indicates the thread is shut down
+                otel_warn!(
+                    name: "BatchLogProcessor.Emit.AfterShutdown",
+                    message = "Logs are being emitted even after Shutdown. This indicates incorrect lifecycle management of the OTel LoggerProvider in the application. Logs will not be exported."
+                );
+            }
+        }
+    }
+
+    fn force_flush(&self) -> OTelSdkResult {
+        let (sender, receiver) = mpsc::sync_channel(1);
+        match self
+            .message_sender
+            .try_send(BatchMessage::ForceFlush(sender))
+        {
+            Ok(_) => receiver
+                .recv_timeout(self.forceflush_timeout)
+                .map_err(|err| {
+                    if err == RecvTimeoutError::Timeout {
+                        OTelSdkError::Timeout(self.forceflush_timeout)
+                    } else {
+                        OTelSdkError::InternalFailure(format!("{}", err))
+                    }
+                })?,
+            Err(mpsc::TrySendError::Full(_)) => {
+                // If the control message could not be sent, emit a warning.
+                otel_debug!(
+                    name: "BatchLogProcessor.ForceFlush.ControlChannelFull",
+                    message = "Control message to flush the worker thread could not be sent as the control channel is full. This can occur if the user repeatedly calls force_flush/shutdown without waiting for the previous call to finish."
+                );
+                Err(OTelSdkError::InternalFailure("ForceFlush cannot be performed as the control channel is full. This can occur if the user repeatedly calls force_flush/shutdown without waiting for the previous call to finish.".into()))
+            }
+            Err(mpsc::TrySendError::Disconnected(_)) => {
+                // Given the background thread is the only receiver, and it's
+                // disconnected, it indicates the thread is shut down
+                otel_debug!(
+                    name: "BatchLogProcessor.ForceFlush.AlreadyShutdown",
+                    message = "ForceFlush invoked after Shutdown. This will not perform a flush and indicates incorrect lifecycle management in the application."
+                );
+
+                Err(OTelSdkError::AlreadyShutdown)
+            }
+        }
+    }
+
+    fn shutdown(&self) -> OTelSdkResult {
+        let dropped_logs = self.dropped_logs_count.load(Ordering::Relaxed);
+        let max_queue_size = self.max_queue_size;
+        if dropped_logs > 0 {
+            otel_warn!(
+                name: "BatchLogProcessor.LogsDropped",
+                dropped_logs_count = dropped_logs,
+                max_queue_size = max_queue_size,
+                message = "Logs were dropped due to the queue being full. The count represents the total number of log records dropped in the lifetime of this BatchLogProcessor. Consider increasing the queue size and/or decreasing the export interval."
+            );
+        }
+
+        let (sender, receiver) = mpsc::sync_channel(1);
+        match self.message_sender.try_send(BatchMessage::Shutdown(sender)) {
+            Ok(_) => {
+                receiver
+                    .recv_timeout(self.shutdown_timeout)
+                    .map(|_| {
+                        // join the background thread after receiving back the
+                        // shutdown signal
+                        if let Some(handle) = self.handle.lock().unwrap().take() {
+                            handle.join().unwrap();
+                        }
+                        OTelSdkResult::Ok(())
+                    })
+                    .map_err(|err| match err {
+                        RecvTimeoutError::Timeout => {
+                            otel_error!(
+                                name: "BatchLogProcessor.Shutdown.Timeout",
+                                message = "BatchLogProcessor shutdown timing out."
+                            );
+                            OTelSdkError::Timeout(self.shutdown_timeout)
+                        }
+                        _ => {
+                            otel_error!(
+                                name: "BatchLogProcessor.Shutdown.Error",
+                                error = format!("{}", err)
+                            );
+                            OTelSdkError::InternalFailure(format!("{}", err))
+                        }
+                    })?
+            }
+            Err(mpsc::TrySendError::Full(_)) => {
+                // If the control message could not be sent, emit a warning.
+                otel_debug!(
+                    name: "BatchLogProcessor.Shutdown.ControlChannelFull",
+                    message = "Control message to shutdown the worker thread could not be sent as the control channel is full. This can occur if the user repeatedly calls force_flush/shutdown without waiting for the previous call to finish."
+                );
+                Err(OTelSdkError::InternalFailure("Shutdown cannot be performed as the control channel is full. This can occur if the user repeatedly calls force_flush/shutdown without waiting for the previous call to finish.".into()))
+            }
+            Err(mpsc::TrySendError::Disconnected(_)) => {
+                // Given the background thread is the only receiver, and it's
+                // disconnected, it indicates the thread is shut down
+                otel_debug!(
+                    name: "BatchLogProcessor.Shutdown.AlreadyShutdown",
+                    message = "Shutdown is being invoked more than once. This is a no-op, but indicates a potential issue in the application's lifecycle management."
+                );
+
+                Err(OTelSdkError::AlreadyShutdown)
+            }
+        }
+    }
+
+    fn set_resource(&self, resource: &Resource) {
+        let resource = Arc::new(resource.clone());
+        let _ = self
+            .message_sender
+            .try_send(BatchMessage::SetResource(resource));
+    }
+}
+
+impl BatchLogProcessor {
+    pub(crate) fn new<E>(mut exporter: E, config: BatchConfig) -> Self
+    where
+        E: LogExporter + Send + Sync + 'static,
+    {
+        let (logs_sender, logs_receiver) = mpsc::sync_channel::<LogsData>(config.max_queue_size);
+        let (message_sender, message_receiver) = mpsc::sync_channel::<BatchMessage>(64); // Is this a reasonable bound?
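+        // Note on the two channels: `logs_sender` is the bounded data channel
+        // (capacity `max_queue_size`); `emit` uses `try_send` on it, so a full
+        // queue drops the record rather than blocking the caller. The second,
+        // smaller channel is the control channel, carrying only
+        // export/force-flush/shutdown/set-resource messages for the worker
+        // thread.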
+        let max_queue_size = config.max_queue_size;
+        let max_export_batch_size = config.max_export_batch_size;
+        let current_batch_size = Arc::new(AtomicUsize::new(0));
+        let current_batch_size_for_thread = current_batch_size.clone();
+
+        let handle = thread::Builder::new()
+            .name("OpenTelemetry.Logs.BatchProcessor".to_string())
+            .spawn(move || {
+                otel_info!(
+                    name: "BatchLogProcessor.ThreadStarted",
+                    interval_in_millisecs = config.scheduled_delay.as_millis(),
+                    max_export_batch_size = config.max_export_batch_size,
+                    max_queue_size = max_queue_size,
+                );
+                let mut last_export_time = Instant::now();
+                let mut logs = Vec::with_capacity(config.max_export_batch_size);
+                let current_batch_size = current_batch_size_for_thread;
+
+                // This function gets up to `max_export_batch_size` log records from the channel and exports them.
+                // It returns the result of the export operation.
+                // It expects the logs vec to be empty when it's called.
+                #[inline]
+                fn get_logs_and_export<E>(
+                    logs_receiver: &mpsc::Receiver<LogsData>,
+                    exporter: &E,
+                    logs: &mut Vec<LogsData>,
+                    last_export_time: &mut Instant,
+                    current_batch_size: &AtomicUsize,
+                    config: &BatchConfig,
+                ) -> OTelSdkResult
+                where
+                    E: LogExporter + Send + Sync + 'static,
+                {
+                    let target = current_batch_size.load(Ordering::Relaxed); // `target` is used to determine the stopping criteria for exporting logs.
+                    let mut result = OTelSdkResult::Ok(());
+                    let mut total_exported_logs: usize = 0;
+
+                    while target > 0 && total_exported_logs < target {
+                        // Get up to `max_export_batch_size` log records from the channel and push them to the logs vec
+                        while let Ok(log) = logs_receiver.try_recv() {
+                            logs.push(log);
+                            if logs.len() == config.max_export_batch_size {
+                                break;
+                            }
+                        }
+
+                        let count_of_logs = logs.len(); // Count of logs that will be exported
+                        total_exported_logs += count_of_logs;
+
+                        result = export_batch_sync(exporter, logs, last_export_time); // This function clears the logs vec after exporting
+
+                        current_batch_size.fetch_sub(count_of_logs, Ordering::Relaxed);
+                    }
+                    result
+                }
+
+                loop {
+                    let remaining_time = config
+                        .scheduled_delay
+                        .checked_sub(last_export_time.elapsed())
+                        .unwrap_or(config.scheduled_delay);
+
+                    match message_receiver.recv_timeout(remaining_time) {
+                        Ok(BatchMessage::ExportLog(export_log_message_sent)) => {
+                            // Reset the export log message sent flag now that
+                            // it has been processed.
+                            export_log_message_sent.store(false, Ordering::Relaxed);
+
+                            otel_debug!(
+                                name: "BatchLogProcessor.ExportingDueToBatchSize",
+                            );
+
+                            let _ = get_logs_and_export(
+                                &logs_receiver,
+                                &exporter,
+                                &mut logs,
+                                &mut last_export_time,
+                                &current_batch_size,
+                                &config,
+                            );
+                        }
+                        Ok(BatchMessage::ForceFlush(sender)) => {
+                            otel_debug!(name: "BatchLogProcessor.ExportingDueToForceFlush");
+                            let result = get_logs_and_export(
+                                &logs_receiver,
+                                &exporter,
+                                &mut logs,
+                                &mut last_export_time,
+                                &current_batch_size,
+                                &config,
+                            );
+                            let _ = sender.send(result);
+                        }
+                        Ok(BatchMessage::Shutdown(sender)) => {
+                            otel_debug!(name: "BatchLogProcessor.ExportingDueToShutdown");
+                            let result = get_logs_and_export(
+                                &logs_receiver,
+                                &exporter,
+                                &mut logs,
+                                &mut last_export_time,
+                                &current_batch_size,
+                                &config,
+                            );
+                            let _ = sender.send(result);
+
+                            otel_debug!(
+                                name: "BatchLogProcessor.ThreadExiting",
+                                reason = "ShutdownRequested"
+                            );
+                            //
+                            // break out of the loop and return from the current background thread.
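+                            // At this point, all logs that were buffered when
+                            // the shutdown message was processed have been
+                            // exported and the acknowledgement sent back, so
+                            // exiting the thread below does not lose buffered
+                            // records.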
+                            //
+                            break;
+                        }
+                        Ok(BatchMessage::SetResource(resource)) => {
+                            exporter.set_resource(&resource);
+                        }
+                        Err(RecvTimeoutError::Timeout) => {
+                            otel_debug!(
+                                name: "BatchLogProcessor.ExportingDueToTimer",
+                            );
+
+                            let _ = get_logs_and_export(
+                                &logs_receiver,
+                                &exporter,
+                                &mut logs,
+                                &mut last_export_time,
+                                &current_batch_size,
+                                &config,
+                            );
+                        }
+                        Err(RecvTimeoutError::Disconnected) => {
+                            // Channel disconnected; the only thing to do is
+                            // break out (i.e., exit the thread).
+                            otel_debug!(
+                                name: "BatchLogProcessor.ThreadExiting",
+                                reason = "MessageSenderDisconnected"
+                            );
+                            break;
+                        }
+                    }
+                }
+                otel_info!(
+                    name: "BatchLogProcessor.ThreadStopped"
+                );
+            })
+            .expect("Thread spawn failed."); // TODO: Handle thread spawn failure
+
+        // Return the batch processor with a link to the worker thread
+        BatchLogProcessor {
+            logs_sender,
+            message_sender,
+            handle: Mutex::new(Some(handle)),
+            forceflush_timeout: Duration::from_secs(5), // TODO: make this configurable
+            shutdown_timeout: Duration::from_secs(5), // TODO: make this configurable
+            dropped_logs_count: AtomicUsize::new(0),
+            max_queue_size,
+            export_log_message_sent: Arc::new(AtomicBool::new(false)),
+            current_batch_size,
+            max_export_batch_size,
+        }
+    }
+
+    /// Create a new batch processor builder
+    pub fn builder<E>(exporter: E) -> BatchLogProcessorBuilder<E>
+    where
+        E: LogExporter,
+    {
+        BatchLogProcessorBuilder {
+            exporter,
+            config: Default::default(),
+        }
+    }
+}
+
+#[allow(clippy::vec_box)]
+fn export_batch_sync<E>(
+    exporter: &E,
+    batch: &mut Vec<Box<(SdkLogRecord, InstrumentationScope)>>,
+    last_export_time: &mut Instant,
+) -> OTelSdkResult
+where
+    E: LogExporter + ?Sized,
+{
+    *last_export_time = Instant::now();
+
+    if batch.is_empty() {
+        return OTelSdkResult::Ok(());
+    }
+
+    let export = exporter.export(LogBatch::new_with_owned_data(batch.as_slice()));
+    let export_result = futures_executor::block_on(export);
+
+    // Clear the batch vec after exporting
+    batch.clear();
+
+    match export_result {
+        Ok(_) => OTelSdkResult::Ok(()),
+        Err(err) => {
+            otel_error!(
+                name: "BatchLogProcessor.ExportError",
+                error = format!("{}", err)
+            );
+            OTelSdkResult::Err(err)
+        }
+    }
+}
+
+///
+/// A builder for creating [`BatchLogProcessor`] instances.
+///
+#[derive(Debug)]
+pub struct BatchLogProcessorBuilder<E> {
+    exporter: E,
+    config: BatchConfig,
+}
+
+impl<E> BatchLogProcessorBuilder<E>
+where
+    E: LogExporter + 'static,
+{
+    /// Set the BatchConfig for the [`BatchLogProcessorBuilder`]
+    pub fn with_batch_config(self, config: BatchConfig) -> Self {
+        BatchLogProcessorBuilder { config, ..self }
+    }
+
+    /// Build a batch processor
+    pub fn build(self) -> BatchLogProcessor {
+        BatchLogProcessor::new(self.exporter, self.config)
+    }
+}
+
+/// Batch log processor configuration.
+/// Use [`BatchConfigBuilder`] to configure your own instance of [`BatchConfig`].
+#[derive(Debug)]
+#[allow(dead_code)]
+pub struct BatchConfig {
+    /// The maximum queue size to buffer logs for delayed processing. If the
+    /// queue gets full it drops the logs. The default value is 2048.
+    pub(crate) max_queue_size: usize,
+
+    /// The delay interval between two consecutive rounds of batch processing.
+    /// The default value is 1 second.
+    pub(crate) scheduled_delay: Duration,
+
+    /// The maximum number of logs to process in a single batch. If there is
+    /// more than one batch worth of logs then it processes multiple batches
+    /// of logs one batch after the other without any delay. The default value
+    /// is 512.
+    pub(crate) max_export_batch_size: usize,
+
+    /// The maximum duration to export a batch of data.
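+    /// Note: this field is only compiled in (and read) when the
+    /// `experimental_logs_batch_log_processor_with_async_runtime` feature is
+    /// enabled; the dedicated-thread processor in this file does not use it.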
+    #[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")]
+    pub(crate) max_export_timeout: Duration,
+}
+
+impl Default for BatchConfig {
+    fn default() -> Self {
+        BatchConfigBuilder::default().build()
+    }
+}
+
+/// A builder for creating [`BatchConfig`] instances.
+#[derive(Debug)]
+pub struct BatchConfigBuilder {
+    max_queue_size: usize,
+    scheduled_delay: Duration,
+    max_export_batch_size: usize,
+    #[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")]
+    max_export_timeout: Duration,
+}
+
+impl Default for BatchConfigBuilder {
+    /// Create a new [`BatchConfigBuilder`] initialized with the default batch
+    /// config values as per the specification. The values are overridden by
+    /// environment variables if set. The supported environment variables are:
+    /// * `OTEL_BLRP_MAX_QUEUE_SIZE`
+    /// * `OTEL_BLRP_SCHEDULE_DELAY`
+    /// * `OTEL_BLRP_MAX_EXPORT_BATCH_SIZE`
+    /// * `OTEL_BLRP_EXPORT_TIMEOUT`
+    fn default() -> Self {
+        BatchConfigBuilder {
+            max_queue_size: OTEL_BLRP_MAX_QUEUE_SIZE_DEFAULT,
+            scheduled_delay: Duration::from_millis(OTEL_BLRP_SCHEDULE_DELAY_DEFAULT),
+            max_export_batch_size: OTEL_BLRP_MAX_EXPORT_BATCH_SIZE_DEFAULT,
+            #[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")]
+            max_export_timeout: Duration::from_millis(OTEL_BLRP_EXPORT_TIMEOUT_DEFAULT),
+        }
+        .init_from_env_vars()
+    }
+}
+
+impl BatchConfigBuilder {
+    /// Set max_queue_size for the [`BatchConfigBuilder`].
+    /// It's the maximum queue size to buffer logs for delayed processing.
+    /// If the queue gets full it will drop the logs.
+    /// The default value is 2048.
+    pub fn with_max_queue_size(mut self, max_queue_size: usize) -> Self {
+        self.max_queue_size = max_queue_size;
+        self
+    }
+
+    /// Set scheduled_delay for the [`BatchConfigBuilder`].
+    /// It's the delay interval between two consecutive rounds of batch processing.
+    /// The default value is 1000 milliseconds.
+    pub fn with_scheduled_delay(mut self, scheduled_delay: Duration) -> Self {
+        self.scheduled_delay = scheduled_delay;
+        self
+    }
+
+    /// Set max_export_timeout for the [`BatchConfigBuilder`].
+    /// It's the maximum duration to export a batch of data.
+    /// The default value is 30000 milliseconds.
+    #[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")]
+    pub fn with_max_export_timeout(mut self, max_export_timeout: Duration) -> Self {
+        self.max_export_timeout = max_export_timeout;
+        self
+    }
+
+    /// Set max_export_batch_size for the [`BatchConfigBuilder`].
+    /// It's the maximum number of logs to process in a single batch. If there
+    /// is more than one batch worth of logs then it processes multiple batches
+    /// of logs one batch after the other without any delay.
+    /// The default value is 512.
+    pub fn with_max_export_batch_size(mut self, max_export_batch_size: usize) -> Self {
+        self.max_export_batch_size = max_export_batch_size;
+        self
+    }
+
+    /// Builds a `BatchConfig` enforcing the following invariant:
+    /// * `max_export_batch_size` must be less than or equal to `max_queue_size`.
+    pub fn build(self) -> BatchConfig {
+        // max export batch size must be less than or equal to max queue size,
+        // so we clamp max export batch size to max queue size if it is larger.
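+        // For example (hypothetical values): a builder configured with
+        //     BatchConfigBuilder::default()
+        //         .with_max_queue_size(256)
+        //         .with_max_export_batch_size(1024)
+        //         .build()
+        // yields a BatchConfig whose max_export_batch_size is clamped to 256,
+        // matching the env-var case covered by
+        // test_batch_config_max_export_batch_size_validation below.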
+        let max_export_batch_size = min(self.max_export_batch_size, self.max_queue_size);
+
+        BatchConfig {
+            max_queue_size: self.max_queue_size,
+            scheduled_delay: self.scheduled_delay,
+            #[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")]
+            max_export_timeout: self.max_export_timeout,
+            max_export_batch_size,
+        }
+    }
+
+    fn init_from_env_vars(mut self) -> Self {
+        if let Some(max_queue_size) = env::var(OTEL_BLRP_MAX_QUEUE_SIZE)
+            .ok()
+            .and_then(|queue_size| usize::from_str(&queue_size).ok())
+        {
+            self.max_queue_size = max_queue_size;
+        }
+
+        if let Some(max_export_batch_size) = env::var(OTEL_BLRP_MAX_EXPORT_BATCH_SIZE)
+            .ok()
+            .and_then(|batch_size| usize::from_str(&batch_size).ok())
+        {
+            self.max_export_batch_size = max_export_batch_size;
+        }
+
+        if let Some(scheduled_delay) = env::var(OTEL_BLRP_SCHEDULE_DELAY)
+            .ok()
+            .and_then(|delay| u64::from_str(&delay).ok())
+        {
+            self.scheduled_delay = Duration::from_millis(scheduled_delay);
+        }
+
+        #[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")]
+        if let Some(max_export_timeout) = env::var(OTEL_BLRP_EXPORT_TIMEOUT)
+            .ok()
+            .and_then(|s| u64::from_str(&s).ok())
+        {
+            self.max_export_timeout = Duration::from_millis(max_export_timeout);
+        }
+
+        self
+    }
+}
+
+#[cfg(all(test, feature = "testing", feature = "logs"))]
+mod tests {
+    use super::{
+        BatchConfig, BatchConfigBuilder, BatchLogProcessor, OTEL_BLRP_MAX_EXPORT_BATCH_SIZE,
+        OTEL_BLRP_MAX_EXPORT_BATCH_SIZE_DEFAULT, OTEL_BLRP_MAX_QUEUE_SIZE,
+        OTEL_BLRP_MAX_QUEUE_SIZE_DEFAULT, OTEL_BLRP_SCHEDULE_DELAY,
+        OTEL_BLRP_SCHEDULE_DELAY_DEFAULT,
+    };
+    #[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")]
+    use super::{OTEL_BLRP_EXPORT_TIMEOUT, OTEL_BLRP_EXPORT_TIMEOUT_DEFAULT};
+    use crate::logs::log_processor::tests::MockLogExporter;
+    use crate::logs::SdkLogRecord;
+    use crate::{
+        logs::{InMemoryLogExporter, InMemoryLogExporterBuilder, LogProcessor, SdkLoggerProvider},
+        Resource,
+    };
+    use opentelemetry::InstrumentationScope;
+    use opentelemetry::KeyValue;
+    use std::sync::{Arc, Mutex};
+    use std::time::Duration;
+
+    #[test]
+    fn test_default_const_values() {
+        assert_eq!(OTEL_BLRP_SCHEDULE_DELAY, "OTEL_BLRP_SCHEDULE_DELAY");
+        assert_eq!(OTEL_BLRP_SCHEDULE_DELAY_DEFAULT, 1_000);
+        #[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")]
+        assert_eq!(OTEL_BLRP_EXPORT_TIMEOUT, "OTEL_BLRP_EXPORT_TIMEOUT");
+        #[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")]
+        assert_eq!(OTEL_BLRP_EXPORT_TIMEOUT_DEFAULT, 30_000);
+        assert_eq!(OTEL_BLRP_MAX_QUEUE_SIZE, "OTEL_BLRP_MAX_QUEUE_SIZE");
+        assert_eq!(OTEL_BLRP_MAX_QUEUE_SIZE_DEFAULT, 2_048);
+        assert_eq!(
+            OTEL_BLRP_MAX_EXPORT_BATCH_SIZE,
+            "OTEL_BLRP_MAX_EXPORT_BATCH_SIZE"
+        );
+        assert_eq!(OTEL_BLRP_MAX_EXPORT_BATCH_SIZE_DEFAULT, 512);
+    }
+
+    #[test]
+    fn test_default_batch_config_adheres_to_specification() {
+        // The following environment variables are expected to be unset so that their default values are used.
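+        // `temp_env::with_vars_unset` temporarily removes these variables for
+        // the duration of the closure, so values leaking in from the process
+        // environment cannot skew the defaults under test.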
+        let env_vars = vec![
+            OTEL_BLRP_SCHEDULE_DELAY,
+            #[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")]
+            OTEL_BLRP_EXPORT_TIMEOUT,
+            OTEL_BLRP_MAX_QUEUE_SIZE,
+            OTEL_BLRP_MAX_EXPORT_BATCH_SIZE,
+        ];
+
+        let config = temp_env::with_vars_unset(env_vars, BatchConfig::default);
+
+        assert_eq!(
+            config.scheduled_delay,
+            Duration::from_millis(OTEL_BLRP_SCHEDULE_DELAY_DEFAULT)
+        );
+        #[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")]
+        assert_eq!(
+            config.max_export_timeout,
+            Duration::from_millis(OTEL_BLRP_EXPORT_TIMEOUT_DEFAULT)
+        );
+        assert_eq!(config.max_queue_size, OTEL_BLRP_MAX_QUEUE_SIZE_DEFAULT);
+        assert_eq!(
+            config.max_export_batch_size,
+            OTEL_BLRP_MAX_EXPORT_BATCH_SIZE_DEFAULT
+        );
+    }
+
+    #[test]
+    fn test_batch_config_configurable_by_env_vars() {
+        let env_vars = vec![
+            (OTEL_BLRP_SCHEDULE_DELAY, Some("2000")),
+            #[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")]
+            (OTEL_BLRP_EXPORT_TIMEOUT, Some("60000")),
+            (OTEL_BLRP_MAX_QUEUE_SIZE, Some("4096")),
+            (OTEL_BLRP_MAX_EXPORT_BATCH_SIZE, Some("1024")),
+        ];
+
+        let config = temp_env::with_vars(env_vars, BatchConfig::default);
+
+        assert_eq!(config.scheduled_delay, Duration::from_millis(2000));
+        #[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")]
+        assert_eq!(config.max_export_timeout, Duration::from_millis(60000));
+        assert_eq!(config.max_queue_size, 4096);
+        assert_eq!(config.max_export_batch_size, 1024);
+    }
+
+    #[test]
+    fn test_batch_config_max_export_batch_size_validation() {
+        let env_vars = vec![
+            (OTEL_BLRP_MAX_QUEUE_SIZE, Some("256")),
+            (OTEL_BLRP_MAX_EXPORT_BATCH_SIZE, Some("1024")),
+        ];
+
+        let config = temp_env::with_vars(env_vars, BatchConfig::default);
+
+        assert_eq!(config.max_queue_size, 256);
+        assert_eq!(config.max_export_batch_size, 256);
+        assert_eq!(
+            config.scheduled_delay,
+            Duration::from_millis(OTEL_BLRP_SCHEDULE_DELAY_DEFAULT)
+        );
+        #[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")]
+        assert_eq!(
+            config.max_export_timeout,
+            Duration::from_millis(OTEL_BLRP_EXPORT_TIMEOUT_DEFAULT)
+        );
+    }
+
+    #[test]
+    fn test_batch_config_with_fields() {
+        let batch_builder = BatchConfigBuilder::default()
+            .with_max_export_batch_size(1)
+            .with_scheduled_delay(Duration::from_millis(2))
+            .with_max_queue_size(4);
+
+        #[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")]
+        let batch_builder = batch_builder.with_max_export_timeout(Duration::from_millis(3));
+        let batch = batch_builder.build();
+
+        assert_eq!(batch.max_export_batch_size, 1);
+        assert_eq!(batch.scheduled_delay, Duration::from_millis(2));
+        #[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")]
+        assert_eq!(batch.max_export_timeout, Duration::from_millis(3));
+        assert_eq!(batch.max_queue_size, 4);
+    }
+
+    #[test]
+    fn test_build_batch_log_processor_builder() {
+        let mut env_vars = vec![
+            (OTEL_BLRP_MAX_EXPORT_BATCH_SIZE, Some("500")),
+            (OTEL_BLRP_SCHEDULE_DELAY, Some("I am not number")),
+            #[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")]
+            (OTEL_BLRP_EXPORT_TIMEOUT, Some("2046")),
+        ];
+        temp_env::with_vars(env_vars.clone(), || {
+            let builder = BatchLogProcessor::builder(InMemoryLogExporter::default());
+
+            assert_eq!(builder.config.max_export_batch_size, 500);
+            assert_eq!(
+                builder.config.scheduled_delay,
+                Duration::from_millis(OTEL_BLRP_SCHEDULE_DELAY_DEFAULT)
+            );
+            assert_eq!(
+                builder.config.max_queue_size,
+                OTEL_BLRP_MAX_QUEUE_SIZE_DEFAULT
+            );
+
+            #[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")]
+            assert_eq!(
+                builder.config.max_export_timeout,
+                Duration::from_millis(2046)
+            );
+        });
+
+        env_vars.push((OTEL_BLRP_MAX_QUEUE_SIZE, Some("120")));
+
+        temp_env::with_vars(env_vars, || {
+            let builder = BatchLogProcessor::builder(InMemoryLogExporter::default());
+            assert_eq!(builder.config.max_export_batch_size, 120);
+            assert_eq!(builder.config.max_queue_size, 120);
+        });
+    }
+
+    #[test]
+    fn test_build_batch_log_processor_builder_with_custom_config() {
+        let expected = BatchConfigBuilder::default()
+            .with_max_export_batch_size(1)
+            .with_scheduled_delay(Duration::from_millis(2))
+            .with_max_queue_size(4)
+            .build();
+
+        let builder =
+            BatchLogProcessor::builder(InMemoryLogExporter::default()).with_batch_config(expected);
+
+        let actual = &builder.config;
+        assert_eq!(actual.max_export_batch_size, 1);
+        assert_eq!(actual.scheduled_delay, Duration::from_millis(2));
+        assert_eq!(actual.max_queue_size, 4);
+    }
+
+    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+    async fn test_set_resource_batch_processor() {
+        let exporter = MockLogExporter {
+            resource: Arc::new(Mutex::new(None)),
+        };
+        let processor = BatchLogProcessor::new(exporter.clone(), BatchConfig::default());
+        let provider = SdkLoggerProvider::builder()
+            .with_log_processor(processor)
+            .with_resource(
+                Resource::builder_empty()
+                    .with_attributes([
+                        KeyValue::new("k1", "v1"),
+                        KeyValue::new("k2", "v3"),
+                        KeyValue::new("k3", "v3"),
+                        KeyValue::new("k4", "v4"),
+                        KeyValue::new("k5", "v5"),
+                    ])
+                    .build(),
+            )
+            .build();
+
+        // wait for the batch processor to process the resource.
+        tokio::time::sleep(Duration::from_millis(100)).await;
+
+        assert_eq!(exporter.get_resource().unwrap().into_iter().count(), 5);
+        let _ = provider.shutdown();
+    }
+
+    #[tokio::test(flavor = "multi_thread")]
+    async fn test_batch_shutdown() {
+        // assert we will receive an error
+        // setup
+        let exporter = InMemoryLogExporterBuilder::default()
+            .keep_records_on_shutdown()
+            .build();
+        let processor = BatchLogProcessor::new(exporter.clone(), BatchConfig::default());
+
+        let mut record = SdkLogRecord::new();
+        let instrumentation = InstrumentationScope::default();
+
+        processor.emit(&mut record, &instrumentation);
+        processor.force_flush().unwrap();
+        processor.shutdown().unwrap();
+        // todo: expect to see errors here. How should we assert this?
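+        // Per the TrySendError::Disconnected branch in `emit`, an emit after
+        // shutdown is dropped with a "BatchLogProcessor.Emit.AfterShutdown"
+        // warning, so the exporter should still hold exactly one record.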
+        processor.emit(&mut record, &instrumentation);
+        assert_eq!(1, exporter.get_emitted_logs().unwrap().len())
+    }
+
+    #[tokio::test(flavor = "current_thread")]
+    async fn test_batch_log_processor_shutdown_under_async_runtime_current_flavor_multi_thread() {
+        let exporter = InMemoryLogExporterBuilder::default().build();
+        let processor = BatchLogProcessor::new(exporter.clone(), BatchConfig::default());
+
+        processor.shutdown().unwrap();
+    }
+
+    #[tokio::test(flavor = "current_thread")]
+    async fn test_batch_log_processor_shutdown_with_async_runtime_current_flavor_current_thread() {
+        let exporter = InMemoryLogExporterBuilder::default().build();
+        let processor = BatchLogProcessor::new(exporter.clone(), BatchConfig::default());
+        processor.shutdown().unwrap();
+    }
+
+    #[tokio::test(flavor = "multi_thread")]
+    async fn test_batch_log_processor_shutdown_with_async_runtime_multi_flavor_multi_thread() {
+        let exporter = InMemoryLogExporterBuilder::default().build();
+        let processor = BatchLogProcessor::new(exporter.clone(), BatchConfig::default());
+        processor.shutdown().unwrap();
+    }
+
+    #[tokio::test(flavor = "multi_thread")]
+    async fn test_batch_log_processor_shutdown_with_async_runtime_multi_flavor_current_thread() {
+        let exporter = InMemoryLogExporterBuilder::default().build();
+        let processor = BatchLogProcessor::new(exporter.clone(), BatchConfig::default());
+        processor.shutdown().unwrap();
+    }
+}
diff --git a/opentelemetry-sdk/src/logs/error.rs b/opentelemetry-sdk/src/logs/error.rs
deleted file mode 100644
index d9b1b42157..0000000000
--- a/opentelemetry-sdk/src/logs/error.rs
+++ /dev/null
@@ -1,63 +0,0 @@
-use crate::ExportError;
-
-use std::{sync::PoisonError, time::Duration};
-use thiserror::Error;
-
-/// Describe the result of operations in log SDK.
-pub type LogResult<T> = Result<T, LogError>;
-
-#[derive(Error, Debug)]
-#[non_exhaustive]
-/// Errors returned by the log SDK.
-pub enum LogError {
-    /// Export failed with the error returned by the exporter.
-    #[error("Exporter {0} encountered the following errors: {name}", name = .0.exporter_name())]
-    ExportFailed(Box<dyn ExportError>),
-
-    /// Export failed to finish after certain period and processor stopped the export.
-    #[error("Exporter timed out after {} seconds", .0.as_secs())]
-    ExportTimedOut(Duration),
-
-    /// Processor is already shutdown
-    #[error("{0} already shutdown")]
-    AlreadyShutdown(String),
-
-    /// Mutex lock poisoning
-    #[error("mutex lock poisioning for {0}")]
-    MutexPoisoned(String),
-
-    /// Other errors propagated from log SDK that weren't covered above.
-    #[error(transparent)]
-    Other(#[from] Box<dyn std::error::Error + Send + Sync + 'static>),
-}
-
-impl<T> From<T> for LogError
-where
-    T: ExportError,
-{
-    fn from(err: T) -> Self {
-        LogError::ExportFailed(Box::new(err))
-    }
-}
-
-impl From<String> for LogError {
-    fn from(err_msg: String) -> Self {
-        LogError::Other(Box::new(Custom(err_msg)))
-    }
-}
-
-impl From<&'static str> for LogError {
-    fn from(err_msg: &'static str) -> Self {
-        LogError::Other(Box::new(Custom(err_msg.into())))
-    }
-}
-
-impl<T> From<PoisonError<T>> for LogError {
-    fn from(err: PoisonError<T>) -> Self {
-        LogError::Other(err.to_string().into())
-    }
-}
-/// Wrap type for string
-#[derive(Error, Debug)]
-#[error("{0}")]
-struct Custom(String);
diff --git a/opentelemetry-sdk/src/logs/in_memory_exporter.rs b/opentelemetry-sdk/src/logs/in_memory_exporter.rs
index ab757f200e..278201de40 100644
--- a/opentelemetry-sdk/src/logs/in_memory_exporter.rs
+++ b/opentelemetry-sdk/src/logs/in_memory_exporter.rs
@@ -188,24 +188,18 @@ impl InMemoryLogExporter {
 }
 
 impl LogExporter for InMemoryLogExporter {
-    #[allow(clippy::manual_async_fn)]
-    fn export(
-        &self,
-        batch: LogBatch<'_>,
-    ) -> impl std::future::Future<Output = OTelSdkResult> + Send {
-        async move {
-            let mut logs_guard = self.logs.lock().map_err(|e| {
-                OTelSdkError::InternalFailure(format!("Failed to lock logs for export: {}", e))
-            })?;
-            for (log_record, instrumentation) in batch.iter() {
-                let owned_log = OwnedLogData {
-                    record: (*log_record).clone(),
-                    instrumentation: (*instrumentation).clone(),
-                };
-                logs_guard.push(owned_log);
-            }
-            Ok(())
+    async fn export(&self, batch: LogBatch<'_>) -> OTelSdkResult {
+        let mut logs_guard = self.logs.lock().map_err(|e| {
+            OTelSdkError::InternalFailure(format!("Failed to lock logs for export: {}", e))
+        })?;
+        for (log_record, instrumentation) in batch.iter() {
+            let owned_log = OwnedLogData {
+                record: (*log_record).clone(),
+                instrumentation: (*instrumentation).clone(),
+            };
+            logs_guard.push(owned_log);
         }
+        Ok(())
     }
 
     fn shutdown(&mut self) -> OTelSdkResult {
diff --git a/opentelemetry-sdk/src/logs/log_processor.rs b/opentelemetry-sdk/src/logs/log_processor.rs
index a74602e729..06cad3c207 100644
--- a/opentelemetry-sdk/src/logs/log_processor.rs
+++ b/opentelemetry-sdk/src/logs/log_processor.rs
@@ -10,16 +10,11 @@
 //!
 //! ## Types of Log Processors
 //!
-//! - **SimpleLogProcessor**: Forwards log records to the exporter immediately
-//!   after they are emitted. This processor is **synchronous** and is designed
-//!   for debugging or testing purposes. It is **not suitable for production**
-//!   environments due to its lack of batching, performance optimizations, or support
-//!   for high-throughput scenarios.
+//! There are currently two types of log processors available in the SDK:
+//! - **SimpleLogProcessor**: Forwards log records to the exporter immediately.
+//! - **BatchLogProcessor**: Buffers log records and sends them to the exporter in batches.
 //!
-//! - **BatchLogProcessor**: Buffers log records and sends them to the exporter
-//!   in batches. This processor is designed for **production use** in high-throughput
-//!   applications and reduces the overhead of frequent exports by using a background
-//!   thread for batch processing.
+//! For more information, see simple_log_processor.rs and batch_log_processor.rs.
 //!
 //! ## Diagram
 //!
 //!
@@ -31,46 +26,14 @@
 //!   +-----+---------------+   +-----------------------+   +-------------------+
 //! ```
-use crate::error::{OTelSdkError, OTelSdkResult};
-use crate::{
-    logs::{LogBatch, LogExporter, SdkLogRecord},
-    Resource,
-};
-use std::sync::mpsc::{self, RecvTimeoutError, SyncSender};
+use crate::error::OTelSdkResult;
+use crate::{logs::SdkLogRecord, Resource};
 #[cfg(feature = "spec_unstable_logs_enabled")]
 use opentelemetry::logs::Severity;
-use opentelemetry::{otel_debug, otel_error, otel_info, otel_warn, InstrumentationScope};
+use opentelemetry::InstrumentationScope;
-use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
-use std::{cmp::min, env, sync::Mutex};
-use std::{
-    fmt::{self, Debug, Formatter},
-    str::FromStr,
-    sync::Arc,
-    thread,
-    time::Duration,
-    time::Instant,
-};
-
-/// Delay interval between two consecutive exports.
-pub(crate) const OTEL_BLRP_SCHEDULE_DELAY: &str = "OTEL_BLRP_SCHEDULE_DELAY";
-/// Default delay interval between two consecutive exports.
-pub(crate) const OTEL_BLRP_SCHEDULE_DELAY_DEFAULT: u64 = 1_000;
-/// Maximum allowed time to export data.
-#[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")]
-pub(crate) const OTEL_BLRP_EXPORT_TIMEOUT: &str = "OTEL_BLRP_EXPORT_TIMEOUT";
-/// Default maximum allowed time to export data.
-#[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")]
-pub(crate) const OTEL_BLRP_EXPORT_TIMEOUT_DEFAULT: u64 = 30_000;
-/// Maximum queue size.
-pub(crate) const OTEL_BLRP_MAX_QUEUE_SIZE: &str = "OTEL_BLRP_MAX_QUEUE_SIZE";
-/// Default maximum queue size.
-pub(crate) const OTEL_BLRP_MAX_QUEUE_SIZE_DEFAULT: usize = 2_048;
-/// Maximum batch size, must be less than or equal to OTEL_BLRP_MAX_QUEUE_SIZE.
-pub(crate) const OTEL_BLRP_MAX_EXPORT_BATCH_SIZE: &str = "OTEL_BLRP_MAX_EXPORT_BATCH_SIZE";
-/// Default maximum batch size.
-pub(crate) const OTEL_BLRP_MAX_EXPORT_BATCH_SIZE_DEFAULT: usize = 512;
+use std::fmt::Debug;
 
 /// The interface for plugging into a [`SdkLogger`].
 ///
@@ -105,799 +68,23 @@ pub trait LogProcessor: Send + Sync + Debug {
     fn set_resource(&self, _resource: &Resource) {}
 }
 
-/// A [`LogProcessor`] designed for testing and debugging purpose, that immediately
-/// exports log records as they are emitted. Log records are exported synchronously
-/// in the same thread that emits the log record.
-/// When using this processor with the OTLP Exporter, the following exporter
-/// features are supported:
-/// - `grpc-tonic`: This requires LoggerProvider to be created within a tokio
-///   runtime. Logs can be emitted from any thread, including tokio runtime
-///   threads.
-/// - `reqwest-blocking-client`: LoggerProvider may be created anywhere, but
-///   logs must be emitted from a non-tokio runtime thread.
-/// - `reqwest-client`: LoggerProvider may be created anywhere, but logs must be
-///   emitted from a tokio runtime thread.
-///
-/// ## Example
-///
-/// ### Using a SimpleLogProcessor
-///
-/// ```rust
-/// use opentelemetry_sdk::logs::{SimpleLogProcessor, LoggerProvider, LogExporter};
-/// use opentelemetry::global;
-/// use opentelemetry_sdk::logs::InMemoryLogExporter;
-///
-/// let exporter = InMemoryLogExporter::default(); // Replace with an actual exporter
-/// let provider = LoggerProvider::builder()
-///     .with_simple_exporter(exporter)
-///     .build();
-///
-/// ```
-#[derive(Debug)]
-pub struct SimpleLogProcessor<T: LogExporter> {
-    exporter: Mutex<T>,
-    is_shutdown: AtomicBool,
-}
-
-impl<T: LogExporter> SimpleLogProcessor<T> {
-    pub(crate) fn new(exporter: T) -> Self {
-        SimpleLogProcessor {
-            exporter: Mutex::new(exporter),
-            is_shutdown: AtomicBool::new(false),
-        }
-    }
-}
-
-impl<T: LogExporter> LogProcessor for SimpleLogProcessor<T> {
-    fn emit(&self, record: &mut SdkLogRecord, instrumentation: &InstrumentationScope) {
-        // noop after shutdown
-        if self.is_shutdown.load(std::sync::atomic::Ordering::Relaxed) {
-            // this is a warning, as the user is trying to log after the processor has been shutdown
-            otel_warn!(
-                name: "SimpleLogProcessor.Emit.ProcessorShutdown",
-            );
-            return;
-        }
-
-        let result = self
-            .exporter
-            .lock()
-            .map_err(|_| OTelSdkError::InternalFailure("SimpleLogProcessor mutex poison".into()))
-            .and_then(|exporter| {
-                let log_tuple = &[(record as &SdkLogRecord, instrumentation)];
-                futures_executor::block_on(exporter.export(LogBatch::new(log_tuple)))
-            });
-        // Handle errors with specific static names
-        match result {
-            Err(OTelSdkError::InternalFailure(_)) => {
-                // logging as debug as this is not a user error
-                otel_debug!(
-                    name: "SimpleLogProcessor.Emit.MutexPoisoning",
-                );
-            }
-            Err(err) => {
-                otel_error!(
-                    name: "SimpleLogProcessor.Emit.ExportError",
-                    error = format!("{}",err)
-                );
-            }
-            _ => {}
-        }
-    }
-
-    fn force_flush(&self) -> OTelSdkResult {
-        Ok(())
-    }
-
-    fn shutdown(&self) -> OTelSdkResult {
-        self.is_shutdown
-            .store(true, std::sync::atomic::Ordering::Relaxed);
-        if let Ok(mut exporter) = self.exporter.lock() {
-            exporter.shutdown()
-        } else {
-            Err(OTelSdkError::InternalFailure(
-                "SimpleLogProcessor mutex poison at shutdown".into(),
-            ))
-        }
-    }
-
-    fn set_resource(&self, resource: &Resource) {
-        if let Ok(mut exporter) = self.exporter.lock() {
-            exporter.set_resource(resource);
-        }
-    }
-}
-
-/// Messages sent between application thread and batch log processor's work thread.
-#[allow(clippy::large_enum_variant)]
-#[derive(Debug)]
-enum BatchMessage {
-    /// This is ONLY sent when the number of logs records in the data channel has reached `max_export_batch_size`.
-    ExportLog(Arc<AtomicBool>),
-    /// ForceFlush flushes the current buffer to the exporter.
-    ForceFlush(mpsc::SyncSender<OTelSdkResult>),
-    /// Shut down the worker thread, push all logs in buffer to the exporter.
-    Shutdown(mpsc::SyncSender<OTelSdkResult>),
-    /// Set the resource for the exporter.
-    SetResource(Arc<Resource>),
-}
-
-type LogsData = Box<(SdkLogRecord, InstrumentationScope)>;
-
-/// The `BatchLogProcessor` collects finished logs in a buffer and exports them
-/// in batches to the configured `LogExporter`. This processor is ideal for
-/// high-throughput environments, as it minimizes the overhead of exporting logs
-/// individually. It uses a **dedicated background thread** to manage and export logs
-/// asynchronously, ensuring that the application's main execution flow is not blocked.
-///
-/// This processor supports the following configurations:
-/// - **Queue size**: Maximum number of log records that can be buffered.
-/// - **Batch size**: Maximum number of log records to include in a single export.
-/// - **Scheduled delay**: Frequency at which the batch is exported.
-///
-/// When using this processor with the OTLP Exporter, the following exporter
-/// features are supported:
-/// - `grpc-tonic`: Requires `LoggerProvider` to be created within a tokio runtime.
-/// - `reqwest-blocking-client`: Works with a regular `main` or `tokio::main`.
-///
-/// In other words, other clients like `reqwest` and `hyper` are not supported.
-///
-/// `BatchLogProcessor` buffers logs in memory and exports them in batches. An
-/// export is triggered when `max_export_batch_size` is reached or every
-/// `scheduled_delay` milliseconds. Users can explicitly trigger an export using
-/// the `force_flush` method. Shutdown also triggers an export of all buffered
-/// logs and is recommended to be called before the application exits to ensure
-/// all buffered logs are exported.
-///
-/// **Warning**: When using tokio's current-thread runtime, `shutdown()`, which
-/// is a blocking call ,should not be called from your main thread. This can
-/// cause deadlock. Instead, call `shutdown()` from a separate thread or use
-/// tokio's `spawn_blocking`.
-///
-/// [`shutdown()`]: crate::logs::LoggerProvider::shutdown
-/// [`force_flush()`]: crate::logs::LoggerProvider::force_flush
-///
-/// ### Using a BatchLogProcessor:
-///
-/// ```rust
-/// use opentelemetry_sdk::logs::{BatchLogProcessor, BatchConfigBuilder, LoggerProvider};
-/// use opentelemetry::global;
-/// use std::time::Duration;
-/// use opentelemetry_sdk::logs::InMemoryLogExporter;
-///
-/// let exporter = InMemoryLogExporter::default(); // Replace with an actual exporter
-/// let processor = BatchLogProcessor::builder(exporter)
-///     .with_batch_config(
-///         BatchConfigBuilder::default()
-///             .with_max_queue_size(2048)
-///             .with_max_export_batch_size(512)
-///             .with_scheduled_delay(Duration::from_secs(5))
-///             .build(),
-///     )
-///     .build();
-///
-/// let provider = LoggerProvider::builder()
-///     .with_log_processor(processor)
-///     .build();
-/// ```
-pub struct BatchLogProcessor {
-    logs_sender: SyncSender<LogsData>, // Data channel to store log records and instrumentation scopes
-    message_sender: SyncSender<BatchMessage>, // Control channel to store control messages for the worker thread
-    handle: Mutex<Option<thread::JoinHandle<()>>>,
-    forceflush_timeout: Duration,
-    shutdown_timeout: Duration,
-    export_log_message_sent: Arc<AtomicBool>,
-    current_batch_size: Arc<AtomicUsize>,
-    max_export_batch_size: usize,
-
-    // Track dropped logs - we'll log this at shutdown
-    dropped_logs_count: AtomicUsize,
-
-    // Track the maximum queue size that was configured for this processor
-    max_queue_size: usize,
-}
-
-impl Debug for BatchLogProcessor {
-    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
-        f.debug_struct("BatchLogProcessor")
-            .field("message_sender", &self.message_sender)
-            .finish()
-    }
-}
-
-impl LogProcessor for BatchLogProcessor {
-    fn emit(&self, record: &mut SdkLogRecord, instrumentation: &InstrumentationScope) {
-        let result = self
-            .logs_sender
-            .try_send(Box::new((record.clone(), instrumentation.clone())));
-
-        // match for result and handle each separately
-        match result {
-            Ok(_) => {
-                // Successfully sent the log record to the data channel.
-                // Increment the current batch size and check if it has reached
-                // the max export batch size.
-                if self.current_batch_size.fetch_add(1, Ordering::Relaxed) + 1
-                    >= self.max_export_batch_size
-                {
-                    // Check if the a control message for exporting logs is
-                    // already sent to the worker thread. If not, send a control
-                    // message to export logs. `export_log_message_sent` is set
-                    // to false ONLY when the worker thread has processed the
-                    // control message.
-
-                    if !self.export_log_message_sent.load(Ordering::Relaxed) {
-                        // This is a cost-efficient check as atomic load
-                        // operations do not require exclusive access to cache
-                        // line. Perform atomic swap to
-                        // `export_log_message_sent` ONLY when the atomic load
-                        // operation above returns false. Atomic
-                        // swap/compare_exchange operations require exclusive
-                        // access to cache line on most processor architectures.
-                        // We could have used compare_exchange as well here, but
-                        // it's more verbose than swap.
-                        if !self.export_log_message_sent.swap(true, Ordering::Relaxed) {
-                            match self.message_sender.try_send(BatchMessage::ExportLog(
-                                self.export_log_message_sent.clone(),
-                            )) {
-                                Ok(_) => {
-                                    // Control message sent successfully.
-                                }
-                                Err(_err) => {
-                                    // TODO: Log error If the control message
-                                    // could not be sent, reset the
-                                    // `export_log_message_sent` flag.
-                                    self.export_log_message_sent.store(false, Ordering::Relaxed);
-                                }
-                            }
-                        }
-                    }
-                }
-            }
-            Err(mpsc::TrySendError::Full(_)) => {
-                // Increment dropped logs count. The first time we have to drop
-                // a log, emit a warning.
-                if self.dropped_logs_count.fetch_add(1, Ordering::Relaxed) == 0 {
-                    otel_warn!(name: "BatchLogProcessor.LogDroppingStarted",
-                        message = "BatchLogProcessor dropped a LogRecord due to queue full. No further log will be emitted for further drops until Shutdown. During Shutdown time, a log will be emitted with exact count of total logs dropped.");
-                }
-            }
-            Err(mpsc::TrySendError::Disconnected(_)) => {
-                // Given background thread is the only receiver, and it's
-                // disconnected, it indicates the thread is shutdown
-                otel_warn!(
-                    name: "BatchLogProcessor.Emit.AfterShutdown",
-                    message = "Logs are being emitted even after Shutdown. This indicates incorrect lifecycle management of OTelLoggerProvider in application. Logs will not be exported."
-                );
-            }
-        }
-    }
-
-    fn force_flush(&self) -> OTelSdkResult {
-        let (sender, receiver) = mpsc::sync_channel(1);
-        match self
-            .message_sender
-            .try_send(BatchMessage::ForceFlush(sender))
-        {
-            Ok(_) => receiver
-                .recv_timeout(self.forceflush_timeout)
-                .map_err(|err| {
-                    if err == RecvTimeoutError::Timeout {
-                        OTelSdkError::Timeout(self.forceflush_timeout)
-                    } else {
-                        OTelSdkError::InternalFailure(format!("{}", err))
-                    }
-                })?,
-            Err(mpsc::TrySendError::Full(_)) => {
-                // If the control message could not be sent, emit a warning.
-                otel_debug!(
-                    name: "BatchLogProcessor.ForceFlush.ControlChannelFull",
-                    message = "Control message to flush the worker thread could not be sent as the control channel is full. This can occur if user repeatedily calls force_flush/shutdown without finishing the previous call."
-                );
-                Err(OTelSdkError::InternalFailure("ForceFlush cannot be performed as Control channel is full. This can occur if user repeatedily calls force_flush/shutdown without finishing the previous call.".into()))
-            }
-            Err(mpsc::TrySendError::Disconnected(_)) => {
-                // Given background thread is the only receiver, and it's
-                // disconnected, it indicates the thread is shutdown
-                otel_debug!(
-                    name: "BatchLogProcessor.ForceFlush.AlreadyShutdown",
-                    message = "ForceFlush invoked after Shutdown. This will not perform Flush and indicates a incorrect lifecycle management in Application."
-                );
-
-                Err(OTelSdkError::AlreadyShutdown)
-            }
-        }
-    }
-
-    fn shutdown(&self) -> OTelSdkResult {
-        let dropped_logs = self.dropped_logs_count.load(Ordering::Relaxed);
-        let max_queue_size = self.max_queue_size;
-        if dropped_logs > 0 {
-            otel_warn!(
-                name: "BatchLogProcessor.LogsDropped",
-                dropped_logs_count = dropped_logs,
-                max_queue_size = max_queue_size,
-                message = "Logs were dropped due to a queue being full. The count represents the total count of log records dropped in the lifetime of this BatchLogProcessor. Consider increasing the queue size and/or decrease delay between intervals."
-            );
-        }
-
-        let (sender, receiver) = mpsc::sync_channel(1);
-        match self.message_sender.try_send(BatchMessage::Shutdown(sender)) {
-            Ok(_) => {
-                receiver
-                    .recv_timeout(self.shutdown_timeout)
-                    .map(|_| {
-                        // join the background thread after receiving back the
-                        // shutdown signal
-                        if let Some(handle) = self.handle.lock().unwrap().take() {
-                            handle.join().unwrap();
-                        }
-                        OTelSdkResult::Ok(())
-                    })
-                    .map_err(|err| match err {
-                        RecvTimeoutError::Timeout => {
-                            otel_error!(
-                                name: "BatchLogProcessor.Shutdown.Timeout",
-                                message = "BatchLogProcessor shutdown timing out."
-                            );
-                            OTelSdkError::Timeout(self.shutdown_timeout)
-                        }
-                        _ => {
-                            otel_error!(
-                                name: "BatchLogProcessor.Shutdown.Error",
-                                error = format!("{}", err)
-                            );
-                            OTelSdkError::InternalFailure(format!("{}", err))
-                        }
-                    })?
-            }
-            Err(mpsc::TrySendError::Full(_)) => {
-                // If the control message could not be sent, emit a warning.
-                otel_debug!(
-                    name: "BatchLogProcessor.Shutdown.ControlChannelFull",
-                    message = "Control message to shutdown the worker thread could not be sent as the control channel is full. This can occur if user repeatedily calls force_flush/shutdown without finishing the previous call."
-                );
-                Err(OTelSdkError::InternalFailure("Shutdown cannot be performed as Control channel is full. This can occur if user repeatedily calls force_flush/shutdown without finishing the previous call.".into()))
-            }
-            Err(mpsc::TrySendError::Disconnected(_)) => {
-                // Given background thread is the only receiver, and it's
-                // disconnected, it indicates the thread is shutdown
-                otel_debug!(
-                    name: "BatchLogProcessor.Shutdown.AlreadyShutdown",
-                    message = "Shutdown is being invoked more than once. This is noop, but indicates a potential issue in the application's lifecycle management."
-                );
-
-                Err(OTelSdkError::AlreadyShutdown)
-            }
-        }
-    }
-
-    fn set_resource(&self, resource: &Resource) {
-        let resource = Arc::new(resource.clone());
-        let _ = self
-            .message_sender
-            .try_send(BatchMessage::SetResource(resource));
-    }
-}
-
-impl BatchLogProcessor {
-    pub(crate) fn new<E>(mut exporter: E, config: BatchConfig) -> Self
-    where
-        E: LogExporter + Send + Sync + 'static,
-    {
-        let (logs_sender, logs_receiver) = mpsc::sync_channel::<LogsData>(config.max_queue_size);
-        let (message_sender, message_receiver) = mpsc::sync_channel::<BatchMessage>(64); // Is this a reasonable bound?
-        let max_queue_size = config.max_queue_size;
-        let max_export_batch_size = config.max_export_batch_size;
-        let current_batch_size = Arc::new(AtomicUsize::new(0));
-        let current_batch_size_for_thread = current_batch_size.clone();
-
-        let handle = thread::Builder::new()
-            .name("OpenTelemetry.Logs.BatchProcessor".to_string())
-            .spawn(move || {
-                otel_info!(
-                    name: "BatchLogProcessor.ThreadStarted",
-                    interval_in_millisecs = config.scheduled_delay.as_millis(),
-                    max_export_batch_size = config.max_export_batch_size,
-                    max_queue_size = max_queue_size,
-                );
-                let mut last_export_time = Instant::now();
-                let mut logs = Vec::with_capacity(config.max_export_batch_size);
-                let current_batch_size = current_batch_size_for_thread;
-
-                // This method gets up to `max_export_batch_size` amount of logs from the channel and exports them.
-                // It returns the result of the export operation.
-                // It expects the logs vec to be empty when it's called.
-                #[inline]
-                fn get_logs_and_export<E>(
-                    logs_receiver: &mpsc::Receiver<LogsData>,
-                    exporter: &E,
-                    logs: &mut Vec<LogsData>,
-                    last_export_time: &mut Instant,
-                    current_batch_size: &AtomicUsize,
-                    config: &BatchConfig,
-                ) -> OTelSdkResult
-                where
-                    E: LogExporter + Send + Sync + 'static,
-                {
-                    let target = current_batch_size.load(Ordering::Relaxed); // `target` is used to determine the stopping criteria for exporting logs.
-                    let mut result = OTelSdkResult::Ok(());
-                    let mut total_exported_logs: usize = 0;
-
-                    while target > 0 && total_exported_logs < target {
-                        // Get upto `max_export_batch_size` amount of logs log records from the channel and push them to the logs vec
-                        while let Ok(log) = logs_receiver.try_recv() {
-                            logs.push(log);
-                            if logs.len() == config.max_export_batch_size {
-                                break;
-                            }
-                        }
-
-                        let count_of_logs = logs.len(); // Count of logs that will be exported
-                        total_exported_logs += count_of_logs;
-
-                        result = export_batch_sync(exporter, logs, last_export_time); // This method clears the logs vec after exporting
-
-                        current_batch_size.fetch_sub(count_of_logs, Ordering::Relaxed);
-                    }
-                    result
-                }
-
-                loop {
-                    let remaining_time = config
-                        .scheduled_delay
-                        .checked_sub(last_export_time.elapsed())
-                        .unwrap_or(config.scheduled_delay);
-
-                    match message_receiver.recv_timeout(remaining_time) {
-                        Ok(BatchMessage::ExportLog(export_log_message_sent)) => {
-                            // Reset the export log message sent flag now it has has been processed.
-                            export_log_message_sent.store(false, Ordering::Relaxed);
-
-                            otel_debug!(
-                                name: "BatchLogProcessor.ExportingDueToBatchSize",
-                            );
-
-                            let _ = get_logs_and_export(
-                                &logs_receiver,
-                                &exporter,
-                                &mut logs,
-                                &mut last_export_time,
-                                &current_batch_size,
-                                &config,
-                            );
-                        }
-                        Ok(BatchMessage::ForceFlush(sender)) => {
-                            otel_debug!(name: "BatchLogProcessor.ExportingDueToForceFlush");
-                            let result = get_logs_and_export(
-                                &logs_receiver,
-                                &exporter,
-                                &mut logs,
-                                &mut last_export_time,
-                                &current_batch_size,
-                                &config,
-                            );
-                            let _ = sender.send(result);
-                        }
-                        Ok(BatchMessage::Shutdown(sender)) => {
-                            otel_debug!(name: "BatchLogProcessor.ExportingDueToShutdown");
-                            let result = get_logs_and_export(
-                                &logs_receiver,
-                                &exporter,
-                                &mut logs,
-                                &mut last_export_time,
-                                &current_batch_size,
-                                &config,
-                            );
-                            let _ = sender.send(result);
-
-                            otel_debug!(
-                                name: "BatchLogProcessor.ThreadExiting",
-                                reason = "ShutdownRequested"
-                            );
-                            //
-                            // break out the loop and return from the current background thread.
-                            //
-                            break;
-                        }
-                        Ok(BatchMessage::SetResource(resource)) => {
-                            exporter.set_resource(&resource);
-                        }
-                        Err(RecvTimeoutError::Timeout) => {
-                            otel_debug!(
-                                name: "BatchLogProcessor.ExportingDueToTimer",
-                            );
-
-                            let _ = get_logs_and_export(
-                                &logs_receiver,
-                                &exporter,
-                                &mut logs,
-                                &mut last_export_time,
-                                &current_batch_size,
-                                &config,
-                            );
-                        }
-                        Err(RecvTimeoutError::Disconnected) => {
-                            // Channel disconnected, only thing to do is break
-                            // out (i.e exit the thread)
-                            otel_debug!(
-                                name: "BatchLogProcessor.ThreadExiting",
-                                reason = "MessageSenderDisconnected"
-                            );
-                            break;
-                        }
-                    }
-                }
-                otel_info!(
-                    name: "BatchLogProcessor.ThreadStopped"
-                );
-            })
-            .expect("Thread spawn failed."); //TODO: Handle thread spawn failure
-
-        // Return batch processor with link to worker
-        BatchLogProcessor {
-            logs_sender,
-            message_sender,
-            handle: Mutex::new(Some(handle)),
-            forceflush_timeout: Duration::from_secs(5), // TODO: make this configurable
-            shutdown_timeout: Duration::from_secs(5), // TODO: make this configurable
-            dropped_logs_count: AtomicUsize::new(0),
-            max_queue_size,
-            export_log_message_sent: Arc::new(AtomicBool::new(false)),
-            current_batch_size,
-            max_export_batch_size,
-        }
-    }
-
-    /// Create a new batch processor builder
-    pub fn builder<E>(exporter: E) -> BatchLogProcessorBuilder<E>
-    where
-        E: LogExporter,
-    {
-        BatchLogProcessorBuilder {
-            exporter,
-            config: Default::default(),
-        }
-    }
-}
-
-#[allow(clippy::vec_box)]
-fn export_batch_sync<E>(
-    exporter: &E,
-    batch: &mut Vec<Box<(SdkLogRecord, InstrumentationScope)>>,
-    last_export_time: &mut Instant,
-) -> OTelSdkResult
-where
-    E: LogExporter + ?Sized,
-{
-    *last_export_time = Instant::now();
-
-    if batch.is_empty() {
-        return OTelSdkResult::Ok(());
-    }
-
-    let export = exporter.export(LogBatch::new_with_owned_data(batch.as_slice()));
-    let export_result = futures_executor::block_on(export);
-
-    // Clear the batch vec after exporting
-    batch.clear();
-
-    match export_result {
-        Ok(_) => OTelSdkResult::Ok(()),
-        Err(err) => {
-            otel_error!(
-                name: "BatchLogProcessor.ExportError",
-                error = format!("{}", err)
-            );
-            OTelSdkResult::Err(err)
-        }
-    }
-}
-
-///
-/// A builder for creating [`BatchLogProcessor`] instances.
-///
-#[derive(Debug)]
-pub struct BatchLogProcessorBuilder<E> {
-    exporter: E,
-    config: BatchConfig,
-}
-
-impl<E> BatchLogProcessorBuilder<E>
-where
-    E: LogExporter + 'static,
-{
-    /// Set the BatchConfig for [`BatchLogProcessorBuilder`]
-    pub fn with_batch_config(self, config: BatchConfig) -> Self {
-        BatchLogProcessorBuilder { config, ..self }
-    }
-
-    /// Build a batch processor
-    pub fn build(self) -> BatchLogProcessor {
-        BatchLogProcessor::new(self.exporter, self.config)
-    }
-}
-
-/// Batch log processor configuration.
-/// Use [`BatchConfigBuilder`] to configure your own instance of [`BatchConfig`].
-#[derive(Debug)]
-#[allow(dead_code)]
-pub struct BatchConfig {
-    /// The maximum queue size to buffer logs for delayed processing. If the
-    /// queue gets full it drops the logs. The default value of is 2048.
-    pub(crate) max_queue_size: usize,
-
-    /// The delay interval in milliseconds between two consecutive processing
-    /// of batches. The default value is 1 second.
-    pub(crate) scheduled_delay: Duration,
-
-    /// The maximum number of logs to process in a single batch. If there are
-    /// more than one batch worth of logs then it processes multiple batches
-    /// of logs one batch after the other without any delay. The default value
-    /// is 512.
-    pub(crate) max_export_batch_size: usize,
-
-    /// The maximum duration to export a batch of data.
-    #[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")]
-    pub(crate) max_export_timeout: Duration,
-}
-
-impl Default for BatchConfig {
-    fn default() -> Self {
-        BatchConfigBuilder::default().build()
-    }
-}
-
-/// A builder for creating [`BatchConfig`] instances.
-#[derive(Debug)]
-pub struct BatchConfigBuilder {
-    max_queue_size: usize,
-    scheduled_delay: Duration,
-    max_export_batch_size: usize,
-    #[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")]
-    max_export_timeout: Duration,
-}
-
-impl Default for BatchConfigBuilder {
-    /// Create a new [`BatchConfigBuilder`] initialized with default batch config values as per the specs.
-    /// The values are overridden by environment variables if set.
-    /// The supported environment variables are:
-    /// * `OTEL_BLRP_MAX_QUEUE_SIZE`
-    /// * `OTEL_BLRP_SCHEDULE_DELAY`
-    /// * `OTEL_BLRP_MAX_EXPORT_BATCH_SIZE`
-    /// * `OTEL_BLRP_EXPORT_TIMEOUT`
-    fn default() -> Self {
-        BatchConfigBuilder {
-            max_queue_size: OTEL_BLRP_MAX_QUEUE_SIZE_DEFAULT,
-            scheduled_delay: Duration::from_millis(OTEL_BLRP_SCHEDULE_DELAY_DEFAULT),
-            max_export_batch_size: OTEL_BLRP_MAX_EXPORT_BATCH_SIZE_DEFAULT,
-            #[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")]
-            max_export_timeout: Duration::from_millis(OTEL_BLRP_EXPORT_TIMEOUT_DEFAULT),
-        }
-        .init_from_env_vars()
-    }
-}
-
-impl BatchConfigBuilder {
-    /// Set max_queue_size for [`BatchConfigBuilder`].
-    /// It's the maximum queue size to buffer logs for delayed processing.
-    /// If the queue gets full it will drop the logs.
-    /// The default value of is 2048.
-    pub fn with_max_queue_size(mut self, max_queue_size: usize) -> Self {
-        self.max_queue_size = max_queue_size;
-        self
-    }
-
-    /// Set scheduled_delay for [`BatchConfigBuilder`].
-    /// It's the delay interval in milliseconds between two consecutive processing of batches.
-    /// The default value is 1000 milliseconds.
-    pub fn with_scheduled_delay(mut self, scheduled_delay: Duration) -> Self {
-        self.scheduled_delay = scheduled_delay;
-        self
-    }
-
-    /// Set max_export_timeout for [`BatchConfigBuilder`].
-    /// It's the maximum duration to export a batch of data.
-    /// The default value is 30000 milliseconds.
-    #[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")]
-    pub fn with_max_export_timeout(mut self, max_export_timeout: Duration) -> Self {
-        self.max_export_timeout = max_export_timeout;
-        self
-    }
-
-    /// Set max_export_batch_size for [`BatchConfigBuilder`].
-    /// It's the maximum number of logs to process in a single batch. If there are
-    /// more than one batch worth of logs then it processes multiple batches
-    /// of logs one batch after the other without any delay.
-    /// The default value is 512.
-    pub fn with_max_export_batch_size(mut self, max_export_batch_size: usize) -> Self {
-        self.max_export_batch_size = max_export_batch_size;
-        self
-    }
-
-    /// Builds a `BatchConfig` enforcing the following invariants:
-    /// * `max_export_batch_size` must be less than or equal to `max_queue_size`.
-    pub fn build(self) -> BatchConfig {
-        // max export batch size must be less or equal to max queue size.
-        // we set max export batch size to max queue size if it's larger than max queue size.
 #[cfg(all(test, feature = "testing", feature = "logs"))]
-mod tests {
-    use super::{
-        BatchLogProcessor, OTEL_BLRP_MAX_EXPORT_BATCH_SIZE, OTEL_BLRP_MAX_QUEUE_SIZE,
-        OTEL_BLRP_SCHEDULE_DELAY,
-    };
-    #[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")]
-    use super::{OTEL_BLRP_EXPORT_TIMEOUT, OTEL_BLRP_EXPORT_TIMEOUT_DEFAULT};
+pub(crate) mod tests {
     use crate::logs::{LogBatch, LogExporter, SdkLogRecord};
+    use crate::Resource;
     use crate::{
         error::OTelSdkResult,
-        logs::{
-            log_processor::{
-                OTEL_BLRP_MAX_EXPORT_BATCH_SIZE_DEFAULT, OTEL_BLRP_MAX_QUEUE_SIZE_DEFAULT,
-                OTEL_BLRP_SCHEDULE_DELAY_DEFAULT,
-            },
-            BatchConfig, BatchConfigBuilder, InMemoryLogExporter, InMemoryLogExporterBuilder,
-            LogProcessor, SdkLoggerProvider, SimpleLogProcessor,
-        },
-        Resource,
+        logs::{LogProcessor, SdkLoggerProvider},
     };
     use opentelemetry::logs::AnyValue;
     use opentelemetry::logs::LogRecord as _;
     use opentelemetry::logs::{Logger, LoggerProvider};
-    use opentelemetry::KeyValue;
     use opentelemetry::{InstrumentationScope, Key};
-    use std::sync::atomic::{AtomicUsize, Ordering};
     use std::sync::{Arc, Mutex};
-    use std::time::Duration;
 
     #[derive(Debug, Clone)]
-    struct MockLogExporter {
-        resource: Arc<Mutex<Option<Resource>>>,
+    pub(crate) struct MockLogExporter {
+        pub resource: Arc<Mutex<Option<Resource>>>,
     }
 
     impl LogExporter for MockLogExporter {
@@ -925,294 +112,11 @@ mod tests {
 
     // Implementation specific to the MockLogExporter, not part of the LogExporter trait
     impl MockLogExporter {
-        fn get_resource(&self) -> Option<Resource> {
+        pub(crate) fn get_resource(&self) -> Option<Resource> {
             (*self.resource).lock().unwrap().clone()
         }
     }
 
-    #[test]
-    fn test_default_const_values() {
-        assert_eq!(OTEL_BLRP_SCHEDULE_DELAY, "OTEL_BLRP_SCHEDULE_DELAY");
-        assert_eq!(OTEL_BLRP_SCHEDULE_DELAY_DEFAULT, 1_000);
-        #[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")]
-        assert_eq!(OTEL_BLRP_EXPORT_TIMEOUT, "OTEL_BLRP_EXPORT_TIMEOUT");
-        #[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")]
-        assert_eq!(OTEL_BLRP_EXPORT_TIMEOUT_DEFAULT, 30_000);
-        assert_eq!(OTEL_BLRP_MAX_QUEUE_SIZE, "OTEL_BLRP_MAX_QUEUE_SIZE");
-        assert_eq!(OTEL_BLRP_MAX_QUEUE_SIZE_DEFAULT, 2_048);
-        assert_eq!(
-            OTEL_BLRP_MAX_EXPORT_BATCH_SIZE,
-            "OTEL_BLRP_MAX_EXPORT_BATCH_SIZE"
-        );
assert_eq!(OTEL_BLRP_MAX_EXPORT_BATCH_SIZE_DEFAULT, 512); - } - - #[test] - fn test_default_batch_config_adheres_to_specification() { - // The following environment variables are expected to be unset so that their default values are used. - let env_vars = vec![ - OTEL_BLRP_SCHEDULE_DELAY, - #[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")] - OTEL_BLRP_EXPORT_TIMEOUT, - OTEL_BLRP_MAX_QUEUE_SIZE, - OTEL_BLRP_MAX_EXPORT_BATCH_SIZE, - ]; - - let config = temp_env::with_vars_unset(env_vars, BatchConfig::default); - - assert_eq!( - config.scheduled_delay, - Duration::from_millis(OTEL_BLRP_SCHEDULE_DELAY_DEFAULT) - ); - #[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")] - assert_eq!( - config.max_export_timeout, - Duration::from_millis(OTEL_BLRP_EXPORT_TIMEOUT_DEFAULT) - ); - assert_eq!(config.max_queue_size, OTEL_BLRP_MAX_QUEUE_SIZE_DEFAULT); - assert_eq!( - config.max_export_batch_size, - OTEL_BLRP_MAX_EXPORT_BATCH_SIZE_DEFAULT - ); - } - - #[test] - fn test_batch_config_configurable_by_env_vars() { - let env_vars = vec![ - (OTEL_BLRP_SCHEDULE_DELAY, Some("2000")), - #[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")] - (OTEL_BLRP_EXPORT_TIMEOUT, Some("60000")), - (OTEL_BLRP_MAX_QUEUE_SIZE, Some("4096")), - (OTEL_BLRP_MAX_EXPORT_BATCH_SIZE, Some("1024")), - ]; - - let config = temp_env::with_vars(env_vars, BatchConfig::default); - - assert_eq!(config.scheduled_delay, Duration::from_millis(2000)); - #[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")] - assert_eq!(config.max_export_timeout, Duration::from_millis(60000)); - assert_eq!(config.max_queue_size, 4096); - assert_eq!(config.max_export_batch_size, 1024); - } - - #[test] - fn test_batch_config_max_export_batch_size_validation() { - let env_vars = vec![ - (OTEL_BLRP_MAX_QUEUE_SIZE, Some("256")), - (OTEL_BLRP_MAX_EXPORT_BATCH_SIZE, Some("1024")), - ]; - - let config = temp_env::with_vars(env_vars, BatchConfig::default); - - assert_eq!(config.max_queue_size, 256); - assert_eq!(config.max_export_batch_size, 256); - assert_eq!( - config.scheduled_delay, - Duration::from_millis(OTEL_BLRP_SCHEDULE_DELAY_DEFAULT) - ); - #[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")] - assert_eq!( - config.max_export_timeout, - Duration::from_millis(OTEL_BLRP_EXPORT_TIMEOUT_DEFAULT) - ); - } - - #[test] - fn test_batch_config_with_fields() { - let batch_builder = BatchConfigBuilder::default() - .with_max_export_batch_size(1) - .with_scheduled_delay(Duration::from_millis(2)) - .with_max_queue_size(4); - - #[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")] - let batch_builder = batch_builder.with_max_export_timeout(Duration::from_millis(3)); - let batch = batch_builder.build(); - - assert_eq!(batch.max_export_batch_size, 1); - assert_eq!(batch.scheduled_delay, Duration::from_millis(2)); - #[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")] - assert_eq!(batch.max_export_timeout, Duration::from_millis(3)); - assert_eq!(batch.max_queue_size, 4); - } - - #[test] - fn test_build_batch_log_processor_builder() { - let mut env_vars = vec![ - (OTEL_BLRP_MAX_EXPORT_BATCH_SIZE, Some("500")), - (OTEL_BLRP_SCHEDULE_DELAY, Some("I am not number")), - #[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")] - (OTEL_BLRP_EXPORT_TIMEOUT, Some("2046")), - ]; - temp_env::with_vars(env_vars.clone(), || { - let builder = BatchLogProcessor::builder(InMemoryLogExporter::default()); - - 
assert_eq!(builder.config.max_export_batch_size, 500); - assert_eq!( - builder.config.scheduled_delay, - Duration::from_millis(OTEL_BLRP_SCHEDULE_DELAY_DEFAULT) - ); - assert_eq!( - builder.config.max_queue_size, - OTEL_BLRP_MAX_QUEUE_SIZE_DEFAULT - ); - - #[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")] - assert_eq!( - builder.config.max_export_timeout, - Duration::from_millis(2046) - ); - }); - - env_vars.push((OTEL_BLRP_MAX_QUEUE_SIZE, Some("120"))); - - temp_env::with_vars(env_vars, || { - let builder = BatchLogProcessor::builder(InMemoryLogExporter::default()); - assert_eq!(builder.config.max_export_batch_size, 120); - assert_eq!(builder.config.max_queue_size, 120); - }); - } - - #[test] - fn test_build_batch_log_processor_builder_with_custom_config() { - let expected = BatchConfigBuilder::default() - .with_max_export_batch_size(1) - .with_scheduled_delay(Duration::from_millis(2)) - .with_max_queue_size(4) - .build(); - - let builder = - BatchLogProcessor::builder(InMemoryLogExporter::default()).with_batch_config(expected); - - let actual = &builder.config; - assert_eq!(actual.max_export_batch_size, 1); - assert_eq!(actual.scheduled_delay, Duration::from_millis(2)); - assert_eq!(actual.max_queue_size, 4); - } - - #[test] - fn test_set_resource_simple_processor() { - let exporter = MockLogExporter { - resource: Arc::new(Mutex::new(None)), - }; - let processor = SimpleLogProcessor::new(exporter.clone()); - let _ = SdkLoggerProvider::builder() - .with_log_processor(processor) - .with_resource( - Resource::builder_empty() - .with_attributes([ - KeyValue::new("k1", "v1"), - KeyValue::new("k2", "v3"), - KeyValue::new("k3", "v3"), - KeyValue::new("k4", "v4"), - KeyValue::new("k5", "v5"), - ]) - .build(), - ) - .build(); - assert_eq!(exporter.get_resource().unwrap().into_iter().count(), 5); - } - - #[tokio::test(flavor = "multi_thread", worker_threads = 1)] - async fn test_set_resource_batch_processor() { - let exporter = MockLogExporter { - resource: Arc::new(Mutex::new(None)), - }; - let processor = BatchLogProcessor::new(exporter.clone(), BatchConfig::default()); - let provider = SdkLoggerProvider::builder() - .with_log_processor(processor) - .with_resource( - Resource::builder_empty() - .with_attributes([ - KeyValue::new("k1", "v1"), - KeyValue::new("k2", "v3"), - KeyValue::new("k3", "v3"), - KeyValue::new("k4", "v4"), - KeyValue::new("k5", "v5"), - ]) - .build(), - ) - .build(); - - // wait for the batch processor to process the resource. - tokio::time::sleep(Duration::from_millis(100)).await; - - assert_eq!(exporter.get_resource().unwrap().into_iter().count(), 5); - let _ = provider.shutdown(); - } - - #[tokio::test(flavor = "multi_thread")] - async fn test_batch_shutdown() { - // assert we will receive an error - // setup - let exporter = InMemoryLogExporterBuilder::default() - .keep_records_on_shutdown() - .build(); - let processor = BatchLogProcessor::new(exporter.clone(), BatchConfig::default()); - - let mut record = SdkLogRecord::new(); - let instrumentation = InstrumentationScope::default(); - - processor.emit(&mut record, &instrumentation); - processor.force_flush().unwrap(); - processor.shutdown().unwrap(); - // todo: expect to see errors here. How should we assert this? 
-        processor.emit(&mut record, &instrumentation);
-        assert_eq!(1, exporter.get_emitted_logs().unwrap().len())
-    }
-
-    #[test]
-    fn test_simple_shutdown() {
-        let exporter = InMemoryLogExporterBuilder::default()
-            .keep_records_on_shutdown()
-            .build();
-        let processor = SimpleLogProcessor::new(exporter.clone());
-
-        let mut record: SdkLogRecord = SdkLogRecord::new();
-        let instrumentation: InstrumentationScope = Default::default();
-
-        processor.emit(&mut record, &instrumentation);
-
-        processor.shutdown().unwrap();
-
-        let is_shutdown = processor
-            .is_shutdown
-            .load(std::sync::atomic::Ordering::Relaxed);
-        assert!(is_shutdown);
-
-        processor.emit(&mut record, &instrumentation);
-
-        assert_eq!(1, exporter.get_emitted_logs().unwrap().len())
-    }
-
-    #[tokio::test(flavor = "current_thread")]
-    async fn test_batch_log_processor_shutdown_under_async_runtime_current_flavor_multi_thread() {
-        let exporter = InMemoryLogExporterBuilder::default().build();
-        let processor = BatchLogProcessor::new(exporter.clone(), BatchConfig::default());
-
-        processor.shutdown().unwrap();
-    }
-
-    #[tokio::test(flavor = "current_thread")]
-    async fn test_batch_log_processor_shutdown_with_async_runtime_current_flavor_current_thread() {
-        let exporter = InMemoryLogExporterBuilder::default().build();
-        let processor = BatchLogProcessor::new(exporter.clone(), BatchConfig::default());
-        processor.shutdown().unwrap();
-    }
-
-    #[tokio::test(flavor = "multi_thread")]
-    async fn test_batch_log_processor_shutdown_with_async_runtime_multi_flavor_multi_thread() {
-        let exporter = InMemoryLogExporterBuilder::default().build();
-        let processor = BatchLogProcessor::new(exporter.clone(), BatchConfig::default());
-        processor.shutdown().unwrap();
-    }
-
-    #[tokio::test(flavor = "multi_thread")]
-    async fn test_batch_log_processor_shutdown_with_async_runtime_multi_flavor_current_thread() {
-        let exporter = InMemoryLogExporterBuilder::default().build();
-        let processor = BatchLogProcessor::new(exporter.clone(), BatchConfig::default());
-        processor.shutdown().unwrap();
-    }
-
     #[derive(Debug)]
     struct FirstProcessor {
         pub(crate) logs: Arc<Mutex<Vec<(SdkLogRecord, InstrumentationScope)>>>,
@@ -1272,6 +176,7 @@ mod tests {
             Ok(())
         }
     }
+
     #[test]
     fn test_log_data_modification_by_multiple_processors() {
         let first_processor_logs = Arc::new(Mutex::new(Vec::new()));
@@ -1319,237 +224,4 @@ mod tests {
                 == AnyValue::String("Updated by FirstProcessor".into())
         );
     }
-
-    #[test]
-    fn test_simple_processor_sync_exporter_without_runtime() {
-        let exporter = InMemoryLogExporterBuilder::default().build();
-        let processor = SimpleLogProcessor::new(exporter.clone());
-
-        let mut record: SdkLogRecord = SdkLogRecord::new();
-        let instrumentation: InstrumentationScope = Default::default();
-
-        processor.emit(&mut record, &instrumentation);
-
-        assert_eq!(exporter.get_emitted_logs().unwrap().len(), 1);
-    }
-
-    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
-    async fn test_simple_processor_sync_exporter_with_runtime() {
-        let exporter = InMemoryLogExporterBuilder::default().build();
-        let processor = SimpleLogProcessor::new(exporter.clone());
-
-        let mut record: SdkLogRecord = SdkLogRecord::new();
-        let instrumentation: InstrumentationScope = Default::default();
-
-        processor.emit(&mut record, &instrumentation);
-
-        assert_eq!(exporter.get_emitted_logs().unwrap().len(), 1);
-    }
-
-    #[tokio::test(flavor = "multi_thread")]
-    async fn test_simple_processor_sync_exporter_with_multi_thread_runtime() {
-        let exporter = InMemoryLogExporterBuilder::default().build();
-        let processor = Arc::new(SimpleLogProcessor::new(exporter.clone()));
-
-        let mut handles = vec![];
-        for _ in 0..10 {
-            let processor_clone = Arc::clone(&processor);
-            let handle = tokio::spawn(async move {
-                let mut record: SdkLogRecord = SdkLogRecord::new();
-                let instrumentation: InstrumentationScope = Default::default();
-                processor_clone.emit(&mut record, &instrumentation);
-            });
-            handles.push(handle);
-        }
-
-        for handle in handles {
-            handle.await.unwrap();
-        }
-
-        assert_eq!(exporter.get_emitted_logs().unwrap().len(), 10);
-    }
-
-    #[tokio::test(flavor = "current_thread")]
-    async fn test_simple_processor_sync_exporter_with_current_thread_runtime() {
-        let exporter = InMemoryLogExporterBuilder::default().build();
-        let processor = SimpleLogProcessor::new(exporter.clone());
-
-        let mut record: SdkLogRecord = SdkLogRecord::new();
-        let instrumentation: InstrumentationScope = Default::default();
-
-        processor.emit(&mut record, &instrumentation);
-
-        assert_eq!(exporter.get_emitted_logs().unwrap().len(), 1);
-    }
-
-    #[derive(Debug, Clone)]
-    struct LogExporterThatRequiresTokio {
-        export_count: Arc<AtomicUsize>,
-    }
-
-    impl LogExporterThatRequiresTokio {
-        /// Creates a new instance of `LogExporterThatRequiresTokio`.
-        fn new() -> Self {
-            LogExporterThatRequiresTokio {
-                export_count: Arc::new(AtomicUsize::new(0)),
-            }
-        }
-
-        /// Returns the number of logs stored in the exporter.
-        fn len(&self) -> usize {
-            self.export_count.load(Ordering::Acquire)
-        }
-    }
-
-    impl LogExporter for LogExporterThatRequiresTokio {
-        #[allow(clippy::manual_async_fn)]
-        fn export(
-            &self,
-            batch: LogBatch<'_>,
-        ) -> impl std::future::Future<Output = OTelSdkResult> + Send {
-            // Simulate minimal dependency on tokio by sleeping asynchronously for a short duration
-            async move {
-                tokio::time::sleep(Duration::from_millis(50)).await;
-
-                for _ in batch.iter() {
-                    self.export_count.fetch_add(1, Ordering::Acquire);
-                }
-                Ok(())
-            }
-        }
-    }
-
-    #[test]
-    fn test_simple_processor_async_exporter_without_runtime() {
-        // Use `catch_unwind` to catch the panic caused by missing Tokio runtime
-        let result = std::panic::catch_unwind(|| {
-            let exporter = LogExporterThatRequiresTokio::new();
-            let processor = SimpleLogProcessor::new(exporter.clone());
-
-            let mut record: SdkLogRecord = SdkLogRecord::new();
-            let instrumentation: InstrumentationScope = Default::default();
-
-            // This will panic because a tokio async operation runs within the exporter without a runtime.
-            processor.emit(&mut record, &instrumentation);
-        });
-
-        // Verify that the panic occurred and check the panic message for the absence of a Tokio runtime
-        assert!(
-            result.is_err(),
-            "The test should fail due to missing Tokio runtime, but it did not."
-        );
-        let panic_payload = result.unwrap_err();
-        let panic_message = panic_payload
-            .downcast_ref::<String>()
-            .map(|s| s.as_str())
-            .or_else(|| panic_payload.downcast_ref::<&str>().copied())
-            .unwrap_or("No panic message");
-
-        assert!(
-            panic_message.contains("no reactor running")
-                || panic_message.contains("must be called from the context of a Tokio 1.x runtime"),
-            "Expected panic message about missing Tokio runtime, but got: {}",
-            panic_message
-        );
-    }
-
-    #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
-    #[ignore]
-    // This test demonstrates a potential deadlock scenario in a multi-threaded Tokio runtime.
-    // It spawns Tokio tasks equal to the number of runtime worker threads (4) to emit log events.
-    // Each task attempts to acquire a mutex on the exporter in `SimpleLogProcessor::emit`.
- // Only one task obtains the lock, while the others are blocked, waiting for its release. - // - // The task holding the lock invokes the LogExporterThatRequiresTokio, which performs an - // asynchronous operation (e.g., network I/O simulated by `tokio::sleep`). This operation - // requires yielding control back to the Tokio runtime to make progress. - // - // However, all worker threads are occupied: - // - One thread is executing the async exporter operation - // - Three threads are blocked waiting for the mutex - // - // This leads to a deadlock as there are no available threads to drive the async operation - // to completion, preventing the mutex from being released. Consequently, neither the blocked - // tasks nor the exporter can proceed. - async fn test_simple_processor_async_exporter_with_all_runtime_worker_threads_blocked() { - let exporter = LogExporterThatRequiresTokio::new(); - let processor = Arc::new(SimpleLogProcessor::new(exporter.clone())); - - let concurrent_emit = 4; // number of worker threads - - let mut handles = vec![]; - // try send `concurrent_emit` events concurrently - for _ in 0..concurrent_emit { - let processor_clone = Arc::clone(&processor); - let handle = tokio::spawn(async move { - let mut record: SdkLogRecord = SdkLogRecord::new(); - let instrumentation: InstrumentationScope = Default::default(); - processor_clone.emit(&mut record, &instrumentation); - }); - handles.push(handle); - } - - // below code won't get executed - for handle in handles { - handle.await.unwrap(); - } - assert_eq!(exporter.len(), concurrent_emit); - } - - #[tokio::test(flavor = "multi_thread", worker_threads = 1)] - // This test uses a multi-threaded runtime setup with a single worker thread. Note that even - // though only one worker thread is created, it is distinct from the main thread. The processor - // emits a log event, and the exporter performs an async operation that requires the runtime. - // The single worker thread handles this operation without deadlocking, as long as no other - // tasks occupy the runtime. - async fn test_simple_processor_async_exporter_with_runtime() { - let exporter = LogExporterThatRequiresTokio::new(); - let processor = SimpleLogProcessor::new(exporter.clone()); - - let mut record: SdkLogRecord = SdkLogRecord::new(); - let instrumentation: InstrumentationScope = Default::default(); - - processor.emit(&mut record, &instrumentation); - - assert_eq!(exporter.len(), 1); - } - - #[tokio::test(flavor = "multi_thread")] - // This test uses a multi-threaded runtime setup with the default number of worker threads. - // The processor emits a log event, and the exporter, which requires the runtime for its async - // operations, can access one of the available worker threads to complete its task. As there - // are multiple threads, the exporter can proceed without blocking other tasks, ensuring the - // test completes successfully. - async fn test_simple_processor_async_exporter_with_multi_thread_runtime() { - let exporter = LogExporterThatRequiresTokio::new(); - - let processor = SimpleLogProcessor::new(exporter.clone()); - - let mut record: SdkLogRecord = SdkLogRecord::new(); - let instrumentation: InstrumentationScope = Default::default(); - - processor.emit(&mut record, &instrumentation); - - assert_eq!(exporter.len(), 1); - } - - #[tokio::test(flavor = "current_thread")] - #[ignore] - // This test uses a current-thread runtime, where all operations run on the main thread. 
- // The processor emits a log event while the runtime is blocked using `futures::block_on` - // to complete the export operation. The exporter, which performs an async operation and - // requires the runtime, cannot progress because the main thread is already blocked. - // This results in a deadlock, as the runtime cannot move forward. - async fn test_simple_processor_async_exporter_with_current_thread_runtime() { - let exporter = LogExporterThatRequiresTokio::new(); - - let processor = SimpleLogProcessor::new(exporter.clone()); - - let mut record: SdkLogRecord = SdkLogRecord::new(); - let instrumentation: InstrumentationScope = Default::default(); - - processor.emit(&mut record, &instrumentation); - - assert_eq!(exporter.len(), 1); - } } diff --git a/opentelemetry-sdk/src/logs/log_processor_with_async_runtime.rs b/opentelemetry-sdk/src/logs/log_processor_with_async_runtime.rs index c668fed6c6..6727d21bb7 100644 --- a/opentelemetry-sdk/src/logs/log_processor_with_async_runtime.rs +++ b/opentelemetry-sdk/src/logs/log_processor_with_async_runtime.rs @@ -283,7 +283,7 @@ where #[cfg(all(test, feature = "testing", feature = "logs"))] mod tests { use crate::error::OTelSdkResult; - use crate::logs::log_processor::{ + use crate::logs::batch_log_processor::{ OTEL_BLRP_EXPORT_TIMEOUT, OTEL_BLRP_MAX_EXPORT_BATCH_SIZE, OTEL_BLRP_MAX_QUEUE_SIZE, OTEL_BLRP_SCHEDULE_DELAY, }; @@ -294,7 +294,7 @@ mod tests { use crate::runtime; use crate::{ logs::{ - log_processor::{ + batch_log_processor::{ OTEL_BLRP_EXPORT_TIMEOUT_DEFAULT, OTEL_BLRP_MAX_EXPORT_BATCH_SIZE_DEFAULT, OTEL_BLRP_MAX_QUEUE_SIZE_DEFAULT, OTEL_BLRP_SCHEDULE_DELAY_DEFAULT, }, diff --git a/opentelemetry-sdk/src/logs/mod.rs b/opentelemetry-sdk/src/logs/mod.rs index 71588a1bf0..99f4e40f89 100644 --- a/opentelemetry-sdk/src/logs/mod.rs +++ b/opentelemetry-sdk/src/logs/mod.rs @@ -1,10 +1,11 @@ //! # OpenTelemetry Log SDK -mod error; +mod batch_log_processor; mod export; mod log_processor; mod logger; mod logger_provider; pub(crate) mod record; +mod simple_log_processor; /// In-Memory log exporter for testing purpose. #[cfg(any(feature = "testing", test))] @@ -14,15 +15,16 @@ pub mod in_memory_exporter; #[cfg_attr(docsrs, doc(cfg(any(feature = "testing", test))))] pub use in_memory_exporter::{InMemoryLogExporter, InMemoryLogExporterBuilder}; +pub use batch_log_processor::{ + BatchConfig, BatchConfigBuilder, BatchLogProcessor, BatchLogProcessorBuilder, +}; pub use error::{LogError, LogResult}; pub use export::{LogBatch, LogExporter}; -pub use log_processor::{ - BatchConfig, BatchConfigBuilder, BatchLogProcessor, BatchLogProcessorBuilder, LogProcessor, - SimpleLogProcessor, -}; +pub use log_processor::LogProcessor; pub use logger::SdkLogger; pub use logger_provider::{LoggerProviderBuilder, SdkLoggerProvider}; pub use record::{SdkLogRecord, TraceContext}; +pub use simple_log_processor::SimpleLogProcessor; #[cfg(feature = "experimental_logs_batch_log_processor_with_async_runtime")] /// Module for BatchLogProcessor with async runtime. diff --git a/opentelemetry-sdk/src/logs/simple_log_processor.rs b/opentelemetry-sdk/src/logs/simple_log_processor.rs new file mode 100644 index 0000000000..672570dff3 --- /dev/null +++ b/opentelemetry-sdk/src/logs/simple_log_processor.rs @@ -0,0 +1,430 @@ +//! # OpenTelemetry Simple Log Processor +//! The `SimpleLogProcessor` is one implementation of the `LogProcessor` interface. +//! +//! It forwards log records to the exporter immediately after they are emitted +//! (or one exporter after another if applicable). 
This processor is
+//! **synchronous** and is designed for debugging or testing purposes. It is
+//! **not suitable for production** environments due to its lack of batching,
+//! performance optimizations, or support for high-throughput scenarios.
+//!
+//! ## Diagram
+//!
+//! ```ascii
+//!   +-----+---------------+   +-----------------------+   +-------------------+
+//!   |     |               |   |                       |   |                   |
+//!   | SDK | Logger.emit() +---> (Simple)LogProcessor  +--->  LogExporter     |
+//!   +-----+---------------+   +-----------------------+   +-------------------+
+//! ```
+
+use crate::error::{OTelSdkError, OTelSdkResult};
+use crate::logs::log_processor::LogProcessor;
+use crate::{
+    logs::{LogBatch, LogExporter, SdkLogRecord},
+    Resource,
+};
+
+use opentelemetry::{otel_debug, otel_error, otel_warn, InstrumentationScope};
+
+use std::fmt::Debug;
+use std::sync::atomic::AtomicBool;
+use std::sync::Mutex;
+
+/// A [`LogProcessor`] designed for testing and debugging purposes, that immediately
+/// exports log records as they are emitted. Log records are exported synchronously
+/// in the same thread that emits the log record.
+/// When using this processor with the OTLP Exporter, the following exporter
+/// features are supported:
+/// - `grpc-tonic`: This requires LoggerProvider to be created within a tokio
+///   runtime. Logs can be emitted from any thread, including tokio runtime
+///   threads.
+/// - `reqwest-blocking-client`: LoggerProvider may be created anywhere, but
+///   logs must be emitted from a non-tokio runtime thread.
+/// - `reqwest-client`: LoggerProvider may be created anywhere, but logs must be
+///   emitted from a tokio runtime thread.
+///
+/// ## Example
+///
+/// ### Using a SimpleLogProcessor
+///
+/// ```rust
+/// use opentelemetry_sdk::logs::{SimpleLogProcessor, SdkLoggerProvider, LogExporter};
+/// use opentelemetry::global;
+/// use opentelemetry_sdk::logs::InMemoryLogExporter;
+///
+/// let exporter = InMemoryLogExporter::default(); // Replace with an actual exporter
+/// let provider = SdkLoggerProvider::builder()
+///     .with_simple_exporter(exporter)
+///     .build();
+///
+/// ```
+///
+#[derive(Debug)]
+pub struct SimpleLogProcessor<T: LogExporter> {
+    exporter: Mutex<T>,
+    is_shutdown: AtomicBool,
+}
+
+impl<T: LogExporter> SimpleLogProcessor<T> {
+    pub(crate) fn new(exporter: T) -> Self {
+        SimpleLogProcessor {
+            exporter: Mutex::new(exporter),
+            is_shutdown: AtomicBool::new(false),
+        }
+    }
+}
+
+impl<T: LogExporter> LogProcessor for SimpleLogProcessor<T> {
+    fn emit(&self, record: &mut SdkLogRecord, instrumentation: &InstrumentationScope) {
+        // noop after shutdown
+        if self.is_shutdown.load(std::sync::atomic::Ordering::Relaxed) {
+            // this is a warning, as the user is trying to log after the processor has been shutdown
+            otel_warn!(
+                name: "SimpleLogProcessor.Emit.ProcessorShutdown",
+            );
+            return;
+        }
+
+        let result = self
+            .exporter
+            .lock()
+            .map_err(|_| OTelSdkError::InternalFailure("SimpleLogProcessor mutex poison".into()))
+            .and_then(|exporter| {
+                let log_tuple = &[(record as &SdkLogRecord, instrumentation)];
+                futures_executor::block_on(exporter.export(LogBatch::new(log_tuple)))
+            });
+        // Handle errors with specific static names
+        match result {
+            Err(OTelSdkError::InternalFailure(_)) => {
+                // logging as debug as this is not a user error
+                otel_debug!(
+                    name: "SimpleLogProcessor.Emit.MutexPoisoning",
+                );
+            }
+            Err(err) => {
+                otel_error!(
+                    name: "SimpleLogProcessor.Emit.ExportError",
+                    error = format!("{}", err)
+                );
+            }
+            _ => {}
+        }
+    }
+
+    fn force_flush(&self) -> OTelSdkResult {
+        Ok(())
+    }
+
+    fn shutdown(&self) -> OTelSdkResult {
+        self.is_shutdown
+            .store(true, std::sync::atomic::Ordering::Relaxed);
+        if let Ok(mut exporter) = self.exporter.lock() {
+            exporter.shutdown()
+        } else {
+            Err(OTelSdkError::InternalFailure(
+                "SimpleLogProcessor mutex poison at shutdown".into(),
+            ))
+        }
+    }
+
+    fn set_resource(&self, resource: &Resource) {
+        if let Ok(mut exporter) = self.exporter.lock() {
+            exporter.set_resource(resource);
+        }
+    }
+}
+
+#[cfg(all(test, feature = "testing", feature = "logs"))]
+mod tests {
+    use crate::logs::log_processor::tests::MockLogExporter;
+    use crate::logs::{LogBatch, LogExporter, SdkLogRecord};
+    use crate::{
+        error::OTelSdkResult,
+        logs::{InMemoryLogExporterBuilder, LogProcessor, SdkLoggerProvider, SimpleLogProcessor},
+        Resource,
+    };
+    use opentelemetry::InstrumentationScope;
+    use opentelemetry::KeyValue;
+    use std::sync::atomic::{AtomicUsize, Ordering};
+    use std::sync::{Arc, Mutex};
+    use std::time::Duration;
+
+    #[derive(Debug, Clone)]
+    struct LogExporterThatRequiresTokio {
+        export_count: Arc<AtomicUsize>,
+    }
+
+    impl LogExporterThatRequiresTokio {
+        /// Creates a new instance of `LogExporterThatRequiresTokio`.
+        fn new() -> Self {
+            LogExporterThatRequiresTokio {
+                export_count: Arc::new(AtomicUsize::new(0)),
+            }
+        }
+
+        /// Returns the number of logs stored in the exporter.
+        fn len(&self) -> usize {
+            self.export_count.load(Ordering::Acquire)
+        }
+    }
+
+    impl LogExporter for LogExporterThatRequiresTokio {
+        #[allow(clippy::manual_async_fn)]
+        fn export(
+            &self,
+            batch: LogBatch<'_>,
+        ) -> impl std::future::Future<Output = OTelSdkResult> + Send {
+            // Simulate minimal dependency on tokio by sleeping asynchronously for a short duration
+            async move {
+                tokio::time::sleep(Duration::from_millis(50)).await;
+
+                for _ in batch.iter() {
+                    self.export_count.fetch_add(1, Ordering::Acquire);
+                }
+                Ok(())
+            }
+        }
+    }
+
+    #[test]
+    fn test_set_resource_simple_processor() {
+        let exporter = MockLogExporter {
+            resource: Arc::new(Mutex::new(None)),
+        };
+        let processor = SimpleLogProcessor::new(exporter.clone());
+        let _ = SdkLoggerProvider::builder()
+            .with_log_processor(processor)
+            .with_resource(
+                Resource::builder_empty()
+                    .with_attributes([
+                        KeyValue::new("k1", "v1"),
+                        KeyValue::new("k2", "v3"),
+                        KeyValue::new("k3", "v3"),
+                        KeyValue::new("k4", "v4"),
+                        KeyValue::new("k5", "v5"),
+                    ])
+                    .build(),
+            )
+            .build();
+        assert_eq!(exporter.get_resource().unwrap().into_iter().count(), 5);
+    }
+
+    #[test]
+    fn test_simple_shutdown() {
+        let exporter = InMemoryLogExporterBuilder::default()
+            .keep_records_on_shutdown()
+            .build();
+        let processor = SimpleLogProcessor::new(exporter.clone());
+
+        let mut record: SdkLogRecord = SdkLogRecord::new();
+        let instrumentation: InstrumentationScope = Default::default();
+
+        processor.emit(&mut record, &instrumentation);
+
+        processor.shutdown().unwrap();
+
+        let is_shutdown = processor
+            .is_shutdown
+            .load(std::sync::atomic::Ordering::Relaxed);
+        assert!(is_shutdown);
+
+        processor.emit(&mut record, &instrumentation);
+
+        assert_eq!(1, exporter.get_emitted_logs().unwrap().len())
+    }
+
+    #[test]
+    fn test_simple_processor_sync_exporter_without_runtime() {
+        let exporter = InMemoryLogExporterBuilder::default().build();
+        let processor = SimpleLogProcessor::new(exporter.clone());
+
+        let mut record: SdkLogRecord = SdkLogRecord::new();
+        let instrumentation: InstrumentationScope = Default::default();
+
+        processor.emit(&mut record, &instrumentation);
+
+        assert_eq!(exporter.get_emitted_logs().unwrap().len(), 1);
+    }
+
+    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+    async fn test_simple_processor_sync_exporter_with_runtime() {
+        let exporter = InMemoryLogExporterBuilder::default().build();
+        let processor = SimpleLogProcessor::new(exporter.clone());
+
+        let mut record: SdkLogRecord = SdkLogRecord::new();
+        let instrumentation: InstrumentationScope = Default::default();
+
+        processor.emit(&mut record, &instrumentation);
+
+        assert_eq!(exporter.get_emitted_logs().unwrap().len(), 1);
+    }
+
+    #[tokio::test(flavor = "multi_thread")]
+    async fn test_simple_processor_sync_exporter_with_multi_thread_runtime() {
+        let exporter = InMemoryLogExporterBuilder::default().build();
+        let processor = Arc::new(SimpleLogProcessor::new(exporter.clone()));
+
+        let mut handles = vec![];
+        for _ in 0..10 {
+            let processor_clone = Arc::clone(&processor);
+            let handle = tokio::spawn(async move {
+                let mut record: SdkLogRecord = SdkLogRecord::new();
+                let instrumentation: InstrumentationScope = Default::default();
+                processor_clone.emit(&mut record, &instrumentation);
+            });
+            handles.push(handle);
+        }
+
+        for handle in handles {
+            handle.await.unwrap();
+        }
+
+        assert_eq!(exporter.get_emitted_logs().unwrap().len(), 10);
+    }
+
+    #[tokio::test(flavor = "current_thread")]
+    async fn test_simple_processor_sync_exporter_with_current_thread_runtime() {
+        let exporter = InMemoryLogExporterBuilder::default().build();
+        let processor = SimpleLogProcessor::new(exporter.clone());
+
+        let mut record: SdkLogRecord = SdkLogRecord::new();
+        let instrumentation: InstrumentationScope = Default::default();
+
+        processor.emit(&mut record, &instrumentation);
+
+        assert_eq!(exporter.get_emitted_logs().unwrap().len(), 1);
+    }
+
+    #[test]
+    fn test_simple_processor_async_exporter_without_runtime() {
+        // Use `catch_unwind` to catch the panic caused by missing Tokio runtime
+        let result = std::panic::catch_unwind(|| {
+            let exporter = LogExporterThatRequiresTokio::new();
+            let processor = SimpleLogProcessor::new(exporter.clone());
+
+            let mut record: SdkLogRecord = SdkLogRecord::new();
+            let instrumentation: InstrumentationScope = Default::default();
+
+            // This will panic because a tokio async operation runs within the exporter without a runtime.
+            processor.emit(&mut record, &instrumentation);
+        });
+
+        // Verify that the panic occurred and check the panic message for the absence of a Tokio runtime
+        assert!(
+            result.is_err(),
+            "The test should fail due to missing Tokio runtime, but it did not."
+        );
+        let panic_payload = result.unwrap_err();
+        let panic_message = panic_payload
+            .downcast_ref::<String>()
+            .map(|s| s.as_str())
+            .or_else(|| panic_payload.downcast_ref::<&str>().copied())
+            .unwrap_or("No panic message");
+
+        assert!(
+            panic_message.contains("no reactor running")
+                || panic_message.contains("must be called from the context of a Tokio 1.x runtime"),
+            "Expected panic message about missing Tokio runtime, but got: {}",
+            panic_message
+        );
+    }
+
+    #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
+    #[ignore]
+    // This test demonstrates a potential deadlock scenario in a multi-threaded Tokio runtime.
+    // It spawns Tokio tasks equal to the number of runtime worker threads (4) to emit log events.
+    // Each task attempts to acquire a mutex on the exporter in `SimpleLogProcessor::emit`.
+    // Only one task obtains the lock, while the others are blocked, waiting for its release.
+    //
+    // The task holding the lock invokes the LogExporterThatRequiresTokio, which performs an
+    // asynchronous operation (e.g., network I/O simulated by `tokio::sleep`). This operation
+    // requires yielding control back to the Tokio runtime to make progress.
+    //
+    // However, all worker threads are occupied:
+    // - One thread is executing the async exporter operation
+    // - Three threads are blocked waiting for the mutex
+    //
+    // This leads to a deadlock as there are no available threads to drive the async operation
+    // to completion, preventing the mutex from being released. Consequently, neither the blocked
+    // tasks nor the exporter can proceed.
+    async fn test_simple_processor_async_exporter_with_all_runtime_worker_threads_blocked() {
+        let exporter = LogExporterThatRequiresTokio::new();
+        let processor = Arc::new(SimpleLogProcessor::new(exporter.clone()));
+
+        let concurrent_emit = 4; // number of worker threads
+
+        let mut handles = vec![];
+        // try send `concurrent_emit` events concurrently
+        for _ in 0..concurrent_emit {
+            let processor_clone = Arc::clone(&processor);
+            let handle = tokio::spawn(async move {
+                let mut record: SdkLogRecord = SdkLogRecord::new();
+                let instrumentation: InstrumentationScope = Default::default();
+                processor_clone.emit(&mut record, &instrumentation);
+            });
+            handles.push(handle);
+        }
+
+        // The code below is never reached: the deadlock prevents the spawned tasks from completing.
+        for handle in handles {
+            handle.await.unwrap();
+        }
+        assert_eq!(exporter.len(), concurrent_emit);
+    }
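The hazard the ignored test above demonstrates is not specific to this processor: any `block_on` on a runtime worker thread can starve the very future it is waiting for. A stand-alone sketch of the same class of hang (hypothetical, not part of this diff; assumes the `tokio` and `futures-executor` crates):

```rust
// A current-thread Tokio runtime has exactly one thread to both run this
// function and drive timers. block_on parks that thread until the sleep
// completes, but the parked thread is the one that must fire the timer,
// so the program hangs.
#[tokio::main(flavor = "current_thread")]
async fn main() {
    futures_executor::block_on(async {
        tokio::time::sleep(std::time::Duration::from_millis(10)).await;
    });
    println!("never reached");
}
```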
+
+    #[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+    // This test uses a multi-threaded runtime setup with a single worker thread. Note that even
+    // though only one worker thread is created, it is distinct from the main thread. The processor
+    // emits a log event, and the exporter performs an async operation that requires the runtime.
+    // The single worker thread handles this operation without deadlocking, as long as no other
+    // tasks occupy the runtime.
+    async fn test_simple_processor_async_exporter_with_runtime() {
+        let exporter = LogExporterThatRequiresTokio::new();
+        let processor = SimpleLogProcessor::new(exporter.clone());
+
+        let mut record: SdkLogRecord = SdkLogRecord::new();
+        let instrumentation: InstrumentationScope = Default::default();
+
+        processor.emit(&mut record, &instrumentation);
+
+        assert_eq!(exporter.len(), 1);
+    }
+
+    #[tokio::test(flavor = "multi_thread")]
+    // This test uses a multi-threaded runtime setup with the default number of worker threads.
+    // The processor emits a log event, and the exporter, which requires the runtime for its async
+    // operations, can access one of the available worker threads to complete its task. As there
+    // are multiple threads, the exporter can proceed without blocking other tasks, ensuring the
+    // test completes successfully.
+    async fn test_simple_processor_async_exporter_with_multi_thread_runtime() {
+        let exporter = LogExporterThatRequiresTokio::new();
+
+        let processor = SimpleLogProcessor::new(exporter.clone());
+
+        let mut record: SdkLogRecord = SdkLogRecord::new();
+        let instrumentation: InstrumentationScope = Default::default();
+
+        processor.emit(&mut record, &instrumentation);
+
+        assert_eq!(exporter.len(), 1);
+    }
+
+    #[tokio::test(flavor = "current_thread")]
+    #[ignore]
+    // This test uses a current-thread runtime, where all operations run on the main thread.
+    // The processor emits a log event while the runtime is blocked using `futures::block_on`
+    // to complete the export operation. The exporter, which performs an async operation and
+    // requires the runtime, cannot progress because the main thread is already blocked.
+    // This results in a deadlock, as the runtime cannot move forward.
+    async fn test_simple_processor_async_exporter_with_current_thread_runtime() {
+        let exporter = LogExporterThatRequiresTokio::new();
+
+        let processor = SimpleLogProcessor::new(exporter.clone());
+
+        let mut record: SdkLogRecord = SdkLogRecord::new();
+        let instrumentation: InstrumentationScope = Default::default();
+
+        processor.emit(&mut record, &instrumentation);
+
+        assert_eq!(exporter.len(), 1);
+    }
+}
diff --git a/opentelemetry-sdk/src/metrics/exporter.rs b/opentelemetry-sdk/src/metrics/exporter.rs
index d657c7238b..ce13a69c6c 100644
--- a/opentelemetry-sdk/src/metrics/exporter.rs
+++ b/opentelemetry-sdk/src/metrics/exporter.rs
@@ -1,6 +1,4 @@
 //! Interfaces for exporting metrics
-use async_trait::async_trait;
-
 use crate::error::OTelSdkResult;
 use crate::metrics::data::ResourceMetrics;
 
@@ -10,17 +8,19 @@ use super::Temporality;
 /// Exporter handles the delivery of metric data to external receivers.
 ///
 /// This is the final component in the metric push pipeline.
-#[async_trait]
 pub trait PushMetricExporter: Send + Sync + 'static {
     /// Export serializes and transmits metric data to a receiver.
     ///
     /// All retry logic must be contained in this function. The SDK does not
     /// implement any retry logic. All errors returned by this function are
     /// considered unrecoverable and will be logged.
-    async fn export(&self, metrics: &mut ResourceMetrics) -> OTelSdkResult;
+    fn export(
+        &self,
+        metrics: &mut ResourceMetrics,
+    ) -> impl std::future::Future<Output = OTelSdkResult> + Send;
 
     /// Flushes any metric data held by an exporter.
-    async fn force_flush(&self) -> OTelSdkResult;
+    fn force_flush(&self) -> OTelSdkResult;
 
     /// Releases any held computational resources.
     ///
diff --git a/opentelemetry-sdk/src/metrics/in_memory_exporter.rs b/opentelemetry-sdk/src/metrics/in_memory_exporter.rs
index eeaf640c45..50f03edbf9 100644
--- a/opentelemetry-sdk/src/metrics/in_memory_exporter.rs
+++ b/opentelemetry-sdk/src/metrics/in_memory_exporter.rs
@@ -5,7 +5,6 @@ use crate::metrics/exporter::PushMetricExporter;
 use crate::metrics::MetricError;
 use crate::metrics::MetricResult;
 use crate::metrics::Temporality;
-use async_trait::async_trait;
 use std::collections::VecDeque;
 use std::fmt;
 use std::sync::{Arc, Mutex};
@@ -263,7 +262,6 @@ impl InMemoryMetricExporter {
     }
 }
 
-#[async_trait]
 impl PushMetricExporter for InMemoryMetricExporter {
     async fn export(&self, metrics: &mut ResourceMetrics) -> OTelSdkResult {
         self.metrics
@@ -274,7 +272,7 @@ impl PushMetricExporter for InMemoryMetricExporter {
             .map_err(|_| OTelSdkError::InternalFailure("Failed to lock metrics".to_string()))
     }
 
-    async fn force_flush(&self) -> OTelSdkResult {
+    fn force_flush(&self) -> OTelSdkResult {
         Ok(()) // In this implementation, flush does nothing
     }
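With `async_trait` removed, `PushMetricExporter` implementations return the future directly, as the `InMemoryMetricExporter` change above shows. For reference, a minimal sketch of a custom implementation against the revised trait (the `NoopExporter` name and the `Temporality::Cumulative` choice are illustrative; `shutdown` and `temporality` are assumed to keep their existing synchronous shapes):

```rust
use opentelemetry_sdk::error::OTelSdkResult;
use opentelemetry_sdk::metrics::data::ResourceMetrics;
use opentelemetry_sdk::metrics::{exporter::PushMetricExporter, Temporality};

#[derive(Debug)]
struct NoopExporter;

impl PushMetricExporter for NoopExporter {
    // Equivalent to `async fn export(...)`; spelling out the return type
    // makes the `Send` bound from the trait definition visible.
    fn export(
        &self,
        _metrics: &mut ResourceMetrics,
    ) -> impl std::future::Future<Output = OTelSdkResult> + Send {
        async { Ok(()) }
    }

    fn force_flush(&self) -> OTelSdkResult {
        Ok(())
    }

    fn shutdown(&self) -> OTelSdkResult {
        Ok(())
    }

    fn temporality(&self) -> Temporality {
        Temporality::Cumulative
    }
}
```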
diff --git a/opentelemetry-sdk/src/metrics/periodic_reader.rs b/opentelemetry-sdk/src/metrics/periodic_reader.rs
index dda7f51fa2..79f4e9e802 100644
--- a/opentelemetry-sdk/src/metrics/periodic_reader.rs
+++ b/opentelemetry-sdk/src/metrics/periodic_reader.rs
@@ -59,7 +59,7 @@ where
     }
 
     /// Create a [PeriodicReader] with the given config.
-    pub fn build(self) -> PeriodicReader {
+    pub fn build(self) -> PeriodicReader<E> {
         PeriodicReader::new(self.exporter, self.interval)
     }
 }
@@ -124,24 +124,25 @@ where
 /// # drop(reader);
 /// # }
 /// ```
-#[derive(Clone)]
-pub struct PeriodicReader {
-    inner: Arc<PeriodicReaderInner>,
+pub struct PeriodicReader<E: PushMetricExporter> {
+    inner: Arc<PeriodicReaderInner<E>>,
 }
 
-impl PeriodicReader {
+impl<E: PushMetricExporter> Clone for PeriodicReader<E> {
+    fn clone(&self) -> Self {
+        Self {
+            inner: Arc::clone(&self.inner),
+        }
+    }
+}
+
+impl<E: PushMetricExporter> PeriodicReader<E> {
     /// Configuration options for a periodic reader with own thread
-    pub fn builder<E>(exporter: E) -> PeriodicReaderBuilder<E>
-    where
-        E: PushMetricExporter,
-    {
+    pub fn builder(exporter: E) -> PeriodicReaderBuilder<E> {
        PeriodicReaderBuilder::new(exporter)
     }
 
-    fn new<E>(exporter: E, interval: Duration) -> Self
-    where
-        E: PushMetricExporter,
-    {
+    fn new(exporter: E, interval: Duration) -> Self {
         let (message_sender, message_receiver): (Sender<Message>, Receiver<Message>) =
             mpsc::channel();
         let exporter_arc = Arc::new(exporter);
@@ -333,19 +334,19 @@ impl PeriodicReader {
     }
 }
 
-impl fmt::Debug for PeriodicReader {
+impl<E: PushMetricExporter> fmt::Debug for PeriodicReader<E> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         f.debug_struct("PeriodicReader").finish()
     }
 }
 
-struct PeriodicReaderInner {
-    exporter: Arc<dyn PushMetricExporter>,
+struct PeriodicReaderInner<E: PushMetricExporter> {
+    exporter: Arc<E>,
     message_sender: mpsc::Sender<Message>,
     producer: Mutex<Option<Weak<dyn SdkProducer>>>,
 }
 
-impl PeriodicReaderInner {
+impl<E: PushMetricExporter> PeriodicReaderInner<E> {
     fn register_pipeline(&self, producer: Weak<dyn SdkProducer>) {
         let mut inner = self.producer.lock().expect("lock poisoned");
         *inner = Some(producer);
@@ -472,7 +473,7 @@ enum Message {
     Shutdown(Sender<OTelSdkResult>),
 }
 
-impl MetricReader for PeriodicReader {
+impl<E: PushMetricExporter> MetricReader for PeriodicReader<E> {
     fn register_pipeline(&self, pipeline: Weak<Pipeline>) {
         self.inner.register_pipeline(pipeline);
     }
@@ -516,7 +517,6 @@ mod tests {
         },
         Resource,
     };
-    use async_trait::async_trait;
     use opentelemetry::metrics::MeterProvider;
     use std::{
         sync::{
@@ -548,7 +548,6 @@ mod tests {
         }
     }
 
-    #[async_trait]
     impl PushMetricExporter for MetricExporterThatFailsOnlyOnFirst {
         async fn export(&self, _metrics: &mut ResourceMetrics) -> OTelSdkResult {
             if self.count.fetch_add(1, Ordering::Relaxed) == 0 {
@@ -558,7 +557,7 @@ mod tests {
             }
         }
 
-        async fn force_flush(&self) -> OTelSdkResult {
+        fn force_flush(&self) -> OTelSdkResult {
             Ok(())
         }
 
@@ -576,13 +575,12 @@ mod tests {
         is_shutdown: Arc<AtomicBool>,
     }
 
-    #[async_trait]
     impl PushMetricExporter for MockMetricExporter {
         async fn export(&self, _metrics: &mut ResourceMetrics) -> OTelSdkResult {
             Ok(())
         }
 
-        async fn force_flush(&self) -> OTelSdkResult {
+        fn force_flush(&self) -> OTelSdkResult {
             Ok(())
         }
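Because `PeriodicReader` is now generic over its exporter, the exporter's concrete type becomes part of the reader's type, and the hand-written `Clone` above avoids an unnecessary `E: Clone` bound. A usage sketch (assuming the SDK's `testing` feature for `InMemoryMetricExporter` and the builder's `with_interval` setter):

```rust
use opentelemetry_sdk::metrics::{InMemoryMetricExporter, PeriodicReader, SdkMeterProvider};
use std::time::Duration;

fn main() {
    let exporter = InMemoryMetricExporter::default();

    // The inferred type is now PeriodicReader<InMemoryMetricExporter>,
    // rather than a type-erased PeriodicReader.
    let reader = PeriodicReader::builder(exporter)
        .with_interval(Duration::from_secs(30))
        .build();

    let provider = SdkMeterProvider::builder().with_reader(reader).build();
    let _ = provider.shutdown();
}
```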
diff --git a/opentelemetry-sdk/src/metrics/periodic_reader_with_async_runtime.rs b/opentelemetry-sdk/src/metrics/periodic_reader_with_async_runtime.rs
index 9315d38b91..5ba1de731f 100644
--- a/opentelemetry-sdk/src/metrics/periodic_reader_with_async_runtime.rs
+++ b/opentelemetry-sdk/src/metrics/periodic_reader_with_async_runtime.rs
@@ -103,10 +103,10 @@ where
     }
 
     /// Create a [PeriodicReader] with the given config.
-    pub fn build(self) -> PeriodicReader {
+    pub fn build(self) -> PeriodicReader<E> {
         let (message_sender, message_receiver) = mpsc::channel(256);
 
-        let worker = move |reader: &PeriodicReader| {
+        let worker = move |reader: &PeriodicReader<E>| {
             let runtime = self.runtime.clone();
             let reader = reader.clone();
             self.runtime.spawn(Box::pin(async move {
@@ -184,33 +184,40 @@ where
 /// # drop(reader);
 /// # }
 /// ```
-#[derive(Clone)]
-pub struct PeriodicReader {
-    exporter: Arc<dyn PushMetricExporter>,
-    inner: Arc<Mutex<PeriodicReaderInner>>,
+pub struct PeriodicReader<E: PushMetricExporter> {
+    exporter: Arc<E>,
+    inner: Arc<Mutex<PeriodicReaderInner<E>>>,
 }
 
-impl PeriodicReader {
+impl<E: PushMetricExporter> Clone for PeriodicReader<E> {
+    fn clone(&self) -> Self {
+        Self {
+            exporter: Arc::clone(&self.exporter),
+            inner: Arc::clone(&self.inner),
+        }
+    }
+}
+
+impl<E: PushMetricExporter> PeriodicReader<E> {
     /// Configuration options for a periodic reader
-    pub fn builder<E, RT>(exporter: E, runtime: RT) -> PeriodicReaderBuilder<E, RT>
+    pub fn builder<RT>(exporter: E, runtime: RT) -> PeriodicReaderBuilder<E, RT>
     where
-        E: PushMetricExporter,
         RT: Runtime,
     {
         PeriodicReaderBuilder::new(exporter, runtime)
     }
 }
 
-impl fmt::Debug for PeriodicReader {
+impl<E: PushMetricExporter> fmt::Debug for PeriodicReader<E> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         f.debug_struct("PeriodicReader").finish()
     }
 }
 
-struct PeriodicReaderInner {
+struct PeriodicReaderInner<E: PushMetricExporter> {
     message_sender: mpsc::Sender<Message>,
     is_shutdown: bool,
-    sdk_producer_or_worker: ProducerOrWorker,
+    sdk_producer_or_worker: ProducerOrWorker<E>,
 }
 
 #[derive(Debug)]
@@ -220,19 +227,20 @@ enum Message {
     Shutdown(oneshot::Sender<OTelSdkResult>),
 }
 
-enum ProducerOrWorker {
+enum ProducerOrWorker<E: PushMetricExporter> {
     Producer(Weak<dyn SdkProducer>),
-    Worker(Box<dyn FnOnce(&PeriodicReader) + Send + Sync>),
+    #[allow(clippy::type_complexity)]
+    Worker(Box<dyn FnOnce(&PeriodicReader<E>) + Send + Sync>),
 }
 
-struct PeriodicReaderWorker<RT: Runtime> {
-    reader: PeriodicReader,
+struct PeriodicReaderWorker<E: PushMetricExporter, RT: Runtime> {
+    reader: PeriodicReader<E>,
     timeout: Duration,
     runtime: RT,
     rm: ResourceMetrics,
 }
 
-impl<RT: Runtime> PeriodicReaderWorker<RT> {
+impl<E: PushMetricExporter, RT: Runtime> PeriodicReaderWorker<E, RT> {
     async fn collect_and_export(&mut self) -> OTelSdkResult {
         self.reader
             .collect(&mut self.rm)
@@ -323,7 +331,7 @@ impl<RT: Runtime> PeriodicReaderWorker<RT> {
     }
 }
 
-impl MetricReader for PeriodicReader {
+impl<E: PushMetricExporter> MetricReader for PeriodicReader<E> {
     fn register_pipeline(&self, pipeline: Weak<Pipeline>) {
         let mut inner = match self.inner.lock() {
             Ok(guard) => guard,
diff --git a/opentelemetry-sdk/src/trace/span_processor.rs b/opentelemetry-sdk/src/trace/span_processor.rs
index ed981012c9..8ba2941fbf 100644
--- a/opentelemetry-sdk/src/trace/span_processor.rs
+++ b/opentelemetry-sdk/src/trace/span_processor.rs
@@ -1268,7 +1268,7 @@ mod tests {
             processor.on_end(span);
         }
 
-        tokio::time::sleep(Duration::from_millis(200)).await;
+        tokio::time::sleep(Duration::from_millis(1000)).await;
 
         let exported_spans = exporter_shared.lock().unwrap();
         assert_eq!(exported_spans.len(), 4);
@@ -1292,7 +1292,7 @@ mod tests {
             processor.on_end(span);
         }
 
-        tokio::time::sleep(Duration::from_millis(200)).await;
+        tokio::time::sleep(Duration::from_millis(1000)).await;
 
         let exported_spans = exporter_shared.lock().unwrap();
         assert_eq!(exported_spans.len(), 4);
@@ -1327,7 +1327,7 @@ mod tests {
         }
 
         // Allow time for batching and export
-        tokio::time::sleep(Duration::from_millis(200)).await;
+        tokio::time::sleep(Duration::from_millis(1000)).await;
 
         // Verify exported spans
         let exported_spans = exporter_shared.lock().unwrap();
diff --git a/opentelemetry-stdout/Cargo.toml b/opentelemetry-stdout/Cargo.toml
index 80b539e642..48ad6bfafd 100644
--- a/opentelemetry-stdout/Cargo.toml
+++ b/opentelemetry-stdout/Cargo.toml
@@ -22,23 +22,19 @@ rustdoc-args = ["--cfg", "docsrs"]
 
 [features]
 default = ["trace", "metrics", "logs"]
 trace = ["opentelemetry/trace", "opentelemetry_sdk/trace", "futures-util"]
["opentelemetry/trace", "opentelemetry_sdk/trace", "futures-util"] -metrics = ["async-trait", "opentelemetry/metrics", "opentelemetry_sdk/metrics"] -logs = ["opentelemetry/logs", "opentelemetry_sdk/logs", "async-trait", "thiserror", "opentelemetry_sdk/spec_unstable_logs_enabled"] +metrics = ["opentelemetry/metrics", "opentelemetry_sdk/metrics"] +logs = ["opentelemetry/logs", "opentelemetry_sdk/logs", "opentelemetry_sdk/spec_unstable_logs_enabled"] [dependencies] -async-trait = { workspace = true, optional = true } chrono = { version = "0.4.34", default-features = false, features = ["now"] } -thiserror = { workspace = true, optional = true } futures-util = { workspace = true, optional = true } opentelemetry = { version = "0.28", path = "../opentelemetry" } opentelemetry_sdk = { version = "0.28", path = "../opentelemetry-sdk" } -serde = { workspace = true, features = ["derive"] } [dev-dependencies] opentelemetry = { path = "../opentelemetry", features = ["metrics"] } opentelemetry_sdk = { path = "../opentelemetry-sdk", features = ["rt-tokio", "metrics"] } opentelemetry-appender-tracing = { path = "../opentelemetry-appender-tracing"} -opentelemetry-semantic-conventions = { path = "../opentelemetry-semantic-conventions" } tracing = { workspace = true, features = ["std"]} tracing-subscriber = { workspace = true, features = ["registry", "std"] } tokio = { workspace = true, features = ["full"] } diff --git a/opentelemetry-stdout/src/logs/exporter.rs b/opentelemetry-stdout/src/logs/exporter.rs index 4acd45a5a7..518dbfe2a3 100644 --- a/opentelemetry-stdout/src/logs/exporter.rs +++ b/opentelemetry-stdout/src/logs/exporter.rs @@ -30,36 +30,30 @@ impl fmt::Debug for LogExporter { } impl opentelemetry_sdk::logs::LogExporter for LogExporter { - /// Export spans to stdout - #[allow(clippy::manual_async_fn)] - fn export( - &self, - batch: LogBatch<'_>, - ) -> impl std::future::Future + Send { - async move { - if self.is_shutdown.load(atomic::Ordering::SeqCst) { - Err(OTelSdkError::AlreadyShutdown) + /// Export logs to stdout + async fn export(&self, batch: LogBatch<'_>) -> OTelSdkResult { + if self.is_shutdown.load(atomic::Ordering::SeqCst) { + Err(OTelSdkError::AlreadyShutdown) + } else { + println!("Logs"); + if self + .resource_emitted + .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst) + .is_err() + { + print_logs(batch); } else { - println!("Logs"); - if self - .resource_emitted - .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst) - .is_err() - { - print_logs(batch); - } else { - println!("Resource"); - if let Some(schema_url) = self.resource.schema_url() { - println!("\t Resource SchemaUrl: {:?}", schema_url); - } - self.resource.iter().for_each(|(k, v)| { - println!("\t -> {}={:?}", k, v); - }); - print_logs(batch); + println!("Resource"); + if let Some(schema_url) = self.resource.schema_url() { + println!("\t Resource SchemaUrl: {:?}", schema_url); } - - Ok(()) + self.resource.iter().for_each(|(k, v)| { + println!("\t -> {}={:?}", k, v); + }); + print_logs(batch); } + + Ok(()) } } diff --git a/opentelemetry-stdout/src/metrics/exporter.rs b/opentelemetry-stdout/src/metrics/exporter.rs index 005be41d61..a30e649baf 100644 --- a/opentelemetry-stdout/src/metrics/exporter.rs +++ b/opentelemetry-stdout/src/metrics/exporter.rs @@ -1,4 +1,3 @@ -use async_trait::async_trait; use chrono::{DateTime, Utc}; use core::{f64, fmt}; use opentelemetry_sdk::metrics::Temporality; @@ -39,7 +38,6 @@ impl fmt::Debug for MetricExporter { } } -#[async_trait] impl PushMetricExporter for 
     /// Write Metrics to stdout
     async fn export(&self, metrics: &mut ResourceMetrics) -> OTelSdkResult {
@@ -60,7 +58,7 @@ impl PushMetricExporter for MetricExporter {
         }
     }
 
-    async fn force_flush(&self) -> OTelSdkResult {
+    fn force_flush(&self) -> OTelSdkResult {
         // exporter holds no state, nothing to flush
         Ok(())
     }
diff --git a/stress/src/logs.rs b/stress/src/logs.rs
index f1a7ae22cb..65c0943b35 100644
--- a/stress/src/logs.rs
+++ b/stress/src/logs.rs
@@ -24,11 +24,8 @@ mod throughput;
 struct MockLogExporter;
 
 impl LogExporter for MockLogExporter {
-    fn export(
-        &self,
-        _batch: LogBatch<'_>,
-    ) -> impl std::future::Future<Output = OTelSdkResult> + Send {
-        async { Ok(()) }
+    async fn export(&self, _batch: LogBatch<'_>) -> OTelSdkResult {
+        Ok(())
     }
 }
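The stress-test change above is purely syntactic: with async-fn-in-trait, `async fn export(...)` is sugar for a method returning `impl Future<Output = OTelSdkResult> + Send`, so either spelling satisfies `LogExporter::export`. A side-by-side sketch with two hypothetical exporters:

```rust
use opentelemetry_sdk::error::OTelSdkResult;
use opentelemetry_sdk::logs::{LogBatch, LogExporter};

#[derive(Debug)]
struct SugaredExporter;

impl LogExporter for SugaredExporter {
    // `async fn` sugar, as used by stress/src/logs.rs after this change.
    async fn export(&self, _batch: LogBatch<'_>) -> OTelSdkResult {
        Ok(())
    }
}

#[derive(Debug)]
struct DesugaredExporter;

impl LogExporter for DesugaredExporter {
    // The desugared form, as used by the test exporters earlier in this diff.
    #[allow(clippy::manual_async_fn)]
    fn export(
        &self,
        _batch: LogBatch<'_>,
    ) -> impl std::future::Future<Output = OTelSdkResult> + Send {
        async { Ok(()) }
    }
}
```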