diff --git a/opentelemetry-api/Cargo.toml b/opentelemetry-api/Cargo.toml index 51f08b2553..5c20e0fbc2 100644 --- a/opentelemetry-api/Cargo.toml +++ b/opentelemetry-api/Cargo.toml @@ -29,3 +29,4 @@ default = ["trace"] trace = ["pin-project-lite"] metrics = ["fnv"] testing = ["trace"] +logs = [] \ No newline at end of file diff --git a/opentelemetry-api/src/common.rs b/opentelemetry-api/src/common.rs index 987865cd3f..216a7f0df8 100644 --- a/opentelemetry-api/src/common.rs +++ b/opentelemetry-api/src/common.rs @@ -99,6 +99,16 @@ impl From> for Key { } } +impl From> for Key { + /// Convert a `Cow<'static, str>` to a `Key` + fn from(string: Cow<'static, str>) -> Self { + match string { + Cow::Borrowed(s) => Key(OtelString::Static(s)), + Cow::Owned(s) => Key(OtelString::Owned(s)), + } + } +} + impl fmt::Debug for Key { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { self.0.fmt(fmt) diff --git a/opentelemetry-api/src/global/error_handler.rs b/opentelemetry-api/src/global/error_handler.rs index 5ff8783eea..290a8a2043 100644 --- a/opentelemetry-api/src/global/error_handler.rs +++ b/opentelemetry-api/src/global/error_handler.rs @@ -1,6 +1,8 @@ use std::sync::PoisonError; use std::sync::RwLock; +#[cfg(feature = "logs")] +use crate::logs::LogError; #[cfg(feature = "metrics")] use crate::metrics::MetricsError; #[cfg(feature = "trace")] @@ -23,6 +25,13 @@ pub enum Error { #[error(transparent)] /// An issue raised by the metrics module. Metric(#[from] MetricsError), + + #[cfg(feature = "logs")] + #[cfg_attr(docsrs, doc(cfg(feature = "logs")))] + #[error(transparent)] + /// Failed to export logs. + Log(#[from] LogError), + #[error("{0}")] /// Other types of failures not covered by the variants above. Other(String), @@ -49,6 +58,9 @@ pub fn handle_error>(err: T) { #[cfg(feature = "trace")] #[cfg_attr(docsrs, doc(cfg(feature = "trace")))] Error::Trace(err) => eprintln!("OpenTelemetry trace error occurred. 
{}", err), + #[cfg(feature = "logs")] + #[cfg_attr(docsrs, doc(cfg(feature = "logs")))] + Error::Log(err) => eprintln!("OpenTelemetry log error occurred. {}", err), Error::Other(err_msg) => eprintln!("OpenTelemetry error occurred. {}", err_msg), }, } diff --git a/opentelemetry-api/src/global/logs.rs b/opentelemetry-api/src/global/logs.rs new file mode 100644 index 0000000000..986679a412 --- /dev/null +++ b/opentelemetry-api/src/global/logs.rs @@ -0,0 +1,158 @@ +use std::{ + borrow::Cow, + fmt, mem, + sync::{Arc, RwLock}, +}; + +use once_cell::sync::Lazy; + +use crate::{ + logs::{Logger, LoggerProvider, NoopLoggerProvider}, + KeyValue, +}; + +/// Allows a specific [`LoggerProvider`] to be used generically, by mirroring +/// the interface, and boxing the returned types. +/// +/// [`LoggerProvider`]: crate::logs::LoggerProvider. +pub trait ObjectSafeLoggerProvider { + /// Creates a versioned named [`Logger`] instance that is a trait object + /// through the underlying [`LoggerProvider`]. + /// + /// [`Logger`]: crate::logs::Logger + /// [`LoggerProvider`]: crate::logs::LoggerProvider + fn versioned_logger_boxed( + &self, + name: Cow<'static, str>, + version: Option>, + schema_url: Option>, + attributes: Option>, + include_trace_context: bool, + ) -> Box; +} + +impl ObjectSafeLoggerProvider for P +where + L: Logger + Send + Sync + 'static, + P: LoggerProvider, +{ + fn versioned_logger_boxed( + &self, + name: Cow<'static, str>, + version: Option>, + schema_url: Option>, + attributes: Option>, + include_trace_context: bool, + ) -> Box { + Box::new(self.versioned_logger( + name, + version, + schema_url, + attributes, + include_trace_context, + )) + } +} + +pub struct BoxedLogger(Box); + +impl fmt::Debug for BoxedLogger { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("BoxedLogger") + } +} + +impl Logger for BoxedLogger { + fn emit(&self, record: crate::logs::LogRecord) { + self.0.emit(record) + } +} + +#[derive(Clone)] +/// Represents the 
globally configured [`LoggerProvider`] instance. +pub struct GlobalLoggerProvider { + provider: Arc, +} + +impl fmt::Debug for GlobalLoggerProvider { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("GlobalLoggerProvider") + } +} + +impl GlobalLoggerProvider { + fn new< + L: Logger + Send + Sync + 'static, + P: LoggerProvider + Send + Sync + 'static, + >( + provider: P, + ) -> Self { + GlobalLoggerProvider { + provider: Arc::new(provider), + } + } +} + +impl LoggerProvider for GlobalLoggerProvider { + type Logger = BoxedLogger; + + fn versioned_logger( + &self, + name: impl Into>, + version: Option>, + schema_url: Option>, + attributes: Option>, + include_trace_context: bool, + ) -> Self::Logger { + BoxedLogger(self.provider.versioned_logger_boxed( + name.into(), + version, + schema_url, + attributes, + include_trace_context, + )) + } +} + +static GLOBAL_LOGGER_PROVIDER: Lazy> = + Lazy::new(|| RwLock::new(GlobalLoggerProvider::new(NoopLoggerProvider::new()))); + +/// Returns an instance of the currently configured global [`LoggerProvider`] +/// through [`GlobalLoggerProvider`]. +/// +/// [`LoggerProvider`]: crate::logs::LoggerProvider +pub fn logger_provider() -> GlobalLoggerProvider { + GLOBAL_LOGGER_PROVIDER + .read() + .expect("GLOBAL_LOGGER_PROVIDER RwLock poisoned") + .clone() +} + +/// Creates a named instance of [`Logger`] via the configured +/// [`GlobalLoggerProvider`]. +/// +/// If `name` is an empty string, the provider will use a default name. +/// +/// [`Logger`]: crate::logs::Logger +pub fn logger(name: Cow<'static, str>) -> BoxedLogger { + logger_provider().logger(name) +} + +/// Sets the given [`LoggerProvider`] instance as the current global provider, +/// returning the [`LoggerProvider`] instance that was previously set as global +/// provider. 
+pub fn set_logger_provider(new_provider: P) -> GlobalLoggerProvider +where + L: Logger + Send + Sync + 'static, + P: LoggerProvider + Send + Sync + 'static, +{ + let mut provider = GLOBAL_LOGGER_PROVIDER + .write() + .expect("GLOBAL_LOGGER_PROVIDER RwLock poisoned"); + mem::replace(&mut *provider, GlobalLoggerProvider::new(new_provider)) +} + +/// Shut down the current global [`LoggerProvider`]. +pub fn shutdown_logger_provider() { + let _ = set_logger_provider(NoopLoggerProvider::new()); +} diff --git a/opentelemetry-api/src/global/mod.rs b/opentelemetry-api/src/global/mod.rs index ec32099c6b..966f532637 100644 --- a/opentelemetry-api/src/global/mod.rs +++ b/opentelemetry-api/src/global/mod.rs @@ -142,6 +142,8 @@ //! [`set_meter_provider`]: crate::global::set_meter_provider mod error_handler; +#[cfg(feature = "logs")] +mod logs; #[cfg(feature = "metrics")] mod metrics; #[cfg(feature = "trace")] @@ -150,6 +152,12 @@ mod propagation; mod trace; pub use error_handler::{handle_error, set_error_handler, Error}; +#[cfg(feature = "logs")] +#[cfg_attr(docsrs, doc(cfg(feature = "logs")))] +pub use logs::{ + logger, logger_provider, set_logger_provider, shutdown_logger_provider, GlobalLoggerProvider, + ObjectSafeLoggerProvider, +}; #[cfg(feature = "metrics")] #[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] pub use metrics::{ diff --git a/opentelemetry-api/src/lib.rs b/opentelemetry-api/src/lib.rs index e19182726f..45fecee53c 100644 --- a/opentelemetry-api/src/lib.rs +++ b/opentelemetry-api/src/lib.rs @@ -67,6 +67,10 @@ pub mod propagation; #[cfg_attr(docsrs, doc(cfg(feature = "trace")))] pub mod trace; +#[cfg(feature = "logs")] +#[cfg_attr(docsrs, doc(cfg(feature = "logs")))] +pub mod logs; + #[doc(hidden)] #[cfg(any(feature = "metrics", feature = "trace"))] pub mod time { diff --git a/opentelemetry-api/src/logs/logger.rs b/opentelemetry-api/src/logs/logger.rs new file mode 100644 index 0000000000..04c68d334d --- /dev/null +++ b/opentelemetry-api/src/logs/logger.rs @@ 
-0,0 +1,52 @@ +use std::borrow::Cow; + +use crate::{logs::LogRecord, KeyValue}; + +/// The interface for emitting [`LogRecord`]s. +pub trait Logger { + /// Emit a [`LogRecord`]. If this `Logger` was created with + /// `include_trace_context` set to `true`, the logger will set the record's + /// [`TraceContext`] to the active trace context, using the current thread's + /// [`Context`]. + /// + /// [`Context`]: crate::Context + /// [`TraceContext`]: crate::logs::TraceContext + fn emit(&self, record: LogRecord); +} + +/// Interfaces that can create [`Logger`] instances. +pub trait LoggerProvider { + /// The [`Logger`] type that this provider will return. + type Logger: Logger; + + /// Returns a new versioned logger with a given name. + /// + /// The `name` should be the application name or the name of the library + /// providing instrumentation. If the name is empty, then an + /// implementation-defined default name may be used instead. + /// + /// If `include_trace_context` is `true`, the newly created [`Logger`] + /// should set the [`TraceContext`] associated with a record to the + /// current thread's active trace context, using [`Context`]. + /// + /// [`Context`]: crate::Context + /// [`TraceContext`]: crate::logs::TraceContext + + fn versioned_logger( + &self, + name: impl Into>, + version: Option>, + schema_url: Option>, + attributes: Option>, + include_trace_context: bool, + ) -> Self::Logger; + + /// Returns a new logger with the given name. + /// + /// The `name` should be the application name or the name of the library + /// providing instrumentation. If the name is empty, then an + /// implementation-defined default name may be used instead. 
+ fn logger(&self, name: impl Into>) -> Self::Logger { + self.versioned_logger(name, None, None, None, true) + } +} diff --git a/opentelemetry-api/src/logs/mod.rs b/opentelemetry-api/src/logs/mod.rs new file mode 100644 index 0000000000..dae7a3c8d4 --- /dev/null +++ b/opentelemetry-api/src/logs/mod.rs @@ -0,0 +1,72 @@ +//! # OpenTelemetry Logs API + +use crate::ExportError; +use futures_channel::{mpsc::TrySendError, oneshot::Canceled}; +use std::time::Duration; +use thiserror::Error; + +mod logger; +mod noop; +mod record; + +pub use logger::{Logger, LoggerProvider}; +pub use noop::NoopLoggerProvider; +pub use record::{AnyValue, LogRecord, LogRecordBuilder, Severity, TraceContext}; + +/// Describe the result of operations in log SDK. +pub type LogResult = Result; + +#[derive(Error, Debug)] +#[non_exhaustive] +/// Errors returned by the log SDK. +pub enum LogError { + /// Export failed with the error returned by the exporter. + #[error("Exporter {} encountered the following errors: {0}", .0.exporter_name())] + ExportFailed(Box), + + /// Export failed to finish after certain period and processor stopped the export. + #[error("Exporter timed out after {} seconds", .0.as_secs())] + ExportTimedOut(Duration), + + /// Other errors propagated from log SDK that weren't covered above. 
+ #[error(transparent)] + Other(#[from] Box), +} + +impl From for LogError +where + T: ExportError, +{ + fn from(err: T) -> Self { + LogError::ExportFailed(Box::new(err)) + } +} + +impl From> for LogError { + fn from(err: TrySendError) -> Self { + LogError::Other(Box::new(err.into_send_error())) + } +} + +impl From for LogError { + fn from(err: Canceled) -> Self { + LogError::Other(Box::new(err)) + } +} + +impl From for LogError { + fn from(err_msg: String) -> Self { + LogError::Other(Box::new(Custom(err_msg))) + } +} + +impl From<&'static str> for LogError { + fn from(err_msg: &'static str) -> Self { + LogError::Other(Box::new(Custom(err_msg.into()))) + } +} + +/// Wrap type for string +#[derive(Error, Debug)] +#[error("{0}")] +struct Custom(String); diff --git a/opentelemetry-api/src/logs/noop.rs b/opentelemetry-api/src/logs/noop.rs new file mode 100644 index 0000000000..172b8ad3bb --- /dev/null +++ b/opentelemetry-api/src/logs/noop.rs @@ -0,0 +1,40 @@ +use std::borrow::Cow; + +use crate::{ + logs::{LogRecord, Logger, LoggerProvider}, + KeyValue, +}; + +/// A no-op implementation of a [`LoggerProvider`]. +#[derive(Clone, Debug, Default)] +pub struct NoopLoggerProvider(()); + +impl NoopLoggerProvider { + /// Create a new no-op logger provider. 
+ pub fn new() -> Self { + NoopLoggerProvider(()) + } +} + +impl LoggerProvider for NoopLoggerProvider { + type Logger = NoopLogger; + + fn versioned_logger( + &self, + _name: impl Into>, + _version: Option>, + _schema_url: Option>, + _attributes: Option>, + _include_trace_context: bool, + ) -> Self::Logger { + NoopLogger(()) + } +} + +/// A no-op implementation of a [`Logger`] +#[derive(Clone, Debug)] +pub struct NoopLogger(()); + +impl Logger for NoopLogger { + fn emit(&self, _record: LogRecord) {} +} diff --git a/opentelemetry-api/src/logs/record.rs b/opentelemetry-api/src/logs/record.rs new file mode 100644 index 0000000000..f0dbf6b319 --- /dev/null +++ b/opentelemetry-api/src/logs/record.rs @@ -0,0 +1,366 @@ +use crate::{ + trace::{OrderMap, SpanContext, SpanId, TraceContextExt, TraceFlags, TraceId}, + Array, Key, StringValue, Value, +}; +use std::{borrow::Cow, time::SystemTime}; + +#[derive(Debug, Clone, Default)] +#[non_exhaustive] +/// LogRecord represents all data carried by a log record, and +/// is provided to `LogExporter`s as input. +pub struct LogRecord { + /// Record timestamp + pub timestamp: Option, + + /// Timestamp for when the record was observed by OpenTelemetry + pub observed_timestamp: Option, + + /// Trace context for logs associated with spans + pub trace_context: Option, + + /// The original severity string from the source + pub severity_text: Option>, + /// The corresponding severity value, normalized + pub severity_number: Option, + + /// Record body + pub body: Option, + + /// Additional attributes associated with this record + pub attributes: Option>, +} + +impl LogRecord { + /// Create a [`LogRecordBuilder`] to create a new Log Record + pub fn builder() -> LogRecordBuilder { + LogRecordBuilder::new() + } +} + +/// TraceContext stores the trace data for logs that have an associated +/// span. 
+#[derive(Debug, Clone)] +#[non_exhaustive] +pub struct TraceContext { + /// Trace id + pub trace_id: TraceId, + /// Span Id + pub span_id: SpanId, + /// Trace flags + pub trace_flags: Option, +} + +impl From<&SpanContext> for TraceContext { + fn from(span_context: &SpanContext) -> Self { + TraceContext { + trace_id: span_context.trace_id(), + span_id: span_context.span_id(), + trace_flags: Some(span_context.trace_flags()), + } + } +} + +/// Value types for representing arbitrary values in a log record. +#[derive(Debug, Clone)] +pub enum AnyValue { + /// An integer value + Int(i64), + /// A double value + Double(f64), + /// A string value + String(StringValue), + /// A boolean value + Boolean(bool), + /// A byte array + Bytes(Vec), + /// An array of `Any` values + ListAny(Vec), + /// A map of string keys to `Any` values, arbitrarily nested. + Map(OrderMap), +} + +macro_rules! impl_trivial_from { + ($t:ty, $variant:path) => { + impl From<$t> for AnyValue { + fn from(val: $t) -> AnyValue { + $variant(val.into()) + } + } + }; +} + +impl_trivial_from!(i8, AnyValue::Int); +impl_trivial_from!(i16, AnyValue::Int); +impl_trivial_from!(i32, AnyValue::Int); +impl_trivial_from!(i64, AnyValue::Int); + +impl_trivial_from!(u8, AnyValue::Int); +impl_trivial_from!(u16, AnyValue::Int); +impl_trivial_from!(u32, AnyValue::Int); + +impl_trivial_from!(f64, AnyValue::Double); +impl_trivial_from!(f32, AnyValue::Double); + +impl_trivial_from!(String, AnyValue::String); +impl_trivial_from!(Cow<'static, str>, AnyValue::String); +impl_trivial_from!(&'static str, AnyValue::String); +impl_trivial_from!(StringValue, AnyValue::String); + +impl_trivial_from!(bool, AnyValue::Boolean); + +impl> FromIterator for AnyValue { + /// Creates an [`AnyValue::ListAny`] value from a sequence of `Into` values. 
+ fn from_iter>(iter: I) -> Self { + AnyValue::ListAny(iter.into_iter().map(Into::into).collect()) + } +} + +impl, V: Into> FromIterator<(K, V)> for AnyValue { + /// Creates an [`AnyValue::Map`] value from a sequence of key-value pairs + /// that can be converted into a `Key` and `AnyValue` respectively. + fn from_iter>(iter: I) -> Self { + AnyValue::Map(OrderMap::from_iter( + iter.into_iter().map(|(k, v)| (k.into(), v.into())), + )) + } +} + +impl From for AnyValue { + fn from(value: Value) -> Self { + match value { + Value::Bool(b) => b.into(), + Value::I64(i) => i.into(), + Value::F64(f) => f.into(), + Value::String(s) => s.into(), + Value::Array(a) => match a { + Array::Bool(b) => AnyValue::from_iter(b), + Array::F64(f) => AnyValue::from_iter(f), + Array::I64(i) => AnyValue::from_iter(i), + Array::String(s) => AnyValue::from_iter(s), + }, + } + } +} + +/// A normalized severity value. +#[derive(Debug, Copy, Clone, PartialEq, Eq, Ord, PartialOrd)] +pub enum Severity { + /// TRACE + Trace = 1, + /// TRACE2 + Trace2 = 2, + /// TRACE3 + Trace3 = 3, + /// TRACE4 + Trace4 = 4, + /// DEBUG + Debug = 5, + /// DEBUG2 + Debug2 = 6, + /// DEBUG3 + Debug3 = 7, + /// DEBUG4 + Debug4 = 8, + /// INFO + Info = 9, + /// INFO2 + Info2 = 10, + /// INFO3 + Info3 = 11, + /// INFO4 + Info4 = 12, + /// WARN + Warn = 13, + /// WARN2 + Warn2 = 14, + /// WARN3 + Warn3 = 15, + /// WARN4 + Warn4 = 16, + /// ERROR + Error = 17, + /// ERROR2 + Error2 = 18, + /// ERROR3 + Error3 = 19, + /// ERROR4 + Error4 = 20, + /// FATAL + Fatal = 21, + /// FATAL2 + Fatal2 = 22, + /// FATAL3 + Fatal3 = 23, + /// FATAL4 + Fatal4 = 24, +} + +impl Severity { + /// Return the string representing the short name for the `Severity` + /// value as specified by the OpenTelemetry logs data model. 
+ pub const fn name(&self) -> &'static str { + match &self { + Severity::Trace => "TRACE", + Severity::Trace2 => "TRACE2", + Severity::Trace3 => "TRACE3", + Severity::Trace4 => "TRACE4", + + Severity::Debug => "DEBUG", + Severity::Debug2 => "DEBUG2", + Severity::Debug3 => "DEBUG3", + Severity::Debug4 => "DEBUG4", + + Severity::Info => "INFO", + Severity::Info2 => "INFO2", + Severity::Info3 => "INFO3", + Severity::Info4 => "INFO4", + + Severity::Warn => "WARN", + Severity::Warn2 => "WARN2", + Severity::Warn3 => "WARN3", + Severity::Warn4 => "WARN4", + + Severity::Error => "ERROR", + Severity::Error2 => "ERROR2", + Severity::Error3 => "ERROR3", + Severity::Error4 => "ERROR4", + + Severity::Fatal => "FATAL", + Severity::Fatal2 => "FATAL2", + Severity::Fatal3 => "FATAL3", + Severity::Fatal4 => "FATAL4", + } + } +} + +/// A builder for [`LogRecord`] values. +#[derive(Debug, Clone)] +pub struct LogRecordBuilder { + record: LogRecord, +} + +impl LogRecordBuilder { + /// Create a new LogRecordBuilder + pub fn new() -> Self { + Self { + record: Default::default(), + } + } + + /// Assign timestamp + pub fn with_timestamp(self, timestamp: SystemTime) -> Self { + Self { + record: LogRecord { + timestamp: Some(timestamp), + ..self.record + }, + } + } + + /// Assign observed timestamp + pub fn with_observed_timestamp(self, timestamp: SystemTime) -> Self { + Self { + record: LogRecord { + observed_timestamp: Some(timestamp), + ..self.record + }, + } + } + + /// Assign the record's [`TraceContext`] + pub fn with_span_context(self, span_context: &SpanContext) -> Self { + Self { + record: LogRecord { + trace_context: Some(TraceContext { + span_id: span_context.span_id(), + trace_id: span_context.trace_id(), + trace_flags: Some(span_context.trace_flags()), + }), + ..self.record + }, + } + } + + /// Assign the record's [`TraceContext`] from a `TraceContextExt` trait + pub fn with_context(self, context: &T) -> Self + where + T: TraceContextExt, + { + if context.has_active_span() { + 
self.with_span_context(context.span().span_context()) + } else { + self + } + } + + /// Assign severity text + pub fn with_severity_text(self, severity: T) -> Self + where + T: Into>, + { + Self { + record: LogRecord { + severity_text: Some(severity.into()), + ..self.record + }, + } + } + + /// Assign severity number + pub fn with_severity_number(self, severity: Severity) -> Self { + Self { + record: LogRecord { + severity_number: Some(severity), + ..self.record + }, + } + } + + /// Assign body + pub fn with_body(self, body: AnyValue) -> Self { + Self { + record: LogRecord { + body: Some(body), + ..self.record + }, + } + } + + /// Assign attributes, overriding previously set attributes + pub fn with_attributes(self, attributes: OrderMap) -> Self { + Self { + record: LogRecord { + attributes: Some(attributes), + ..self.record + }, + } + } + + /// Set a single attribute for this record + pub fn with_attribute(mut self, key: K, value: V) -> Self + where + K: Into, + V: Into, + { + if let Some(ref mut map) = self.record.attributes { + map.insert(key.into(), value.into()); + } else { + let mut map = OrderMap::with_capacity(1); + map.insert(key.into(), value.into()); + self.record.attributes = Some(map); + } + + self + } + + /// Build the record, consuming the Builder + pub fn build(self) -> LogRecord { + self.record + } +} + +impl Default for LogRecordBuilder { + fn default() -> Self { + Self::new() + } +} diff --git a/opentelemetry-otlp/Cargo.toml b/opentelemetry-otlp/Cargo.toml index 9fbd124d80..cf61850479 100644 --- a/opentelemetry-otlp/Cargo.toml +++ b/opentelemetry-otlp/Cargo.toml @@ -62,6 +62,7 @@ tokio = { version = "1.0", features = ["macros", "rt-multi-thread"] } # telemetry pillars and functions trace = ["opentelemetry_api/trace", "opentelemetry_sdk/trace", "opentelemetry-proto/traces"] metrics = ["opentelemetry_api/metrics", "opentelemetry_sdk/metrics", "opentelemetry-proto/metrics", "grpc-tonic"] +logs = ["opentelemetry_api/logs", "opentelemetry_sdk/logs", 
"opentelemetry-proto/logs"] # add ons serialize = ["serde"] diff --git a/opentelemetry-otlp/src/lib.rs b/opentelemetry-otlp/src/lib.rs index ccb54d2c95..8fee1df2c0 100644 --- a/opentelemetry-otlp/src/lib.rs +++ b/opentelemetry-otlp/src/lib.rs @@ -181,6 +181,8 @@ #![cfg_attr(test, deny(warnings))] mod exporter; +#[cfg(feature = "logs")] +mod logs; #[cfg(feature = "metrics")] mod metric; #[cfg(feature = "trace")] @@ -200,6 +202,9 @@ pub use crate::metric::{ OTEL_EXPORTER_OTLP_METRICS_ENDPOINT, OTEL_EXPORTER_OTLP_METRICS_TIMEOUT, }; +#[cfg(feature = "logs")] +pub use crate::logs::*; + pub use crate::exporter::{ HasExportConfig, WithExportConfig, OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_ENDPOINT_DEFAULT, OTEL_EXPORTER_OTLP_PROTOCOL, diff --git a/opentelemetry-otlp/src/logs.rs b/opentelemetry-otlp/src/logs.rs new file mode 100644 index 0000000000..8550704c33 --- /dev/null +++ b/opentelemetry-otlp/src/logs.rs @@ -0,0 +1,494 @@ +//! OTLP - Log Exporter +//! +//! Defines a [LogExporter] to send logs via the OpenTelemetry Protocol (OTLP) + +#[cfg(feature = "grpc-tonic")] +use { + crate::exporter::tonic::{TonicConfig, TonicExporterBuilder}, + opentelemetry_proto::tonic::collector::logs::v1::{ + logs_service_client::LogsServiceClient as TonicLogsServiceClient, + ExportLogsServiceRequest as TonicRequest, + }, + tonic::{ + metadata::{KeyAndValueRef, MetadataMap}, + transport::Channel as TonicChannel, + Request, + }, +}; + +#[cfg(feature = "grpc-sys")] +use { + crate::exporter::grpcio::{GrpcioConfig, GrpcioExporterBuilder}, + grpcio::{ + CallOption, Channel as GrpcChannel, ChannelBuilder, ChannelCredentialsBuilder, Environment, + MetadataBuilder, + }, + opentelemetry_proto::grpcio::{ + logs_service::ExportLogsServiceRequest as GrpcRequest, + logs_service_grpc::LogsServiceClient as GrpcioLogServiceClient, + }, + std::sync::Arc, +}; + +#[cfg(feature = "http-proto")] +use { + crate::exporter::http::{HttpConfig, HttpExporterBuilder}, + http::{ + header::{HeaderName, 
HeaderValue, CONTENT_TYPE}, + Method, Uri, + }, + opentelemetry_http::HttpClient, + opentelemetry_proto::tonic::collector::logs::v1::ExportLogsServiceRequest as ProstRequest, + prost::Message, + std::convert::TryFrom, +}; + +#[cfg(any(feature = "grpc-sys", feature = "http-proto"))] +use std::collections::HashMap; + +use crate::exporter::ExportConfig; +use crate::OtlpPipeline; +use async_trait::async_trait; +use std::{ + borrow::Cow, + fmt::{self, Debug}, + time::Duration, +}; + +use opentelemetry_api::logs::{LogError, LoggerProvider}; +use opentelemetry_sdk::{self, export::logs::LogData, logs::LogRuntime}; + +impl OtlpPipeline { + /// Create a OTLP logging pipeline. + pub fn logging(self) -> OtlpLogPipeline { + OtlpLogPipeline::default() + } +} + +/// OTLP log exporter builder +#[derive(Debug)] +#[allow(clippy::large_enum_variant)] +#[non_exhaustive] +pub enum LogExporterBuilder { + /// Tonic log exporter builder + #[cfg(feature = "grpc-tonic")] + Tonic(TonicExporterBuilder), + /// Grpc log exporter builder + #[cfg(feature = "grpc-sys")] + Grpcio(GrpcioExporterBuilder), + /// Http log exporter builder + #[cfg(feature = "http-proto")] + Http(HttpExporterBuilder), +} + +impl LogExporterBuilder { + /// Build a OTLP log exporter using the given configuration. 
+ pub fn build_log_exporter(self) -> Result { + match self { + #[cfg(feature = "grpc-tonic")] + LogExporterBuilder::Tonic(builder) => Ok(match builder.channel { + Some(channel) => LogExporter::from_tonic_channel( + builder.exporter_config, + builder.tonic_config, + channel, + ), + None => LogExporter::new_tonic(builder.exporter_config, builder.tonic_config), + }?), + #[cfg(feature = "grpc-sys")] + LogExporterBuilder::Grpcio(builder) => Ok(LogExporter::new_grpcio( + builder.exporter_config, + builder.grpcio_config, + )), + #[cfg(feature = "http-proto")] + LogExporterBuilder::Http(builder) => Ok(LogExporter::new_http( + builder.exporter_config, + builder.http_config, + )?), + } + } +} + +#[cfg(feature = "grpc-tonic")] +impl From for LogExporterBuilder { + fn from(exporter: TonicExporterBuilder) -> Self { + LogExporterBuilder::Tonic(exporter) + } +} + +#[cfg(feature = "grpc-sys")] +impl From for LogExporterBuilder { + fn from(exporter: GrpcioExporterBuilder) -> Self { + LogExporterBuilder::Grpcio(exporter) + } +} + +#[cfg(feature = "http-proto")] +impl From for LogExporterBuilder { + fn from(exporter: HttpExporterBuilder) -> Self { + LogExporterBuilder::Http(exporter) + } +} + +/// OTLP exporter that sends log data +pub enum LogExporter { + #[cfg(feature = "grpc-tonic")] + /// Log Exporter using tonic as grpc layer. + Tonic { + /// Duration of timeout when sending logs to backend. + timeout: Duration, + /// Additional headers of the outbound requests. + metadata: Option, + /// The Grpc log exporter + log_exporter: TonicLogsServiceClient, + }, + #[cfg(feature = "grpc-sys")] + /// Log Exporter using grpcio as grpc layer + Grpcio { + /// Duration of timeout when sending logs to backend. + timeout: Duration, + /// Additional headers of the outbound requests. 
+ headers: Option>, + /// The Grpc log exporter + log_exporter: GrpcioLogServiceClient, + }, + #[cfg(feature = "http-proto")] + /// Log Exporter using HTTP transport + Http { + /// Duration of timeout when sending logs to backend. + timeout: Duration, + /// Additional headers of the outbound requests. + headers: Option>, + /// The Collector URL + collector_endpoint: Uri, + /// The HTTP log exporter + log_exporter: Option>, + }, +} + +impl Debug for LogExporter { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + #[cfg(feature = "grpc-tonic")] + LogExporter::Tonic { + metadata, timeout, .. + } => f + .debug_struct("Exporter") + .field("metadata", &metadata) + .field("timeout", &timeout) + .field("log_exporter", &"LogServiceClient") + .finish(), + #[cfg(feature = "grpc-sys")] + LogExporter::Grpcio { + headers, timeout, .. + } => f + .debug_struct("Exporter") + .field("headers", &headers) + .field("timeout", &timeout) + .field("log_exporter", &"LogServiceClient") + .finish(), + #[cfg(feature = "http-proto")] + LogExporter::Http { + headers, timeout, .. + } => f + .debug_struct("Exporter") + .field("headers", &headers) + .field("timeout", &timeout) + .field("log_exporter", &"LogServiceClient") + .finish(), + } + } +} + +impl LogExporter { + /// Builds a new log exporter with the given configuration. + #[cfg(feature = "grpc-tonic")] + pub fn new_tonic( + config: ExportConfig, + tonic_config: TonicConfig, + ) -> Result { + let endpoint = TonicChannel::from_shared(config.endpoint.clone())?; + + #[cfg(feature = "tls")] + let channel = match tonic_config.tls_config.as_ref() { + Some(tls_config) => endpoint.tls_config(tls_config.clone())?, + None => endpoint, + } + .timeout(config.timeout) + .connect_lazy(); + + #[cfg(not(feature = "tls"))] + let channel = endpoint.timeout(config.timeout).connect_lazy(); + + LogExporter::from_tonic_channel(config, tonic_config, channel) + } + + /// Builds a new log exporter with given tonic channel. 
+ /// + /// This allows users to bring their own custom channel like UDS. + /// However, users MUST make sure the [`ExportConfig::timeout`] is + /// the same as the channel's timeout. + #[cfg(feature = "grpc-tonic")] + pub fn from_tonic_channel( + config: ExportConfig, + tonic_config: TonicConfig, + channel: tonic::transport::Channel, + ) -> Result { + Ok(LogExporter::Tonic { + timeout: config.timeout, + metadata: tonic_config.metadata, + log_exporter: TonicLogsServiceClient::new(channel), + }) + } + + /// Builds a new log exporter with the given configuration + #[cfg(feature = "grpc-sys")] + pub fn new_grpcio(config: ExportConfig, grpcio_config: GrpcioConfig) -> Self { + let mut builder: ChannelBuilder = ChannelBuilder::new(Arc::new(Environment::new( + grpcio_config.completion_queue_count, + ))); + + if let Some(compression) = grpcio_config.compression { + builder = builder.default_compression_algorithm(compression.into()); + } + + let channel: GrpcChannel = match (grpcio_config.credentials, grpcio_config.use_tls) { + (None, Some(true)) => builder + .set_credentials(ChannelCredentialsBuilder::new().build()) + .connect(config.endpoint.as_str()), + (None, _) => builder.connect(config.endpoint.as_str()), + (Some(credentials), _) => builder + .set_credentials( + ChannelCredentialsBuilder::new() + .cert(credentials.cert.into(), credentials.key.into()) + .build(), + ) + .connect(config.endpoint.as_str()), + }; + + LogExporter::Grpcio { + log_exporter: GrpcioLogServiceClient::new(channel), + timeout: config.timeout, + headers: grpcio_config.headers, + } + } + + /// Builds a new log exporter with the given configuration + #[cfg(feature = "http-proto")] + pub fn new_http(config: ExportConfig, http_config: HttpConfig) -> Result { + let url: Uri = config + .endpoint + .parse() + .map_err::(Into::into)?; + + Ok(LogExporter::Http { + log_exporter: http_config.client, + timeout: config.timeout, + collector_endpoint: url, + headers: http_config.headers, + }) + } +} + 
+#[async_trait] +impl opentelemetry_sdk::export::logs::LogExporter for LogExporter { + async fn export(&mut self, batch: Vec) -> opentelemetry_api::logs::LogResult<()> { + match self { + #[cfg(feature = "grpc-sys")] + LogExporter::Grpcio { + timeout, + headers, + log_exporter, + } => { + let request = GrpcRequest { + resource_logs: protobuf::RepeatedField::from_vec( + batch.into_iter().map(Into::into).collect(), + ), + unknown_fields: Default::default(), + cached_size: Default::default(), + }; + + let mut call_options = CallOption::default().timeout(*timeout); + + if let Some(headers) = headers.clone() { + let mut metadata_builder: MetadataBuilder = MetadataBuilder::new(); + + for (key, value) in headers { + let _ = metadata_builder.add_str(key.as_str(), value.as_str()); + } + + call_options = call_options.headers(metadata_builder.build()); + } + + let receiver = log_exporter + .export_async_opt(&request, call_options) + .map_err::(Into::into)?; + receiver.await.map_err::(Into::into)?; + Ok(()) + } + #[cfg(feature = "grpc-tonic")] + LogExporter::Tonic { + log_exporter, + metadata, + .. + } => { + let mut request = Request::new(TonicRequest { + resource_logs: batch.into_iter().map(Into::into).collect(), + }); + + if let Some(metadata) = metadata { + for key_and_value in metadata.iter() { + match key_and_value { + KeyAndValueRef::Ascii(key, value) => { + request.metadata_mut().append(key, value.to_owned()) + } + KeyAndValueRef::Binary(key, value) => { + request.metadata_mut().append_bin(key, value.to_owned()) + } + }; + } + } + + log_exporter + .to_owned() + .export(request) + .await + .map_err::(Into::into)?; + + Ok(()) + } + + #[cfg(feature = "http-proto")] + LogExporter::Http { + log_exporter, + collector_endpoint, + headers, + .. 
+ } => { + let req = ProstRequest { + resource_logs: batch.into_iter().map(Into::into).collect(), + }; + + let mut buf = vec![]; + req.encode(&mut buf) + .map_err::(Into::into)?; + + let mut request = http::Request::builder() + .method(Method::POST) + .uri(collector_endpoint.clone()) + .header(CONTENT_TYPE, "application/x-protobuf") + .body(buf) + .map_err::(Into::into)?; + + if let Some(headers) = headers.clone() { + for (k, val) in headers { + let value = HeaderValue::from_str(val.as_ref()) + .map_err::(Into::into)?; + let key = + HeaderName::try_from(&k).map_err::(Into::into)?; + request.headers_mut().insert(key, value); + } + } + + if let Some(client) = log_exporter { + client.send(request).await?; + Ok(()) + } else { + Err(crate::Error::NoHttpClient.into()) + } + } + } + } +} + +/// Recommended configuration for an OTLP exporter pipeline. +#[derive(Default, Debug)] +pub struct OtlpLogPipeline { + exporter_builder: Option, + log_config: Option, +} + +impl OtlpLogPipeline { + /// Set the OTLP log exporter builder. + pub fn with_exporter>(mut self, pipeline: B) -> Self { + self.exporter_builder = Some(pipeline.into()); + self + } + + /// Returns a [`Logger`] with the name `opentelemetry-otlp` and the + /// current crate version, using the configured log exporter. + /// + /// [`Logger`]: opentelemetry_sdk::logs::Logger + pub fn simple( + self, + include_trace_context: bool, + ) -> Result { + Ok(build_simple_with_exporter( + self.exporter_builder + .ok_or(crate::Error::NoExporterBuilder)? + .build_log_exporter()?, + self.log_config, + include_trace_context, + )) + } + + /// Returns a [`Logger`] with the name `opentelemetry-otlp` and the + /// current crate version, using the configured log exporter and a + /// batch log processor. 
+    ///
+    /// [`Logger`]: opentelemetry_sdk::logs::Logger
+    pub fn batch(
+        self,
+        runtime: R,
+        include_trace_context: bool,
+    ) -> Result {
+        Ok(build_batch_with_exporter(
+            self.exporter_builder
+                .ok_or(crate::Error::NoExporterBuilder)?
+                .build_log_exporter()?,
+            self.log_config,
+            runtime,
+            include_trace_context,
+        ))
+    }
+}
+
+fn build_simple_with_exporter(
+    exporter: LogExporter,
+    log_config: Option,
+    include_trace_context: bool,
+) -> opentelemetry_sdk::logs::Logger {
+    let mut provider_builder =
+        opentelemetry_sdk::logs::LoggerProvider::builder().with_simple_exporter(exporter);
+    if let Some(config) = log_config {
+        provider_builder = provider_builder.with_config(config);
+    }
+    let provider = provider_builder.build();
+    provider.versioned_logger(
+        Cow::Borrowed("opentelemetry-otlp"),
+        Some(Cow::Borrowed(env!("CARGO_PKG_VERSION"))),
+        None,
+        None,
+        include_trace_context,
+    )
+}
+
+fn build_batch_with_exporter(
+    exporter: LogExporter,
+    log_config: Option,
+    runtime: R,
+    include_trace_context: bool,
+) -> opentelemetry_sdk::logs::Logger {
+    let mut provider_builder =
+        opentelemetry_sdk::logs::LoggerProvider::builder().with_batch_exporter(exporter, runtime);
+    if let Some(config) = log_config {
+        provider_builder = provider_builder.with_config(config);
+    }
+    let provider = provider_builder.build();
+    provider.versioned_logger(
+        Cow::Borrowed("opentelemetry-otlp"),
+        Some(Cow::Borrowed(env!("CARGO_PKG_VERSION"))),
+        None,
+        None,
+        include_trace_context,
+    )
+}
diff --git a/opentelemetry-proto/Cargo.toml b/opentelemetry-proto/Cargo.toml
index 19669233b8..41d94fe4cc 100644
--- a/opentelemetry-proto/Cargo.toml
+++ b/opentelemetry-proto/Cargo.toml
@@ -47,8 +47,8 @@ grpcio = { version = "0.12", optional = true }
 tonic = { version = "0.9.0", optional = true }
 prost = { version = "0.11.0", optional = true }
 protobuf = { version = "2.18", optional = true } # todo: update to 3.0 so we have docs for generated types.
-opentelemetry_api = { version = "0.19", default-features = false, features = ["trace", "metrics"], path = "../opentelemetry-api" } -opentelemetry_sdk = { version = "0.19", default-features = false, features = ["trace", "metrics"], path = "../opentelemetry-sdk" } +opentelemetry_api = { version = "0.19", default-features = false, features = ["trace", "metrics", "logs"], path = "../opentelemetry-api" } +opentelemetry_sdk = { version = "0.19", default-features = false, features = ["trace", "metrics", "logs"], path = "../opentelemetry-sdk" } futures = { version = "0.3", default-features = false, features = ["std"] } futures-util = { version = "0.3", default-features = false, features = ["std"] } serde = { version = "1.0", optional = true } diff --git a/opentelemetry-proto/src/proto.rs b/opentelemetry-proto/src/proto.rs index 5a16c8298b..dd8f0d99aa 100644 --- a/opentelemetry-proto/src/proto.rs +++ b/opentelemetry-proto/src/proto.rs @@ -72,6 +72,12 @@ pub mod tonic { /// Generated files using [`grpcio`](https://docs.rs/crate/grpcio) and [`protobuf`](https://docs.rs/crate/protobuf/latest) pub mod grpcio { pub mod common; + #[cfg(feature = "logs")] + pub mod logs; + #[cfg(feature = "logs")] + pub mod logs_service; + #[cfg(feature = "logs")] + pub mod logs_service_grpc; #[cfg(feature = "metrics")] pub mod metrics; #[cfg(feature = "metrics")] diff --git a/opentelemetry-proto/src/proto/grpcio/logs.rs b/opentelemetry-proto/src/proto/grpcio/logs.rs new file mode 100644 index 0000000000..2fa6bd55f3 --- /dev/null +++ b/opentelemetry-proto/src/proto/grpcio/logs.rs @@ -0,0 +1,1481 @@ +// This file is generated by rust-protobuf 2.28.0. 
Do not edit +// @generated + +// https://github.com/rust-lang/rust-clippy/issues/702 +#![allow(unknown_lints)] +#![allow(clippy::all)] + +#![allow(unused_attributes)] +#![cfg_attr(rustfmt, rustfmt::skip)] + +#![allow(box_pointers)] +#![allow(dead_code)] +#![allow(missing_docs)] +#![allow(non_camel_case_types)] +#![allow(non_snake_case)] +#![allow(non_upper_case_globals)] +#![allow(trivial_casts)] +#![allow(unused_imports)] +#![allow(unused_results)] +//! Generated file from `opentelemetry/proto/logs/v1/logs.proto` + +/// Generated files are compatible only with the same version +/// of protobuf runtime. +// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_28_0; + +#[derive(PartialEq,Clone,Default)] +#[cfg_attr(feature = "with-serde", derive(::serde::Serialize, ::serde::Deserialize))] +pub struct LogsData { + // message fields + pub resource_logs: ::protobuf::RepeatedField, + // special fields + #[cfg_attr(feature = "with-serde", serde(skip))] + pub unknown_fields: ::protobuf::UnknownFields, + #[cfg_attr(feature = "with-serde", serde(skip))] + pub cached_size: ::protobuf::CachedSize, +} + +impl<'a> ::std::default::Default for &'a LogsData { + fn default() -> &'a LogsData { + ::default_instance() + } +} + +impl LogsData { + pub fn new() -> LogsData { + ::std::default::Default::default() + } + + // repeated .opentelemetry.proto.logs.v1.ResourceLogs resource_logs = 1; + + + pub fn get_resource_logs(&self) -> &[ResourceLogs] { + &self.resource_logs + } + pub fn clear_resource_logs(&mut self) { + self.resource_logs.clear(); + } + + // Param is passed by value, moved + pub fn set_resource_logs(&mut self, v: ::protobuf::RepeatedField) { + self.resource_logs = v; + } + + // Mutable pointer to the field. 
+ pub fn mut_resource_logs(&mut self) -> &mut ::protobuf::RepeatedField { + &mut self.resource_logs + } + + // Take field + pub fn take_resource_logs(&mut self) -> ::protobuf::RepeatedField { + ::std::mem::replace(&mut self.resource_logs, ::protobuf::RepeatedField::new()) + } +} + +impl ::protobuf::Message for LogsData { + fn is_initialized(&self) -> bool { + for v in &self.resource_logs { + if !v.is_initialized() { + return false; + } + }; + true + } + + fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> { + while !is.eof()? { + let (field_number, wire_type) = is.read_tag_unpack()?; + match field_number { + 1 => { + ::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.resource_logs)?; + }, + _ => { + ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; + }, + }; + } + ::std::result::Result::Ok(()) + } + + // Compute sizes of nested messages + #[allow(unused_variables)] + fn compute_size(&self) -> u32 { + let mut my_size = 0; + for value in &self.resource_logs { + let len = value.compute_size(); + my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; + }; + my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); + self.cached_size.set(my_size); + my_size + } + + fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> { + for v in &self.resource_logs { + os.write_tag(1, ::protobuf::wire_format::WireTypeLengthDelimited)?; + os.write_raw_varint32(v.get_cached_size())?; + v.write_to_with_cached_sizes(os)?; + }; + os.write_unknown_fields(self.get_unknown_fields())?; + ::std::result::Result::Ok(()) + } + + fn get_cached_size(&self) -> u32 { + self.cached_size.get() + } + + fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { + &self.unknown_fields + } + + fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { + &mut self.unknown_fields + } + + fn 
as_any(&self) -> &dyn (::std::any::Any) { + self as &dyn (::std::any::Any) + } + fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) { + self as &mut dyn (::std::any::Any) + } + fn into_any(self: ::std::boxed::Box) -> ::std::boxed::Box { + self + } + + fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { + Self::descriptor_static() + } + + fn new() -> LogsData { + LogsData::new() + } + + fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor { + static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT; + descriptor.get(|| { + let mut fields = ::std::vec::Vec::new(); + fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage>( + "resource_logs", + |m: &LogsData| { &m.resource_logs }, + |m: &mut LogsData| { &mut m.resource_logs }, + )); + ::protobuf::reflect::MessageDescriptor::new_pb_name::( + "LogsData", + fields, + file_descriptor_proto() + ) + }) + } + + fn default_instance() -> &'static LogsData { + static instance: ::protobuf::rt::LazyV2 = ::protobuf::rt::LazyV2::INIT; + instance.get(LogsData::new) + } +} + +impl ::protobuf::Clear for LogsData { + fn clear(&mut self) { + self.resource_logs.clear(); + self.unknown_fields.clear(); + } +} + +impl ::std::fmt::Debug for LogsData { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + ::protobuf::text_format::fmt(self, f) + } +} + +impl ::protobuf::reflect::ProtobufValue for LogsData { + fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef { + ::protobuf::reflect::ReflectValueRef::Message(self) + } +} + +#[derive(PartialEq,Clone,Default)] +#[cfg_attr(feature = "with-serde", derive(::serde::Serialize, ::serde::Deserialize))] +pub struct ResourceLogs { + // message fields + pub resource: ::protobuf::SingularPtrField, + pub scope_logs: ::protobuf::RepeatedField, + pub schema_url: ::std::string::String, + // special fields + 
#[cfg_attr(feature = "with-serde", serde(skip))] + pub unknown_fields: ::protobuf::UnknownFields, + #[cfg_attr(feature = "with-serde", serde(skip))] + pub cached_size: ::protobuf::CachedSize, +} + +impl<'a> ::std::default::Default for &'a ResourceLogs { + fn default() -> &'a ResourceLogs { + ::default_instance() + } +} + +impl ResourceLogs { + pub fn new() -> ResourceLogs { + ::std::default::Default::default() + } + + // .opentelemetry.proto.resource.v1.Resource resource = 1; + + + pub fn get_resource(&self) -> &super::resource::Resource { + self.resource.as_ref().unwrap_or_else(|| ::default_instance()) + } + pub fn clear_resource(&mut self) { + self.resource.clear(); + } + + pub fn has_resource(&self) -> bool { + self.resource.is_some() + } + + // Param is passed by value, moved + pub fn set_resource(&mut self, v: super::resource::Resource) { + self.resource = ::protobuf::SingularPtrField::some(v); + } + + // Mutable pointer to the field. + // If field is not initialized, it is initialized with default value first. + pub fn mut_resource(&mut self) -> &mut super::resource::Resource { + if self.resource.is_none() { + self.resource.set_default(); + } + self.resource.as_mut().unwrap() + } + + // Take field + pub fn take_resource(&mut self) -> super::resource::Resource { + self.resource.take().unwrap_or_else(|| super::resource::Resource::new()) + } + + // repeated .opentelemetry.proto.logs.v1.ScopeLogs scope_logs = 2; + + + pub fn get_scope_logs(&self) -> &[ScopeLogs] { + &self.scope_logs + } + pub fn clear_scope_logs(&mut self) { + self.scope_logs.clear(); + } + + // Param is passed by value, moved + pub fn set_scope_logs(&mut self, v: ::protobuf::RepeatedField) { + self.scope_logs = v; + } + + // Mutable pointer to the field. 
+ pub fn mut_scope_logs(&mut self) -> &mut ::protobuf::RepeatedField { + &mut self.scope_logs + } + + // Take field + pub fn take_scope_logs(&mut self) -> ::protobuf::RepeatedField { + ::std::mem::replace(&mut self.scope_logs, ::protobuf::RepeatedField::new()) + } + + // string schema_url = 3; + + + pub fn get_schema_url(&self) -> &str { + &self.schema_url + } + pub fn clear_schema_url(&mut self) { + self.schema_url.clear(); + } + + // Param is passed by value, moved + pub fn set_schema_url(&mut self, v: ::std::string::String) { + self.schema_url = v; + } + + // Mutable pointer to the field. + // If field is not initialized, it is initialized with default value first. + pub fn mut_schema_url(&mut self) -> &mut ::std::string::String { + &mut self.schema_url + } + + // Take field + pub fn take_schema_url(&mut self) -> ::std::string::String { + ::std::mem::replace(&mut self.schema_url, ::std::string::String::new()) + } +} + +impl ::protobuf::Message for ResourceLogs { + fn is_initialized(&self) -> bool { + for v in &self.resource { + if !v.is_initialized() { + return false; + } + }; + for v in &self.scope_logs { + if !v.is_initialized() { + return false; + } + }; + true + } + + fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> { + while !is.eof()? 
{ + let (field_number, wire_type) = is.read_tag_unpack()?; + match field_number { + 1 => { + ::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.resource)?; + }, + 2 => { + ::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.scope_logs)?; + }, + 3 => { + ::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.schema_url)?; + }, + _ => { + ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; + }, + }; + } + ::std::result::Result::Ok(()) + } + + // Compute sizes of nested messages + #[allow(unused_variables)] + fn compute_size(&self) -> u32 { + let mut my_size = 0; + if let Some(ref v) = self.resource.as_ref() { + let len = v.compute_size(); + my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; + } + for value in &self.scope_logs { + let len = value.compute_size(); + my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; + }; + if !self.schema_url.is_empty() { + my_size += ::protobuf::rt::string_size(3, &self.schema_url); + } + my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); + self.cached_size.set(my_size); + my_size + } + + fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> { + if let Some(ref v) = self.resource.as_ref() { + os.write_tag(1, ::protobuf::wire_format::WireTypeLengthDelimited)?; + os.write_raw_varint32(v.get_cached_size())?; + v.write_to_with_cached_sizes(os)?; + } + for v in &self.scope_logs { + os.write_tag(2, ::protobuf::wire_format::WireTypeLengthDelimited)?; + os.write_raw_varint32(v.get_cached_size())?; + v.write_to_with_cached_sizes(os)?; + }; + if !self.schema_url.is_empty() { + os.write_string(3, &self.schema_url)?; + } + os.write_unknown_fields(self.get_unknown_fields())?; + ::std::result::Result::Ok(()) + } + + fn get_cached_size(&self) -> u32 { + self.cached_size.get() + } + + fn get_unknown_fields(&self) -> 
&::protobuf::UnknownFields { + &self.unknown_fields + } + + fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { + &mut self.unknown_fields + } + + fn as_any(&self) -> &dyn (::std::any::Any) { + self as &dyn (::std::any::Any) + } + fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) { + self as &mut dyn (::std::any::Any) + } + fn into_any(self: ::std::boxed::Box) -> ::std::boxed::Box { + self + } + + fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { + Self::descriptor_static() + } + + fn new() -> ResourceLogs { + ResourceLogs::new() + } + + fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor { + static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT; + descriptor.get(|| { + let mut fields = ::std::vec::Vec::new(); + fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage>( + "resource", + |m: &ResourceLogs| { &m.resource }, + |m: &mut ResourceLogs| { &mut m.resource }, + )); + fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage>( + "scope_logs", + |m: &ResourceLogs| { &m.scope_logs }, + |m: &mut ResourceLogs| { &mut m.scope_logs }, + )); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>( + "schema_url", + |m: &ResourceLogs| { &m.schema_url }, + |m: &mut ResourceLogs| { &mut m.schema_url }, + )); + ::protobuf::reflect::MessageDescriptor::new_pb_name::( + "ResourceLogs", + fields, + file_descriptor_proto() + ) + }) + } + + fn default_instance() -> &'static ResourceLogs { + static instance: ::protobuf::rt::LazyV2 = ::protobuf::rt::LazyV2::INIT; + instance.get(ResourceLogs::new) + } +} + +impl ::protobuf::Clear for ResourceLogs { + fn clear(&mut self) { + self.resource.clear(); + self.scope_logs.clear(); + self.schema_url.clear(); + 
self.unknown_fields.clear(); + } +} + +impl ::std::fmt::Debug for ResourceLogs { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + ::protobuf::text_format::fmt(self, f) + } +} + +impl ::protobuf::reflect::ProtobufValue for ResourceLogs { + fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef { + ::protobuf::reflect::ReflectValueRef::Message(self) + } +} + +#[derive(PartialEq,Clone,Default)] +#[cfg_attr(feature = "with-serde", derive(::serde::Serialize, ::serde::Deserialize))] +pub struct ScopeLogs { + // message fields + pub scope: ::protobuf::SingularPtrField, + pub log_records: ::protobuf::RepeatedField, + pub schema_url: ::std::string::String, + // special fields + #[cfg_attr(feature = "with-serde", serde(skip))] + pub unknown_fields: ::protobuf::UnknownFields, + #[cfg_attr(feature = "with-serde", serde(skip))] + pub cached_size: ::protobuf::CachedSize, +} + +impl<'a> ::std::default::Default for &'a ScopeLogs { + fn default() -> &'a ScopeLogs { + ::default_instance() + } +} + +impl ScopeLogs { + pub fn new() -> ScopeLogs { + ::std::default::Default::default() + } + + // .opentelemetry.proto.common.v1.InstrumentationScope scope = 1; + + + pub fn get_scope(&self) -> &super::common::InstrumentationScope { + self.scope.as_ref().unwrap_or_else(|| ::default_instance()) + } + pub fn clear_scope(&mut self) { + self.scope.clear(); + } + + pub fn has_scope(&self) -> bool { + self.scope.is_some() + } + + // Param is passed by value, moved + pub fn set_scope(&mut self, v: super::common::InstrumentationScope) { + self.scope = ::protobuf::SingularPtrField::some(v); + } + + // Mutable pointer to the field. + // If field is not initialized, it is initialized with default value first. 
+ pub fn mut_scope(&mut self) -> &mut super::common::InstrumentationScope { + if self.scope.is_none() { + self.scope.set_default(); + } + self.scope.as_mut().unwrap() + } + + // Take field + pub fn take_scope(&mut self) -> super::common::InstrumentationScope { + self.scope.take().unwrap_or_else(|| super::common::InstrumentationScope::new()) + } + + // repeated .opentelemetry.proto.logs.v1.LogRecord log_records = 2; + + + pub fn get_log_records(&self) -> &[LogRecord] { + &self.log_records + } + pub fn clear_log_records(&mut self) { + self.log_records.clear(); + } + + // Param is passed by value, moved + pub fn set_log_records(&mut self, v: ::protobuf::RepeatedField) { + self.log_records = v; + } + + // Mutable pointer to the field. + pub fn mut_log_records(&mut self) -> &mut ::protobuf::RepeatedField { + &mut self.log_records + } + + // Take field + pub fn take_log_records(&mut self) -> ::protobuf::RepeatedField { + ::std::mem::replace(&mut self.log_records, ::protobuf::RepeatedField::new()) + } + + // string schema_url = 3; + + + pub fn get_schema_url(&self) -> &str { + &self.schema_url + } + pub fn clear_schema_url(&mut self) { + self.schema_url.clear(); + } + + // Param is passed by value, moved + pub fn set_schema_url(&mut self, v: ::std::string::String) { + self.schema_url = v; + } + + // Mutable pointer to the field. + // If field is not initialized, it is initialized with default value first. 
+ pub fn mut_schema_url(&mut self) -> &mut ::std::string::String { + &mut self.schema_url + } + + // Take field + pub fn take_schema_url(&mut self) -> ::std::string::String { + ::std::mem::replace(&mut self.schema_url, ::std::string::String::new()) + } +} + +impl ::protobuf::Message for ScopeLogs { + fn is_initialized(&self) -> bool { + for v in &self.scope { + if !v.is_initialized() { + return false; + } + }; + for v in &self.log_records { + if !v.is_initialized() { + return false; + } + }; + true + } + + fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> { + while !is.eof()? { + let (field_number, wire_type) = is.read_tag_unpack()?; + match field_number { + 1 => { + ::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.scope)?; + }, + 2 => { + ::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.log_records)?; + }, + 3 => { + ::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.schema_url)?; + }, + _ => { + ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; + }, + }; + } + ::std::result::Result::Ok(()) + } + + // Compute sizes of nested messages + #[allow(unused_variables)] + fn compute_size(&self) -> u32 { + let mut my_size = 0; + if let Some(ref v) = self.scope.as_ref() { + let len = v.compute_size(); + my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; + } + for value in &self.log_records { + let len = value.compute_size(); + my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; + }; + if !self.schema_url.is_empty() { + my_size += ::protobuf::rt::string_size(3, &self.schema_url); + } + my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); + self.cached_size.set(my_size); + my_size + } + + fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> { + if let Some(ref v) = self.scope.as_ref() { + 
os.write_tag(1, ::protobuf::wire_format::WireTypeLengthDelimited)?; + os.write_raw_varint32(v.get_cached_size())?; + v.write_to_with_cached_sizes(os)?; + } + for v in &self.log_records { + os.write_tag(2, ::protobuf::wire_format::WireTypeLengthDelimited)?; + os.write_raw_varint32(v.get_cached_size())?; + v.write_to_with_cached_sizes(os)?; + }; + if !self.schema_url.is_empty() { + os.write_string(3, &self.schema_url)?; + } + os.write_unknown_fields(self.get_unknown_fields())?; + ::std::result::Result::Ok(()) + } + + fn get_cached_size(&self) -> u32 { + self.cached_size.get() + } + + fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { + &self.unknown_fields + } + + fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { + &mut self.unknown_fields + } + + fn as_any(&self) -> &dyn (::std::any::Any) { + self as &dyn (::std::any::Any) + } + fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) { + self as &mut dyn (::std::any::Any) + } + fn into_any(self: ::std::boxed::Box) -> ::std::boxed::Box { + self + } + + fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { + Self::descriptor_static() + } + + fn new() -> ScopeLogs { + ScopeLogs::new() + } + + fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor { + static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT; + descriptor.get(|| { + let mut fields = ::std::vec::Vec::new(); + fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage>( + "scope", + |m: &ScopeLogs| { &m.scope }, + |m: &mut ScopeLogs| { &mut m.scope }, + )); + fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage>( + "log_records", + |m: &ScopeLogs| { &m.log_records }, + |m: &mut ScopeLogs| { &mut m.log_records }, + )); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, 
::protobuf::types::ProtobufTypeString>( + "schema_url", + |m: &ScopeLogs| { &m.schema_url }, + |m: &mut ScopeLogs| { &mut m.schema_url }, + )); + ::protobuf::reflect::MessageDescriptor::new_pb_name::( + "ScopeLogs", + fields, + file_descriptor_proto() + ) + }) + } + + fn default_instance() -> &'static ScopeLogs { + static instance: ::protobuf::rt::LazyV2 = ::protobuf::rt::LazyV2::INIT; + instance.get(ScopeLogs::new) + } +} + +impl ::protobuf::Clear for ScopeLogs { + fn clear(&mut self) { + self.scope.clear(); + self.log_records.clear(); + self.schema_url.clear(); + self.unknown_fields.clear(); + } +} + +impl ::std::fmt::Debug for ScopeLogs { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + ::protobuf::text_format::fmt(self, f) + } +} + +impl ::protobuf::reflect::ProtobufValue for ScopeLogs { + fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef { + ::protobuf::reflect::ReflectValueRef::Message(self) + } +} + +#[derive(PartialEq,Clone,Default)] +#[cfg_attr(feature = "with-serde", derive(::serde::Serialize, ::serde::Deserialize))] +pub struct LogRecord { + // message fields + pub time_unix_nano: u64, + pub observed_time_unix_nano: u64, + pub severity_number: SeverityNumber, + pub severity_text: ::std::string::String, + pub body: ::protobuf::SingularPtrField, + pub attributes: ::protobuf::RepeatedField, + pub dropped_attributes_count: u32, + pub flags: u32, + pub trace_id: ::std::vec::Vec, + pub span_id: ::std::vec::Vec, + // special fields + #[cfg_attr(feature = "with-serde", serde(skip))] + pub unknown_fields: ::protobuf::UnknownFields, + #[cfg_attr(feature = "with-serde", serde(skip))] + pub cached_size: ::protobuf::CachedSize, +} + +impl<'a> ::std::default::Default for &'a LogRecord { + fn default() -> &'a LogRecord { + ::default_instance() + } +} + +impl LogRecord { + pub fn new() -> LogRecord { + ::std::default::Default::default() + } + + // fixed64 time_unix_nano = 1; + + + pub fn get_time_unix_nano(&self) -> u64 { + 
self.time_unix_nano + } + pub fn clear_time_unix_nano(&mut self) { + self.time_unix_nano = 0; + } + + // Param is passed by value, moved + pub fn set_time_unix_nano(&mut self, v: u64) { + self.time_unix_nano = v; + } + + // fixed64 observed_time_unix_nano = 11; + + + pub fn get_observed_time_unix_nano(&self) -> u64 { + self.observed_time_unix_nano + } + pub fn clear_observed_time_unix_nano(&mut self) { + self.observed_time_unix_nano = 0; + } + + // Param is passed by value, moved + pub fn set_observed_time_unix_nano(&mut self, v: u64) { + self.observed_time_unix_nano = v; + } + + // .opentelemetry.proto.logs.v1.SeverityNumber severity_number = 2; + + + pub fn get_severity_number(&self) -> SeverityNumber { + self.severity_number + } + pub fn clear_severity_number(&mut self) { + self.severity_number = SeverityNumber::SEVERITY_NUMBER_UNSPECIFIED; + } + + // Param is passed by value, moved + pub fn set_severity_number(&mut self, v: SeverityNumber) { + self.severity_number = v; + } + + // string severity_text = 3; + + + pub fn get_severity_text(&self) -> &str { + &self.severity_text + } + pub fn clear_severity_text(&mut self) { + self.severity_text.clear(); + } + + // Param is passed by value, moved + pub fn set_severity_text(&mut self, v: ::std::string::String) { + self.severity_text = v; + } + + // Mutable pointer to the field. + // If field is not initialized, it is initialized with default value first. 
+ pub fn mut_severity_text(&mut self) -> &mut ::std::string::String { + &mut self.severity_text + } + + // Take field + pub fn take_severity_text(&mut self) -> ::std::string::String { + ::std::mem::replace(&mut self.severity_text, ::std::string::String::new()) + } + + // .opentelemetry.proto.common.v1.AnyValue body = 5; + + + pub fn get_body(&self) -> &super::common::AnyValue { + self.body.as_ref().unwrap_or_else(|| ::default_instance()) + } + pub fn clear_body(&mut self) { + self.body.clear(); + } + + pub fn has_body(&self) -> bool { + self.body.is_some() + } + + // Param is passed by value, moved + pub fn set_body(&mut self, v: super::common::AnyValue) { + self.body = ::protobuf::SingularPtrField::some(v); + } + + // Mutable pointer to the field. + // If field is not initialized, it is initialized with default value first. + pub fn mut_body(&mut self) -> &mut super::common::AnyValue { + if self.body.is_none() { + self.body.set_default(); + } + self.body.as_mut().unwrap() + } + + // Take field + pub fn take_body(&mut self) -> super::common::AnyValue { + self.body.take().unwrap_or_else(|| super::common::AnyValue::new()) + } + + // repeated .opentelemetry.proto.common.v1.KeyValue attributes = 6; + + + pub fn get_attributes(&self) -> &[super::common::KeyValue] { + &self.attributes + } + pub fn clear_attributes(&mut self) { + self.attributes.clear(); + } + + // Param is passed by value, moved + pub fn set_attributes(&mut self, v: ::protobuf::RepeatedField) { + self.attributes = v; + } + + // Mutable pointer to the field. 
+ pub fn mut_attributes(&mut self) -> &mut ::protobuf::RepeatedField { + &mut self.attributes + } + + // Take field + pub fn take_attributes(&mut self) -> ::protobuf::RepeatedField { + ::std::mem::replace(&mut self.attributes, ::protobuf::RepeatedField::new()) + } + + // uint32 dropped_attributes_count = 7; + + + pub fn get_dropped_attributes_count(&self) -> u32 { + self.dropped_attributes_count + } + pub fn clear_dropped_attributes_count(&mut self) { + self.dropped_attributes_count = 0; + } + + // Param is passed by value, moved + pub fn set_dropped_attributes_count(&mut self, v: u32) { + self.dropped_attributes_count = v; + } + + // fixed32 flags = 8; + + + pub fn get_flags(&self) -> u32 { + self.flags + } + pub fn clear_flags(&mut self) { + self.flags = 0; + } + + // Param is passed by value, moved + pub fn set_flags(&mut self, v: u32) { + self.flags = v; + } + + // bytes trace_id = 9; + + + pub fn get_trace_id(&self) -> &[u8] { + &self.trace_id + } + pub fn clear_trace_id(&mut self) { + self.trace_id.clear(); + } + + // Param is passed by value, moved + pub fn set_trace_id(&mut self, v: ::std::vec::Vec) { + self.trace_id = v; + } + + // Mutable pointer to the field. + // If field is not initialized, it is initialized with default value first. + pub fn mut_trace_id(&mut self) -> &mut ::std::vec::Vec { + &mut self.trace_id + } + + // Take field + pub fn take_trace_id(&mut self) -> ::std::vec::Vec { + ::std::mem::replace(&mut self.trace_id, ::std::vec::Vec::new()) + } + + // bytes span_id = 10; + + + pub fn get_span_id(&self) -> &[u8] { + &self.span_id + } + pub fn clear_span_id(&mut self) { + self.span_id.clear(); + } + + // Param is passed by value, moved + pub fn set_span_id(&mut self, v: ::std::vec::Vec) { + self.span_id = v; + } + + // Mutable pointer to the field. + // If field is not initialized, it is initialized with default value first. 
+ pub fn mut_span_id(&mut self) -> &mut ::std::vec::Vec { + &mut self.span_id + } + + // Take field + pub fn take_span_id(&mut self) -> ::std::vec::Vec { + ::std::mem::replace(&mut self.span_id, ::std::vec::Vec::new()) + } +} + +impl ::protobuf::Message for LogRecord { + fn is_initialized(&self) -> bool { + for v in &self.body { + if !v.is_initialized() { + return false; + } + }; + for v in &self.attributes { + if !v.is_initialized() { + return false; + } + }; + true + } + + fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> { + while !is.eof()? { + let (field_number, wire_type) = is.read_tag_unpack()?; + match field_number { + 1 => { + if wire_type != ::protobuf::wire_format::WireTypeFixed64 { + return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); + } + let tmp = is.read_fixed64()?; + self.time_unix_nano = tmp; + }, + 11 => { + if wire_type != ::protobuf::wire_format::WireTypeFixed64 { + return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); + } + let tmp = is.read_fixed64()?; + self.observed_time_unix_nano = tmp; + }, + 2 => { + ::protobuf::rt::read_proto3_enum_with_unknown_fields_into(wire_type, is, &mut self.severity_number, 2, &mut self.unknown_fields)? 
+ }, + 3 => { + ::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.severity_text)?; + }, + 5 => { + ::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.body)?; + }, + 6 => { + ::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.attributes)?; + }, + 7 => { + if wire_type != ::protobuf::wire_format::WireTypeVarint { + return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); + } + let tmp = is.read_uint32()?; + self.dropped_attributes_count = tmp; + }, + 8 => { + if wire_type != ::protobuf::wire_format::WireTypeFixed32 { + return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); + } + let tmp = is.read_fixed32()?; + self.flags = tmp; + }, + 9 => { + ::protobuf::rt::read_singular_proto3_bytes_into(wire_type, is, &mut self.trace_id)?; + }, + 10 => { + ::protobuf::rt::read_singular_proto3_bytes_into(wire_type, is, &mut self.span_id)?; + }, + _ => { + ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; + }, + }; + } + ::std::result::Result::Ok(()) + } + + // Compute sizes of nested messages + #[allow(unused_variables)] + fn compute_size(&self) -> u32 { + let mut my_size = 0; + if self.time_unix_nano != 0 { + my_size += 9; + } + if self.observed_time_unix_nano != 0 { + my_size += 9; + } + if self.severity_number != SeverityNumber::SEVERITY_NUMBER_UNSPECIFIED { + my_size += ::protobuf::rt::enum_size(2, self.severity_number); + } + if !self.severity_text.is_empty() { + my_size += ::protobuf::rt::string_size(3, &self.severity_text); + } + if let Some(ref v) = self.body.as_ref() { + let len = v.compute_size(); + my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; + } + for value in &self.attributes { + let len = value.compute_size(); + my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; + }; + if self.dropped_attributes_count != 0 { + my_size += ::protobuf::rt::value_size(7, 
self.dropped_attributes_count, ::protobuf::wire_format::WireTypeVarint); + } + if self.flags != 0 { + my_size += 5; + } + if !self.trace_id.is_empty() { + my_size += ::protobuf::rt::bytes_size(9, &self.trace_id); + } + if !self.span_id.is_empty() { + my_size += ::protobuf::rt::bytes_size(10, &self.span_id); + } + my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); + self.cached_size.set(my_size); + my_size + } + + fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> { + if self.time_unix_nano != 0 { + os.write_fixed64(1, self.time_unix_nano)?; + } + if self.observed_time_unix_nano != 0 { + os.write_fixed64(11, self.observed_time_unix_nano)?; + } + if self.severity_number != SeverityNumber::SEVERITY_NUMBER_UNSPECIFIED { + os.write_enum(2, ::protobuf::ProtobufEnum::value(&self.severity_number))?; + } + if !self.severity_text.is_empty() { + os.write_string(3, &self.severity_text)?; + } + if let Some(ref v) = self.body.as_ref() { + os.write_tag(5, ::protobuf::wire_format::WireTypeLengthDelimited)?; + os.write_raw_varint32(v.get_cached_size())?; + v.write_to_with_cached_sizes(os)?; + } + for v in &self.attributes { + os.write_tag(6, ::protobuf::wire_format::WireTypeLengthDelimited)?; + os.write_raw_varint32(v.get_cached_size())?; + v.write_to_with_cached_sizes(os)?; + }; + if self.dropped_attributes_count != 0 { + os.write_uint32(7, self.dropped_attributes_count)?; + } + if self.flags != 0 { + os.write_fixed32(8, self.flags)?; + } + if !self.trace_id.is_empty() { + os.write_bytes(9, &self.trace_id)?; + } + if !self.span_id.is_empty() { + os.write_bytes(10, &self.span_id)?; + } + os.write_unknown_fields(self.get_unknown_fields())?; + ::std::result::Result::Ok(()) + } + + fn get_cached_size(&self) -> u32 { + self.cached_size.get() + } + + fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { + &self.unknown_fields + } + + fn mut_unknown_fields(&mut self) -> &mut 
::protobuf::UnknownFields { + &mut self.unknown_fields + } + + fn as_any(&self) -> &dyn (::std::any::Any) { + self as &dyn (::std::any::Any) + } + fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) { + self as &mut dyn (::std::any::Any) + } + fn into_any(self: ::std::boxed::Box) -> ::std::boxed::Box { + self + } + + fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { + Self::descriptor_static() + } + + fn new() -> LogRecord { + LogRecord::new() + } + + fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor { + static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT; + descriptor.get(|| { + let mut fields = ::std::vec::Vec::new(); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeFixed64>( + "time_unix_nano", + |m: &LogRecord| { &m.time_unix_nano }, + |m: &mut LogRecord| { &mut m.time_unix_nano }, + )); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeFixed64>( + "observed_time_unix_nano", + |m: &LogRecord| { &m.observed_time_unix_nano }, + |m: &mut LogRecord| { &mut m.observed_time_unix_nano }, + )); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeEnum>( + "severity_number", + |m: &LogRecord| { &m.severity_number }, + |m: &mut LogRecord| { &mut m.severity_number }, + )); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>( + "severity_text", + |m: &LogRecord| { &m.severity_text }, + |m: &mut LogRecord| { &mut m.severity_text }, + )); + fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage>( + "body", + |m: &LogRecord| { &m.body }, + |m: &mut LogRecord| { &mut m.body }, + )); + fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, 
::protobuf::types::ProtobufTypeMessage>( + "attributes", + |m: &LogRecord| { &m.attributes }, + |m: &mut LogRecord| { &mut m.attributes }, + )); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeUint32>( + "dropped_attributes_count", + |m: &LogRecord| { &m.dropped_attributes_count }, + |m: &mut LogRecord| { &mut m.dropped_attributes_count }, + )); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeFixed32>( + "flags", + |m: &LogRecord| { &m.flags }, + |m: &mut LogRecord| { &mut m.flags }, + )); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBytes>( + "trace_id", + |m: &LogRecord| { &m.trace_id }, + |m: &mut LogRecord| { &mut m.trace_id }, + )); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBytes>( + "span_id", + |m: &LogRecord| { &m.span_id }, + |m: &mut LogRecord| { &mut m.span_id }, + )); + ::protobuf::reflect::MessageDescriptor::new_pb_name::( + "LogRecord", + fields, + file_descriptor_proto() + ) + }) + } + + fn default_instance() -> &'static LogRecord { + static instance: ::protobuf::rt::LazyV2 = ::protobuf::rt::LazyV2::INIT; + instance.get(LogRecord::new) + } +} + +impl ::protobuf::Clear for LogRecord { + fn clear(&mut self) { + self.time_unix_nano = 0; + self.observed_time_unix_nano = 0; + self.severity_number = SeverityNumber::SEVERITY_NUMBER_UNSPECIFIED; + self.severity_text.clear(); + self.body.clear(); + self.attributes.clear(); + self.dropped_attributes_count = 0; + self.flags = 0; + self.trace_id.clear(); + self.span_id.clear(); + self.unknown_fields.clear(); + } +} + +impl ::std::fmt::Debug for LogRecord { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + ::protobuf::text_format::fmt(self, f) + } +} + +impl ::protobuf::reflect::ProtobufValue for LogRecord { + fn as_ref(&self) -> 
::protobuf::reflect::ReflectValueRef { + ::protobuf::reflect::ReflectValueRef::Message(self) + } +} + +#[derive(Clone,PartialEq,Eq,Debug,Hash)] +#[cfg_attr(feature = "with-serde", derive(::serde::Serialize, ::serde::Deserialize))] +pub enum SeverityNumber { + SEVERITY_NUMBER_UNSPECIFIED = 0, + SEVERITY_NUMBER_TRACE = 1, + SEVERITY_NUMBER_TRACE2 = 2, + SEVERITY_NUMBER_TRACE3 = 3, + SEVERITY_NUMBER_TRACE4 = 4, + SEVERITY_NUMBER_DEBUG = 5, + SEVERITY_NUMBER_DEBUG2 = 6, + SEVERITY_NUMBER_DEBUG3 = 7, + SEVERITY_NUMBER_DEBUG4 = 8, + SEVERITY_NUMBER_INFO = 9, + SEVERITY_NUMBER_INFO2 = 10, + SEVERITY_NUMBER_INFO3 = 11, + SEVERITY_NUMBER_INFO4 = 12, + SEVERITY_NUMBER_WARN = 13, + SEVERITY_NUMBER_WARN2 = 14, + SEVERITY_NUMBER_WARN3 = 15, + SEVERITY_NUMBER_WARN4 = 16, + SEVERITY_NUMBER_ERROR = 17, + SEVERITY_NUMBER_ERROR2 = 18, + SEVERITY_NUMBER_ERROR3 = 19, + SEVERITY_NUMBER_ERROR4 = 20, + SEVERITY_NUMBER_FATAL = 21, + SEVERITY_NUMBER_FATAL2 = 22, + SEVERITY_NUMBER_FATAL3 = 23, + SEVERITY_NUMBER_FATAL4 = 24, +} + +impl ::protobuf::ProtobufEnum for SeverityNumber { + fn value(&self) -> i32 { + *self as i32 + } + + fn from_i32(value: i32) -> ::std::option::Option { + match value { + 0 => ::std::option::Option::Some(SeverityNumber::SEVERITY_NUMBER_UNSPECIFIED), + 1 => ::std::option::Option::Some(SeverityNumber::SEVERITY_NUMBER_TRACE), + 2 => ::std::option::Option::Some(SeverityNumber::SEVERITY_NUMBER_TRACE2), + 3 => ::std::option::Option::Some(SeverityNumber::SEVERITY_NUMBER_TRACE3), + 4 => ::std::option::Option::Some(SeverityNumber::SEVERITY_NUMBER_TRACE4), + 5 => ::std::option::Option::Some(SeverityNumber::SEVERITY_NUMBER_DEBUG), + 6 => ::std::option::Option::Some(SeverityNumber::SEVERITY_NUMBER_DEBUG2), + 7 => ::std::option::Option::Some(SeverityNumber::SEVERITY_NUMBER_DEBUG3), + 8 => ::std::option::Option::Some(SeverityNumber::SEVERITY_NUMBER_DEBUG4), + 9 => ::std::option::Option::Some(SeverityNumber::SEVERITY_NUMBER_INFO), + 10 => 
::std::option::Option::Some(SeverityNumber::SEVERITY_NUMBER_INFO2), + 11 => ::std::option::Option::Some(SeverityNumber::SEVERITY_NUMBER_INFO3), + 12 => ::std::option::Option::Some(SeverityNumber::SEVERITY_NUMBER_INFO4), + 13 => ::std::option::Option::Some(SeverityNumber::SEVERITY_NUMBER_WARN), + 14 => ::std::option::Option::Some(SeverityNumber::SEVERITY_NUMBER_WARN2), + 15 => ::std::option::Option::Some(SeverityNumber::SEVERITY_NUMBER_WARN3), + 16 => ::std::option::Option::Some(SeverityNumber::SEVERITY_NUMBER_WARN4), + 17 => ::std::option::Option::Some(SeverityNumber::SEVERITY_NUMBER_ERROR), + 18 => ::std::option::Option::Some(SeverityNumber::SEVERITY_NUMBER_ERROR2), + 19 => ::std::option::Option::Some(SeverityNumber::SEVERITY_NUMBER_ERROR3), + 20 => ::std::option::Option::Some(SeverityNumber::SEVERITY_NUMBER_ERROR4), + 21 => ::std::option::Option::Some(SeverityNumber::SEVERITY_NUMBER_FATAL), + 22 => ::std::option::Option::Some(SeverityNumber::SEVERITY_NUMBER_FATAL2), + 23 => ::std::option::Option::Some(SeverityNumber::SEVERITY_NUMBER_FATAL3), + 24 => ::std::option::Option::Some(SeverityNumber::SEVERITY_NUMBER_FATAL4), + _ => ::std::option::Option::None + } + } + + fn values() -> &'static [Self] { + static values: &'static [SeverityNumber] = &[ + SeverityNumber::SEVERITY_NUMBER_UNSPECIFIED, + SeverityNumber::SEVERITY_NUMBER_TRACE, + SeverityNumber::SEVERITY_NUMBER_TRACE2, + SeverityNumber::SEVERITY_NUMBER_TRACE3, + SeverityNumber::SEVERITY_NUMBER_TRACE4, + SeverityNumber::SEVERITY_NUMBER_DEBUG, + SeverityNumber::SEVERITY_NUMBER_DEBUG2, + SeverityNumber::SEVERITY_NUMBER_DEBUG3, + SeverityNumber::SEVERITY_NUMBER_DEBUG4, + SeverityNumber::SEVERITY_NUMBER_INFO, + SeverityNumber::SEVERITY_NUMBER_INFO2, + SeverityNumber::SEVERITY_NUMBER_INFO3, + SeverityNumber::SEVERITY_NUMBER_INFO4, + SeverityNumber::SEVERITY_NUMBER_WARN, + SeverityNumber::SEVERITY_NUMBER_WARN2, + SeverityNumber::SEVERITY_NUMBER_WARN3, + SeverityNumber::SEVERITY_NUMBER_WARN4, + 
SeverityNumber::SEVERITY_NUMBER_ERROR, + SeverityNumber::SEVERITY_NUMBER_ERROR2, + SeverityNumber::SEVERITY_NUMBER_ERROR3, + SeverityNumber::SEVERITY_NUMBER_ERROR4, + SeverityNumber::SEVERITY_NUMBER_FATAL, + SeverityNumber::SEVERITY_NUMBER_FATAL2, + SeverityNumber::SEVERITY_NUMBER_FATAL3, + SeverityNumber::SEVERITY_NUMBER_FATAL4, + ]; + values + } + + fn enum_descriptor_static() -> &'static ::protobuf::reflect::EnumDescriptor { + static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::EnumDescriptor> = ::protobuf::rt::LazyV2::INIT; + descriptor.get(|| { + ::protobuf::reflect::EnumDescriptor::new_pb_name::("SeverityNumber", file_descriptor_proto()) + }) + } +} + +impl ::std::marker::Copy for SeverityNumber { +} + +impl ::std::default::Default for SeverityNumber { + fn default() -> Self { + SeverityNumber::SEVERITY_NUMBER_UNSPECIFIED + } +} + +impl ::protobuf::reflect::ProtobufValue for SeverityNumber { + fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef { + ::protobuf::reflect::ReflectValueRef::Enum(::protobuf::ProtobufEnum::descriptor(self)) + } +} + +#[derive(Clone,PartialEq,Eq,Debug,Hash)] +#[cfg_attr(feature = "with-serde", derive(::serde::Serialize, ::serde::Deserialize))] +pub enum LogRecordFlags { + LOG_RECORD_FLAG_UNSPECIFIED = 0, + LOG_RECORD_FLAG_TRACE_FLAGS_MASK = 255, +} + +impl ::protobuf::ProtobufEnum for LogRecordFlags { + fn value(&self) -> i32 { + *self as i32 + } + + fn from_i32(value: i32) -> ::std::option::Option { + match value { + 0 => ::std::option::Option::Some(LogRecordFlags::LOG_RECORD_FLAG_UNSPECIFIED), + 255 => ::std::option::Option::Some(LogRecordFlags::LOG_RECORD_FLAG_TRACE_FLAGS_MASK), + _ => ::std::option::Option::None + } + } + + fn values() -> &'static [Self] { + static values: &'static [LogRecordFlags] = &[ + LogRecordFlags::LOG_RECORD_FLAG_UNSPECIFIED, + LogRecordFlags::LOG_RECORD_FLAG_TRACE_FLAGS_MASK, + ]; + values + } + + fn enum_descriptor_static() -> &'static ::protobuf::reflect::EnumDescriptor { + static 
descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::EnumDescriptor> = ::protobuf::rt::LazyV2::INIT; + descriptor.get(|| { + ::protobuf::reflect::EnumDescriptor::new_pb_name::("LogRecordFlags", file_descriptor_proto()) + }) + } +} + +impl ::std::marker::Copy for LogRecordFlags { +} + +impl ::std::default::Default for LogRecordFlags { + fn default() -> Self { + LogRecordFlags::LOG_RECORD_FLAG_UNSPECIFIED + } +} + +impl ::protobuf::reflect::ProtobufValue for LogRecordFlags { + fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef { + ::protobuf::reflect::ReflectValueRef::Enum(::protobuf::ProtobufEnum::descriptor(self)) + } +} + +static file_descriptor_proto_data: &'static [u8] = b"\ + \n&opentelemetry/proto/logs/v1/logs.proto\x12\x1bopentelemetry.proto.log\ + s.v1\x1a*opentelemetry/proto/common/v1/common.proto\x1a.opentelemetry/pr\ + oto/resource/v1/resource.proto\"Z\n\x08LogsData\x12N\n\rresource_logs\ + \x18\x01\x20\x03(\x0b2).opentelemetry.proto.logs.v1.ResourceLogsR\x0cres\ + ourceLogs\"\xc3\x01\n\x0cResourceLogs\x12E\n\x08resource\x18\x01\x20\x01\ + (\x0b2).opentelemetry.proto.resource.v1.ResourceR\x08resource\x12E\n\nsc\ + ope_logs\x18\x02\x20\x03(\x0b2&.opentelemetry.proto.logs.v1.ScopeLogsR\t\ + scopeLogs\x12\x1d\n\nschema_url\x18\x03\x20\x01(\tR\tschemaUrlJ\x06\x08\ + \xe8\x07\x10\xe9\x07\"\xbe\x01\n\tScopeLogs\x12I\n\x05scope\x18\x01\x20\ + \x01(\x0b23.opentelemetry.proto.common.v1.InstrumentationScopeR\x05scope\ + \x12G\n\x0blog_records\x18\x02\x20\x03(\x0b2&.opentelemetry.proto.logs.v\ + 1.LogRecordR\nlogRecords\x12\x1d\n\nschema_url\x18\x03\x20\x01(\tR\tsche\ + maUrl\"\xf3\x03\n\tLogRecord\x12$\n\x0etime_unix_nano\x18\x01\x20\x01(\ + \x06R\x0ctimeUnixNano\x125\n\x17observed_time_unix_nano\x18\x0b\x20\x01(\ + \x06R\x14observedTimeUnixNano\x12T\n\x0fseverity_number\x18\x02\x20\x01(\ + \x0e2+.opentelemetry.proto.logs.v1.SeverityNumberR\x0eseverityNumber\x12\ + #\n\rseverity_text\x18\x03\x20\x01(\tR\x0cseverityText\x12;\n\x04body\ + 
\x18\x05\x20\x01(\x0b2'.opentelemetry.proto.common.v1.AnyValueR\x04body\ + \x12G\n\nattributes\x18\x06\x20\x03(\x0b2'.opentelemetry.proto.common.v1\ + .KeyValueR\nattributes\x128\n\x18dropped_attributes_count\x18\x07\x20\ + \x01(\rR\x16droppedAttributesCount\x12\x14\n\x05flags\x18\x08\x20\x01(\ + \x07R\x05flags\x12\x19\n\x08trace_id\x18\t\x20\x01(\x0cR\x07traceId\x12\ + \x17\n\x07span_id\x18\n\x20\x01(\x0cR\x06spanIdJ\x04\x08\x04\x10\x05*\ + \xc3\x05\n\x0eSeverityNumber\x12\x1f\n\x1bSEVERITY_NUMBER_UNSPECIFIED\ + \x10\0\x12\x19\n\x15SEVERITY_NUMBER_TRACE\x10\x01\x12\x1a\n\x16SEVERITY_\ + NUMBER_TRACE2\x10\x02\x12\x1a\n\x16SEVERITY_NUMBER_TRACE3\x10\x03\x12\ + \x1a\n\x16SEVERITY_NUMBER_TRACE4\x10\x04\x12\x19\n\x15SEVERITY_NUMBER_DE\ + BUG\x10\x05\x12\x1a\n\x16SEVERITY_NUMBER_DEBUG2\x10\x06\x12\x1a\n\x16SEV\ + ERITY_NUMBER_DEBUG3\x10\x07\x12\x1a\n\x16SEVERITY_NUMBER_DEBUG4\x10\x08\ + \x12\x18\n\x14SEVERITY_NUMBER_INFO\x10\t\x12\x19\n\x15SEVERITY_NUMBER_IN\ + FO2\x10\n\x12\x19\n\x15SEVERITY_NUMBER_INFO3\x10\x0b\x12\x19\n\x15SEVERI\ + TY_NUMBER_INFO4\x10\x0c\x12\x18\n\x14SEVERITY_NUMBER_WARN\x10\r\x12\x19\ + \n\x15SEVERITY_NUMBER_WARN2\x10\x0e\x12\x19\n\x15SEVERITY_NUMBER_WARN3\ + \x10\x0f\x12\x19\n\x15SEVERITY_NUMBER_WARN4\x10\x10\x12\x19\n\x15SEVERIT\ + Y_NUMBER_ERROR\x10\x11\x12\x1a\n\x16SEVERITY_NUMBER_ERROR2\x10\x12\x12\ + \x1a\n\x16SEVERITY_NUMBER_ERROR3\x10\x13\x12\x1a\n\x16SEVERITY_NUMBER_ER\ + ROR4\x10\x14\x12\x19\n\x15SEVERITY_NUMBER_FATAL\x10\x15\x12\x1a\n\x16SEV\ + ERITY_NUMBER_FATAL2\x10\x16\x12\x1a\n\x16SEVERITY_NUMBER_FATAL3\x10\x17\ + \x12\x1a\n\x16SEVERITY_NUMBER_FATAL4\x10\x18*X\n\x0eLogRecordFlags\x12\ + \x1f\n\x1bLOG_RECORD_FLAG_UNSPECIFIED\x10\0\x12%\n\x20LOG_RECORD_FLAG_TR\ + ACE_FLAGS_MASK\x10\xff\x01Bs\n\x1eio.opentelemetry.proto.logs.v1B\tLogsP\ + rotoP\x01Z&go.opentelemetry.io/proto/otlp/logs/v1\xaa\x02\x1bOpenTelemet\ + ry.Proto.Logs.V1b\x06proto3\ +"; + +static file_descriptor_proto_lazy: 
::protobuf::rt::LazyV2<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::rt::LazyV2::INIT; + +fn parse_descriptor_proto() -> ::protobuf::descriptor::FileDescriptorProto { + ::protobuf::Message::parse_from_bytes(file_descriptor_proto_data).unwrap() +} + +pub fn file_descriptor_proto() -> &'static ::protobuf::descriptor::FileDescriptorProto { + file_descriptor_proto_lazy.get(|| { + parse_descriptor_proto() + }) +} diff --git a/opentelemetry-proto/src/proto/grpcio/logs_service.rs b/opentelemetry-proto/src/proto/grpcio/logs_service.rs new file mode 100644 index 0000000000..81010efb2c --- /dev/null +++ b/opentelemetry-proto/src/proto/grpcio/logs_service.rs @@ -0,0 +1,596 @@ +// This file is generated by rust-protobuf 2.28.0. Do not edit +// @generated + +// https://github.com/rust-lang/rust-clippy/issues/702 +#![allow(unknown_lints)] +#![allow(clippy::all)] + +#![allow(unused_attributes)] +#![cfg_attr(rustfmt, rustfmt::skip)] + +#![allow(box_pointers)] +#![allow(dead_code)] +#![allow(missing_docs)] +#![allow(non_camel_case_types)] +#![allow(non_snake_case)] +#![allow(non_upper_case_globals)] +#![allow(trivial_casts)] +#![allow(unused_imports)] +#![allow(unused_results)] +//! Generated file from `opentelemetry/proto/collector/logs/v1/logs_service.proto` + +/// Generated files are compatible only with the same version +/// of protobuf runtime. 
+// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_28_0; + +#[derive(PartialEq,Clone,Default)] +#[cfg_attr(feature = "with-serde", derive(::serde::Serialize, ::serde::Deserialize))] +pub struct ExportLogsServiceRequest { + // message fields + pub resource_logs: ::protobuf::RepeatedField, + // special fields + #[cfg_attr(feature = "with-serde", serde(skip))] + pub unknown_fields: ::protobuf::UnknownFields, + #[cfg_attr(feature = "with-serde", serde(skip))] + pub cached_size: ::protobuf::CachedSize, +} + +impl<'a> ::std::default::Default for &'a ExportLogsServiceRequest { + fn default() -> &'a ExportLogsServiceRequest { + ::default_instance() + } +} + +impl ExportLogsServiceRequest { + pub fn new() -> ExportLogsServiceRequest { + ::std::default::Default::default() + } + + // repeated .opentelemetry.proto.logs.v1.ResourceLogs resource_logs = 1; + + + pub fn get_resource_logs(&self) -> &[super::logs::ResourceLogs] { + &self.resource_logs + } + pub fn clear_resource_logs(&mut self) { + self.resource_logs.clear(); + } + + // Param is passed by value, moved + pub fn set_resource_logs(&mut self, v: ::protobuf::RepeatedField) { + self.resource_logs = v; + } + + // Mutable pointer to the field. + pub fn mut_resource_logs(&mut self) -> &mut ::protobuf::RepeatedField { + &mut self.resource_logs + } + + // Take field + pub fn take_resource_logs(&mut self) -> ::protobuf::RepeatedField { + ::std::mem::replace(&mut self.resource_logs, ::protobuf::RepeatedField::new()) + } +} + +impl ::protobuf::Message for ExportLogsServiceRequest { + fn is_initialized(&self) -> bool { + for v in &self.resource_logs { + if !v.is_initialized() { + return false; + } + }; + true + } + + fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> { + while !is.eof()? 
{ + let (field_number, wire_type) = is.read_tag_unpack()?; + match field_number { + 1 => { + ::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.resource_logs)?; + }, + _ => { + ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; + }, + }; + } + ::std::result::Result::Ok(()) + } + + // Compute sizes of nested messages + #[allow(unused_variables)] + fn compute_size(&self) -> u32 { + let mut my_size = 0; + for value in &self.resource_logs { + let len = value.compute_size(); + my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; + }; + my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); + self.cached_size.set(my_size); + my_size + } + + fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> { + for v in &self.resource_logs { + os.write_tag(1, ::protobuf::wire_format::WireTypeLengthDelimited)?; + os.write_raw_varint32(v.get_cached_size())?; + v.write_to_with_cached_sizes(os)?; + }; + os.write_unknown_fields(self.get_unknown_fields())?; + ::std::result::Result::Ok(()) + } + + fn get_cached_size(&self) -> u32 { + self.cached_size.get() + } + + fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { + &self.unknown_fields + } + + fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { + &mut self.unknown_fields + } + + fn as_any(&self) -> &dyn (::std::any::Any) { + self as &dyn (::std::any::Any) + } + fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) { + self as &mut dyn (::std::any::Any) + } + fn into_any(self: ::std::boxed::Box) -> ::std::boxed::Box { + self + } + + fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { + Self::descriptor_static() + } + + fn new() -> ExportLogsServiceRequest { + ExportLogsServiceRequest::new() + } + + fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor { + static descriptor: 
::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT; + descriptor.get(|| { + let mut fields = ::std::vec::Vec::new(); + fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage>( + "resource_logs", + |m: &ExportLogsServiceRequest| { &m.resource_logs }, + |m: &mut ExportLogsServiceRequest| { &mut m.resource_logs }, + )); + ::protobuf::reflect::MessageDescriptor::new_pb_name::( + "ExportLogsServiceRequest", + fields, + file_descriptor_proto() + ) + }) + } + + fn default_instance() -> &'static ExportLogsServiceRequest { + static instance: ::protobuf::rt::LazyV2 = ::protobuf::rt::LazyV2::INIT; + instance.get(ExportLogsServiceRequest::new) + } +} + +impl ::protobuf::Clear for ExportLogsServiceRequest { + fn clear(&mut self) { + self.resource_logs.clear(); + self.unknown_fields.clear(); + } +} + +impl ::std::fmt::Debug for ExportLogsServiceRequest { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + ::protobuf::text_format::fmt(self, f) + } +} + +impl ::protobuf::reflect::ProtobufValue for ExportLogsServiceRequest { + fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef { + ::protobuf::reflect::ReflectValueRef::Message(self) + } +} + +#[derive(PartialEq,Clone,Default)] +#[cfg_attr(feature = "with-serde", derive(::serde::Serialize, ::serde::Deserialize))] +pub struct ExportLogsServiceResponse { + // message fields + pub partial_success: ::protobuf::SingularPtrField, + // special fields + #[cfg_attr(feature = "with-serde", serde(skip))] + pub unknown_fields: ::protobuf::UnknownFields, + #[cfg_attr(feature = "with-serde", serde(skip))] + pub cached_size: ::protobuf::CachedSize, +} + +impl<'a> ::std::default::Default for &'a ExportLogsServiceResponse { + fn default() -> &'a ExportLogsServiceResponse { + ::default_instance() + } +} + +impl ExportLogsServiceResponse { + pub fn new() -> ExportLogsServiceResponse { + 
::std::default::Default::default() + } + + // .opentelemetry.proto.collector.logs.v1.ExportLogsPartialSuccess partial_success = 1; + + + pub fn get_partial_success(&self) -> &ExportLogsPartialSuccess { + self.partial_success.as_ref().unwrap_or_else(|| ::default_instance()) + } + pub fn clear_partial_success(&mut self) { + self.partial_success.clear(); + } + + pub fn has_partial_success(&self) -> bool { + self.partial_success.is_some() + } + + // Param is passed by value, moved + pub fn set_partial_success(&mut self, v: ExportLogsPartialSuccess) { + self.partial_success = ::protobuf::SingularPtrField::some(v); + } + + // Mutable pointer to the field. + // If field is not initialized, it is initialized with default value first. + pub fn mut_partial_success(&mut self) -> &mut ExportLogsPartialSuccess { + if self.partial_success.is_none() { + self.partial_success.set_default(); + } + self.partial_success.as_mut().unwrap() + } + + // Take field + pub fn take_partial_success(&mut self) -> ExportLogsPartialSuccess { + self.partial_success.take().unwrap_or_else(|| ExportLogsPartialSuccess::new()) + } +} + +impl ::protobuf::Message for ExportLogsServiceResponse { + fn is_initialized(&self) -> bool { + for v in &self.partial_success { + if !v.is_initialized() { + return false; + } + }; + true + } + + fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> { + while !is.eof()? 
{ + let (field_number, wire_type) = is.read_tag_unpack()?; + match field_number { + 1 => { + ::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.partial_success)?; + }, + _ => { + ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; + }, + }; + } + ::std::result::Result::Ok(()) + } + + // Compute sizes of nested messages + #[allow(unused_variables)] + fn compute_size(&self) -> u32 { + let mut my_size = 0; + if let Some(ref v) = self.partial_success.as_ref() { + let len = v.compute_size(); + my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len; + } + my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); + self.cached_size.set(my_size); + my_size + } + + fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> { + if let Some(ref v) = self.partial_success.as_ref() { + os.write_tag(1, ::protobuf::wire_format::WireTypeLengthDelimited)?; + os.write_raw_varint32(v.get_cached_size())?; + v.write_to_with_cached_sizes(os)?; + } + os.write_unknown_fields(self.get_unknown_fields())?; + ::std::result::Result::Ok(()) + } + + fn get_cached_size(&self) -> u32 { + self.cached_size.get() + } + + fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { + &self.unknown_fields + } + + fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { + &mut self.unknown_fields + } + + fn as_any(&self) -> &dyn (::std::any::Any) { + self as &dyn (::std::any::Any) + } + fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) { + self as &mut dyn (::std::any::Any) + } + fn into_any(self: ::std::boxed::Box) -> ::std::boxed::Box { + self + } + + fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { + Self::descriptor_static() + } + + fn new() -> ExportLogsServiceResponse { + ExportLogsServiceResponse::new() + } + + fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor { + static descriptor: 
::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT; + descriptor.get(|| { + let mut fields = ::std::vec::Vec::new(); + fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage>( + "partial_success", + |m: &ExportLogsServiceResponse| { &m.partial_success }, + |m: &mut ExportLogsServiceResponse| { &mut m.partial_success }, + )); + ::protobuf::reflect::MessageDescriptor::new_pb_name::( + "ExportLogsServiceResponse", + fields, + file_descriptor_proto() + ) + }) + } + + fn default_instance() -> &'static ExportLogsServiceResponse { + static instance: ::protobuf::rt::LazyV2 = ::protobuf::rt::LazyV2::INIT; + instance.get(ExportLogsServiceResponse::new) + } +} + +impl ::protobuf::Clear for ExportLogsServiceResponse { + fn clear(&mut self) { + self.partial_success.clear(); + self.unknown_fields.clear(); + } +} + +impl ::std::fmt::Debug for ExportLogsServiceResponse { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + ::protobuf::text_format::fmt(self, f) + } +} + +impl ::protobuf::reflect::ProtobufValue for ExportLogsServiceResponse { + fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef { + ::protobuf::reflect::ReflectValueRef::Message(self) + } +} + +#[derive(PartialEq,Clone,Default)] +#[cfg_attr(feature = "with-serde", derive(::serde::Serialize, ::serde::Deserialize))] +pub struct ExportLogsPartialSuccess { + // message fields + pub rejected_log_records: i64, + pub error_message: ::std::string::String, + // special fields + #[cfg_attr(feature = "with-serde", serde(skip))] + pub unknown_fields: ::protobuf::UnknownFields, + #[cfg_attr(feature = "with-serde", serde(skip))] + pub cached_size: ::protobuf::CachedSize, +} + +impl<'a> ::std::default::Default for &'a ExportLogsPartialSuccess { + fn default() -> &'a ExportLogsPartialSuccess { + ::default_instance() + } +} + +impl ExportLogsPartialSuccess { + pub fn new() -> 
ExportLogsPartialSuccess { + ::std::default::Default::default() + } + + // int64 rejected_log_records = 1; + + + pub fn get_rejected_log_records(&self) -> i64 { + self.rejected_log_records + } + pub fn clear_rejected_log_records(&mut self) { + self.rejected_log_records = 0; + } + + // Param is passed by value, moved + pub fn set_rejected_log_records(&mut self, v: i64) { + self.rejected_log_records = v; + } + + // string error_message = 2; + + + pub fn get_error_message(&self) -> &str { + &self.error_message + } + pub fn clear_error_message(&mut self) { + self.error_message.clear(); + } + + // Param is passed by value, moved + pub fn set_error_message(&mut self, v: ::std::string::String) { + self.error_message = v; + } + + // Mutable pointer to the field. + // If field is not initialized, it is initialized with default value first. + pub fn mut_error_message(&mut self) -> &mut ::std::string::String { + &mut self.error_message + } + + // Take field + pub fn take_error_message(&mut self) -> ::std::string::String { + ::std::mem::replace(&mut self.error_message, ::std::string::String::new()) + } +} + +impl ::protobuf::Message for ExportLogsPartialSuccess { + fn is_initialized(&self) -> bool { + true + } + + fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> { + while !is.eof()? 
{ + let (field_number, wire_type) = is.read_tag_unpack()?; + match field_number { + 1 => { + if wire_type != ::protobuf::wire_format::WireTypeVarint { + return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type)); + } + let tmp = is.read_int64()?; + self.rejected_log_records = tmp; + }, + 2 => { + ::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.error_message)?; + }, + _ => { + ::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?; + }, + }; + } + ::std::result::Result::Ok(()) + } + + // Compute sizes of nested messages + #[allow(unused_variables)] + fn compute_size(&self) -> u32 { + let mut my_size = 0; + if self.rejected_log_records != 0 { + my_size += ::protobuf::rt::value_size(1, self.rejected_log_records, ::protobuf::wire_format::WireTypeVarint); + } + if !self.error_message.is_empty() { + my_size += ::protobuf::rt::string_size(2, &self.error_message); + } + my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields()); + self.cached_size.set(my_size); + my_size + } + + fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> { + if self.rejected_log_records != 0 { + os.write_int64(1, self.rejected_log_records)?; + } + if !self.error_message.is_empty() { + os.write_string(2, &self.error_message)?; + } + os.write_unknown_fields(self.get_unknown_fields())?; + ::std::result::Result::Ok(()) + } + + fn get_cached_size(&self) -> u32 { + self.cached_size.get() + } + + fn get_unknown_fields(&self) -> &::protobuf::UnknownFields { + &self.unknown_fields + } + + fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields { + &mut self.unknown_fields + } + + fn as_any(&self) -> &dyn (::std::any::Any) { + self as &dyn (::std::any::Any) + } + fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) { + self as &mut dyn (::std::any::Any) + } + fn into_any(self: ::std::boxed::Box) -> ::std::boxed::Box { + self 
+ } + + fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor { + Self::descriptor_static() + } + + fn new() -> ExportLogsPartialSuccess { + ExportLogsPartialSuccess::new() + } + + fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor { + static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT; + descriptor.get(|| { + let mut fields = ::std::vec::Vec::new(); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt64>( + "rejected_log_records", + |m: &ExportLogsPartialSuccess| { &m.rejected_log_records }, + |m: &mut ExportLogsPartialSuccess| { &mut m.rejected_log_records }, + )); + fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>( + "error_message", + |m: &ExportLogsPartialSuccess| { &m.error_message }, + |m: &mut ExportLogsPartialSuccess| { &mut m.error_message }, + )); + ::protobuf::reflect::MessageDescriptor::new_pb_name::( + "ExportLogsPartialSuccess", + fields, + file_descriptor_proto() + ) + }) + } + + fn default_instance() -> &'static ExportLogsPartialSuccess { + static instance: ::protobuf::rt::LazyV2 = ::protobuf::rt::LazyV2::INIT; + instance.get(ExportLogsPartialSuccess::new) + } +} + +impl ::protobuf::Clear for ExportLogsPartialSuccess { + fn clear(&mut self) { + self.rejected_log_records = 0; + self.error_message.clear(); + self.unknown_fields.clear(); + } +} + +impl ::std::fmt::Debug for ExportLogsPartialSuccess { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + ::protobuf::text_format::fmt(self, f) + } +} + +impl ::protobuf::reflect::ProtobufValue for ExportLogsPartialSuccess { + fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef { + ::protobuf::reflect::ReflectValueRef::Message(self) + } +} + +static file_descriptor_proto_data: &'static [u8] = b"\ + 
\n8opentelemetry/proto/collector/logs/v1/logs_service.proto\x12%opentele\ + metry.proto.collector.logs.v1\x1a&opentelemetry/proto/logs/v1/logs.proto\ + \"j\n\x18ExportLogsServiceRequest\x12N\n\rresource_logs\x18\x01\x20\x03(\ + \x0b2).opentelemetry.proto.logs.v1.ResourceLogsR\x0cresourceLogs\"\x85\ + \x01\n\x19ExportLogsServiceResponse\x12h\n\x0fpartial_success\x18\x01\ + \x20\x01(\x0b2?.opentelemetry.proto.collector.logs.v1.ExportLogsPartialS\ + uccessR\x0epartialSuccess\"q\n\x18ExportLogsPartialSuccess\x120\n\x14rej\ + ected_log_records\x18\x01\x20\x01(\x03R\x12rejectedLogRecords\x12#\n\rer\ + ror_message\x18\x02\x20\x01(\tR\x0cerrorMessage2\x9d\x01\n\x0bLogsServic\ + e\x12\x8d\x01\n\x06Export\x12?.opentelemetry.proto.collector.logs.v1.Exp\ + ortLogsServiceRequest\x1a@.opentelemetry.proto.collector.logs.v1.ExportL\ + ogsServiceResponse\"\0B\x98\x01\n(io.opentelemetry.proto.collector.logs.\ + v1B\x10LogsServiceProtoP\x01Z0go.opentelemetry.io/proto/otlp/collector/l\ + ogs/v1\xaa\x02%OpenTelemetry.Proto.Collector.Logs.V1b\x06proto3\ +"; + +static file_descriptor_proto_lazy: ::protobuf::rt::LazyV2<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::rt::LazyV2::INIT; + +fn parse_descriptor_proto() -> ::protobuf::descriptor::FileDescriptorProto { + ::protobuf::Message::parse_from_bytes(file_descriptor_proto_data).unwrap() +} + +pub fn file_descriptor_proto() -> &'static ::protobuf::descriptor::FileDescriptorProto { + file_descriptor_proto_lazy.get(|| { + parse_descriptor_proto() + }) +} diff --git a/opentelemetry-proto/src/proto/grpcio/logs_service_grpc.rs b/opentelemetry-proto/src/proto/grpcio/logs_service_grpc.rs new file mode 100644 index 0000000000..6f91702fa9 --- /dev/null +++ b/opentelemetry-proto/src/proto/grpcio/logs_service_grpc.rs @@ -0,0 +1,69 @@ +// This file is generated. 
Do not edit +// @generated + +// https://github.com/Manishearth/rust-clippy/issues/702 +#![allow(unknown_lints)] +#![allow(clippy::all)] + +#![allow(box_pointers)] +#![allow(dead_code)] +#![allow(missing_docs)] +#![allow(non_camel_case_types)] +#![allow(non_snake_case)] +#![allow(non_upper_case_globals)] +#![allow(trivial_casts)] +#![allow(unsafe_code)] +#![allow(unused_imports)] +#![allow(unused_results)] + +const METHOD_LOGS_SERVICE_EXPORT: ::grpcio::Method = ::grpcio::Method { + ty: ::grpcio::MethodType::Unary, + name: "/opentelemetry.proto.collector.logs.v1.LogsService/Export", + req_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de }, + resp_mar: ::grpcio::Marshaller { ser: ::grpcio::pb_ser, de: ::grpcio::pb_de }, +}; + +#[derive(Clone)] +pub struct LogsServiceClient { + client: ::grpcio::Client, +} + +impl LogsServiceClient { + pub fn new(channel: ::grpcio::Channel) -> Self { + LogsServiceClient { + client: ::grpcio::Client::new(channel), + } + } + + pub fn export_opt(&self, req: &super::logs_service::ExportLogsServiceRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result { + self.client.unary_call(&METHOD_LOGS_SERVICE_EXPORT, req, opt) + } + + pub fn export(&self, req: &super::logs_service::ExportLogsServiceRequest) -> ::grpcio::Result { + self.export_opt(req, ::grpcio::CallOption::default()) + } + + pub fn export_async_opt(&self, req: &super::logs_service::ExportLogsServiceRequest, opt: ::grpcio::CallOption) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver> { + self.client.unary_call_async(&METHOD_LOGS_SERVICE_EXPORT, req, opt) + } + + pub fn export_async(&self, req: &super::logs_service::ExportLogsServiceRequest) -> ::grpcio::Result<::grpcio::ClientUnaryReceiver> { + self.export_async_opt(req, ::grpcio::CallOption::default()) + } + pub fn spawn(&self, f: F) where F: ::futures::Future + Send + 'static { + self.client.spawn(f) + } +} + +pub trait LogsService { + fn export(&mut self, ctx: ::grpcio::RpcContext, req: 
super::logs_service::ExportLogsServiceRequest, sink: ::grpcio::UnarySink); +} + +pub fn create_logs_service(s: S) -> ::grpcio::Service { + let mut builder = ::grpcio::ServiceBuilder::new(); + let mut instance = s; + builder = builder.add_unary_handler(&METHOD_LOGS_SERVICE_EXPORT, move |ctx, req, resp| { + instance.export(ctx, req, resp) + }); + builder.build() +} diff --git a/opentelemetry-proto/src/transform/common.rs b/opentelemetry-proto/src/transform/common.rs index feb1c5e98c..f7215fde2a 100644 --- a/opentelemetry-proto/src/transform/common.rs +++ b/opentelemetry-proto/src/transform/common.rs @@ -1,7 +1,7 @@ -#[cfg(feature = "traces")] +#[cfg(any(feature = "traces", feature = "logs"))] use std::time::{Duration, SystemTime, UNIX_EPOCH}; -#[cfg(feature = "traces")] +#[cfg(any(feature = "traces", feature = "logs"))] pub(crate) fn to_nanos(time: SystemTime) -> u64 { time.duration_since(UNIX_EPOCH) .unwrap_or_else(|_| Duration::from_secs(0)) @@ -17,6 +17,9 @@ pub mod tonic { use opentelemetry_sdk::trace::EvictedHashMap; use std::borrow::Cow; + #[cfg(any(feature = "traces", feature = "logs"))] + use opentelemetry_sdk::Resource; + impl From for InstrumentationScope { fn from(library: opentelemetry_sdk::InstrumentationLibrary) -> Self { InstrumentationScope { @@ -44,6 +47,7 @@ pub mod tonic { } /// Wrapper type for Vec<[`KeyValue`](crate::proto::tonic::common::v1::KeyValue)> + #[derive(Default)] pub struct Attributes(pub ::std::vec::Vec); impl From for Attributes { @@ -73,6 +77,20 @@ pub mod tonic { } } + #[cfg(feature = "logs")] + impl, V: Into> FromIterator<(K, V)> for Attributes { + fn from_iter>(iter: T) -> Self { + Attributes( + iter.into_iter() + .map(|(k, v)| KeyValue { + key: k.into(), + value: Some(v.into()), + }) + .collect(), + ) + } + } + impl From for AnyValue { fn from(value: Value) -> Self { AnyValue { @@ -103,14 +121,25 @@ pub mod tonic { ArrayValue { values } } + + #[cfg(any(feature = "traces", feature = "logs"))] + pub(crate) fn 
resource_attributes(resource: &Resource) -> Attributes { + resource + .iter() + .map(|(k, v)| opentelemetry_api::KeyValue::new(k.clone(), v.clone())) + .collect::>() + .into() + } } #[cfg(feature = "gen-protoc")] pub mod grpcio { use crate::proto::grpcio::common::{AnyValue, ArrayValue, InstrumentationScope, KeyValue}; use opentelemetry_api::{Array, Value}; - use opentelemetry_sdk::trace::EvictedHashMap; + use opentelemetry_sdk::{trace::EvictedHashMap, Resource}; use protobuf::RepeatedField; + #[cfg(feature = "logs")] + use protobuf::SingularPtrField; use std::borrow::Cow; impl From for InstrumentationScope { @@ -123,6 +152,7 @@ pub mod grpcio { } } + #[derive(Default)] pub struct Attributes(pub ::protobuf::RepeatedField); impl From for Attributes { @@ -156,6 +186,21 @@ pub mod grpcio { } } + #[cfg(feature = "logs")] + impl, V: Into> FromIterator<(K, V)> for Attributes { + fn from_iter>(iter: T) -> Self { + Attributes(RepeatedField::from_vec( + iter.into_iter() + .map(|(k, v)| KeyValue { + key: k.into(), + value: SingularPtrField::some(v.into()), + ..Default::default() + }) + .collect(), + )) + } + } + impl From for AnyValue { fn from(value: Value) -> Self { let mut any_value = AnyValue::new(); @@ -190,4 +235,13 @@ pub mod grpcio { array_value.set_values(values); array_value } + + #[cfg(any(feature = "traces", feature = "logs"))] + pub(crate) fn resource_attributes(resource: &Resource) -> Attributes { + resource + .iter() + .map(|(k, v)| opentelemetry_api::KeyValue::new(k.clone(), v.clone())) + .collect::>() + .into() + } } diff --git a/opentelemetry-proto/src/transform/logs.rs b/opentelemetry-proto/src/transform/logs.rs new file mode 100644 index 0000000000..59d984bec5 --- /dev/null +++ b/opentelemetry-proto/src/transform/logs.rs @@ -0,0 +1,290 @@ +use crate::transform::common::to_nanos; + +#[cfg(feature = "gen-tonic")] +pub mod tonic { + use crate::{ + tonic::{ + common::v1::{any_value::Value, AnyValue, ArrayValue, KeyValue, KeyValueList}, + logs::v1::{LogRecord, 
ResourceLogs, ScopeLogs, SeverityNumber}, + resource::v1::Resource, + Attributes, + }, + transform::common::tonic::resource_attributes, + }; + use opentelemetry_api::logs::{AnyValue as LogsAnyValue, Severity}; + + use super::*; + + impl From for AnyValue { + fn from(value: LogsAnyValue) -> Self { + AnyValue { + value: Some(value.into()), + } + } + } + + impl From for Value { + fn from(value: LogsAnyValue) -> Self { + match value { + LogsAnyValue::Double(f) => Value::DoubleValue(f), + LogsAnyValue::Int(i) => Value::IntValue(i), + LogsAnyValue::String(s) => Value::StringValue(s.into()), + LogsAnyValue::Boolean(b) => Value::BoolValue(b), + LogsAnyValue::ListAny(v) => Value::ArrayValue(ArrayValue { + values: v + .into_iter() + .map(|v| AnyValue { + value: Some(v.into()), + }) + .collect(), + }), + LogsAnyValue::Map(m) => Value::KvlistValue(KeyValueList { + values: m + .into_iter() + .map(|(key, value)| KeyValue { + key: key.into(), + value: Some(AnyValue { + value: Some(value.into()), + }), + }) + .collect(), + }), + LogsAnyValue::Bytes(v) => Value::BytesValue(v), + } + } + } + + impl From for LogRecord { + fn from(log_record: opentelemetry_api::logs::LogRecord) -> Self { + let trace_context = log_record.trace_context.as_ref(); + let severity_number = match log_record.severity_number { + Some(Severity::Trace) => SeverityNumber::Trace, + Some(Severity::Trace2) => SeverityNumber::Trace2, + Some(Severity::Trace3) => SeverityNumber::Trace3, + Some(Severity::Trace4) => SeverityNumber::Trace4, + Some(Severity::Debug) => SeverityNumber::Debug, + Some(Severity::Debug2) => SeverityNumber::Debug2, + Some(Severity::Debug3) => SeverityNumber::Debug3, + Some(Severity::Debug4) => SeverityNumber::Debug4, + Some(Severity::Info) => SeverityNumber::Info, + Some(Severity::Info2) => SeverityNumber::Info2, + Some(Severity::Info3) => SeverityNumber::Info3, + Some(Severity::Info4) => SeverityNumber::Info4, + Some(Severity::Warn) => SeverityNumber::Warn, + Some(Severity::Warn2) => 
SeverityNumber::Warn2, + Some(Severity::Warn3) => SeverityNumber::Warn3, + Some(Severity::Warn4) => SeverityNumber::Warn4, + Some(Severity::Error) => SeverityNumber::Error, + Some(Severity::Error2) => SeverityNumber::Error2, + Some(Severity::Error3) => SeverityNumber::Error3, + Some(Severity::Error4) => SeverityNumber::Error4, + Some(Severity::Fatal) => SeverityNumber::Fatal, + Some(Severity::Fatal2) => SeverityNumber::Fatal2, + Some(Severity::Fatal3) => SeverityNumber::Fatal3, + Some(Severity::Fatal4) => SeverityNumber::Fatal4, + None => SeverityNumber::Unspecified, + }; + + LogRecord { + time_unix_nano: log_record.timestamp.map(to_nanos).unwrap_or_default(), + observed_time_unix_nano: log_record + .observed_timestamp + .map(to_nanos) + .unwrap_or_default(), + severity_number: severity_number.into(), + severity_text: log_record.severity_text.map(Into::into).unwrap_or_default(), + body: log_record.body.map(Into::into), + attributes: log_record + .attributes + .map(|attrs| Attributes::from_iter(attrs.into_iter())) + .unwrap_or_default() + .0, + dropped_attributes_count: 0, + flags: trace_context + .map(|ctx| { + ctx.trace_flags + .map(|flags| flags.to_u8() as u32) + .unwrap_or_default() + }) + .unwrap_or_default(), + span_id: trace_context + .map(|ctx| ctx.span_id.to_bytes().to_vec()) + .unwrap_or_default(), + trace_id: trace_context + .map(|ctx| ctx.trace_id.to_bytes().to_vec()) + .unwrap_or_default(), + } + } + } + + impl From for ResourceLogs { + fn from(log_data: opentelemetry_sdk::export::logs::LogData) -> Self { + ResourceLogs { + resource: Some(Resource { + attributes: resource_attributes(&log_data.resource).0, + dropped_attributes_count: 0, + }), + schema_url: "".to_string(), + scope_logs: vec![ScopeLogs { + schema_url: log_data + .instrumentation + .schema_url + .clone() + .map(Into::into) + .unwrap_or_default(), + scope: Some(log_data.instrumentation.into()), + log_records: vec![log_data.record.into()], + }], + } + } + } +} + +#[cfg(feature = 
"gen-protoc")] +pub mod grpcio { + use crate::{ + grpcio::Attributes, + proto::grpcio::{ + common::{AnyValue, AnyValue_oneof_value, ArrayValue, KeyValue, KeyValueList}, + logs::{LogRecord, ResourceLogs, ScopeLogs, SeverityNumber}, + resource::Resource, + }, + transform::common::grpcio::resource_attributes, + }; + use opentelemetry_api::logs::{AnyValue as LogsAnyValue, Severity}; + use protobuf::{RepeatedField, SingularPtrField}; + + use super::*; + + impl From for AnyValue { + fn from(value: LogsAnyValue) -> Self { + AnyValue { + value: Some(value.into()), + ..Default::default() + } + } + } + + impl From for AnyValue_oneof_value { + fn from(value: LogsAnyValue) -> Self { + match value { + LogsAnyValue::Double(f) => AnyValue_oneof_value::double_value(f), + LogsAnyValue::Int(i) => AnyValue_oneof_value::int_value(i), + LogsAnyValue::String(s) => AnyValue_oneof_value::string_value(s.into()), + LogsAnyValue::Boolean(b) => AnyValue_oneof_value::bool_value(b), + LogsAnyValue::ListAny(v) => AnyValue_oneof_value::array_value(ArrayValue { + values: RepeatedField::from_vec( + v.into_iter() + .map(|v| AnyValue { + value: Some(v.into()), + ..Default::default() + }) + .collect(), + ), + ..Default::default() + }), + LogsAnyValue::Map(m) => AnyValue_oneof_value::kvlist_value(KeyValueList { + values: RepeatedField::from_vec( + m.into_iter() + .map(|(key, value)| KeyValue { + key: key.into(), + value: SingularPtrField::some(AnyValue { + value: Some(value.into()), + ..Default::default() + }), + ..Default::default() + }) + .collect(), + ), + ..Default::default() + }), + LogsAnyValue::Bytes(v) => AnyValue_oneof_value::bytes_value(v), + } + } + } + + impl From for LogRecord { + fn from(log_record: opentelemetry_api::logs::LogRecord) -> Self { + let trace_context = log_record.trace_context.as_ref(); + let severity_number = match log_record.severity_number { + Some(Severity::Trace) => SeverityNumber::SEVERITY_NUMBER_TRACE, + Some(Severity::Trace2) => 
SeverityNumber::SEVERITY_NUMBER_TRACE2, + Some(Severity::Trace3) => SeverityNumber::SEVERITY_NUMBER_TRACE3, + Some(Severity::Trace4) => SeverityNumber::SEVERITY_NUMBER_TRACE4, + Some(Severity::Debug) => SeverityNumber::SEVERITY_NUMBER_DEBUG, + Some(Severity::Debug2) => SeverityNumber::SEVERITY_NUMBER_DEBUG2, + Some(Severity::Debug3) => SeverityNumber::SEVERITY_NUMBER_DEBUG3, + Some(Severity::Debug4) => SeverityNumber::SEVERITY_NUMBER_DEBUG4, + Some(Severity::Info) => SeverityNumber::SEVERITY_NUMBER_INFO, + Some(Severity::Info2) => SeverityNumber::SEVERITY_NUMBER_INFO2, + Some(Severity::Info3) => SeverityNumber::SEVERITY_NUMBER_INFO3, + Some(Severity::Info4) => SeverityNumber::SEVERITY_NUMBER_INFO4, + Some(Severity::Warn) => SeverityNumber::SEVERITY_NUMBER_WARN, + Some(Severity::Warn2) => SeverityNumber::SEVERITY_NUMBER_WARN2, + Some(Severity::Warn3) => SeverityNumber::SEVERITY_NUMBER_WARN3, + Some(Severity::Warn4) => SeverityNumber::SEVERITY_NUMBER_WARN4, + Some(Severity::Error) => SeverityNumber::SEVERITY_NUMBER_ERROR, + Some(Severity::Error2) => SeverityNumber::SEVERITY_NUMBER_ERROR2, + Some(Severity::Error3) => SeverityNumber::SEVERITY_NUMBER_ERROR3, + Some(Severity::Error4) => SeverityNumber::SEVERITY_NUMBER_ERROR4, + Some(Severity::Fatal) => SeverityNumber::SEVERITY_NUMBER_FATAL, + Some(Severity::Fatal2) => SeverityNumber::SEVERITY_NUMBER_FATAL2, + Some(Severity::Fatal3) => SeverityNumber::SEVERITY_NUMBER_FATAL3, + Some(Severity::Fatal4) => SeverityNumber::SEVERITY_NUMBER_FATAL4, + None => SeverityNumber::SEVERITY_NUMBER_UNSPECIFIED, + }; + + LogRecord { + time_unix_nano: log_record.timestamp.map(to_nanos).unwrap_or(0), + severity_number, + severity_text: log_record.severity_text.map(Into::into).unwrap_or_default(), + body: log_record.body.map(Into::into).into(), + attributes: log_record + .attributes + .map(|attrs| Attributes::from_iter(attrs.into_iter())) + .unwrap_or_default() + .0, + dropped_attributes_count: 0, + flags: trace_context + .map(|ctx| { + 
ctx.trace_flags + .map(|flags| flags.to_u8() as u32) + .unwrap_or_default() + }) + .unwrap_or_default(), + span_id: trace_context + .map(|ctx| ctx.span_id.to_bytes().to_vec()) + .unwrap_or_default(), + trace_id: trace_context + .map(|ctx| ctx.trace_id.to_bytes().to_vec()) + .unwrap_or_default(), + ..Default::default() + } + } + } + + impl From for ResourceLogs { + fn from(log_data: opentelemetry_sdk::export::logs::LogData) -> Self { + ResourceLogs { + resource: SingularPtrField::some(Resource { + attributes: resource_attributes(&log_data.resource).0, + dropped_attributes_count: 0, + ..Default::default() + }), + schema_url: "".to_string(), + scope_logs: RepeatedField::from_vec(vec![ScopeLogs { + schema_url: log_data + .instrumentation + .schema_url + .clone() + .map(Into::into) + .unwrap_or_default(), + scope: SingularPtrField::some(log_data.instrumentation.into()), + log_records: RepeatedField::from_vec(vec![log_data.record.into()]), + ..Default::default() + }]), + ..Default::default() + } + } + } +} diff --git a/opentelemetry-proto/src/transform/mod.rs b/opentelemetry-proto/src/transform/mod.rs index 985d05d107..36d19281a9 100644 --- a/opentelemetry-proto/src/transform/mod.rs +++ b/opentelemetry-proto/src/transform/mod.rs @@ -6,5 +6,8 @@ pub mod metrics; #[cfg(feature = "traces")] pub mod traces; +#[cfg(feature = "logs")] +pub mod logs; + #[cfg(feature = "zpages")] pub mod tracez; diff --git a/opentelemetry-proto/src/transform/traces.rs b/opentelemetry-proto/src/transform/traces.rs index dae81dd251..939aca647b 100644 --- a/opentelemetry-proto/src/transform/traces.rs +++ b/opentelemetry-proto/src/transform/traces.rs @@ -7,7 +7,7 @@ pub mod tonic { use super::*; use crate::proto::tonic::resource::v1::Resource; use crate::proto::tonic::trace::v1::{span, status, ResourceSpans, ScopeSpans, Span, Status}; - use crate::transform::common::tonic::Attributes; + use crate::transform::common::tonic::{resource_attributes, Attributes}; use opentelemetry_api::trace; impl From 
for span::SpanKind { @@ -107,14 +107,6 @@ pub mod tonic { } } } - - fn resource_attributes(resource: &opentelemetry_sdk::Resource) -> Attributes { - resource - .iter() - .map(|(k, v)| opentelemetry_api::KeyValue::new(k.clone(), v.clone())) - .collect::>() - .into() - } } #[cfg(feature = "gen-protoc")] @@ -125,7 +117,7 @@ pub mod grpcio { ResourceSpans, ScopeSpans, Span, Span_Event, Span_Link, Span_SpanKind, Status, Status_StatusCode, }; - use crate::transform::common::grpcio::Attributes; + use crate::transform::common::grpcio::{resource_attributes, Attributes}; use opentelemetry_api::trace; use protobuf::{RepeatedField, SingularPtrField}; @@ -231,12 +223,4 @@ pub mod grpcio { } } } - - fn resource_attributes(resource: &opentelemetry_sdk::Resource) -> Attributes { - resource - .iter() - .map(|(k, v)| opentelemetry_api::KeyValue::new(k.clone(), v.clone())) - .collect::>() - .into() - } } diff --git a/opentelemetry-proto/tests/grpc_build.rs b/opentelemetry-proto/tests/grpc_build.rs index 1fe76e7d97..43f138957e 100644 --- a/opentelemetry-proto/tests/grpc_build.rs +++ b/opentelemetry-proto/tests/grpc_build.rs @@ -12,6 +12,8 @@ const GRPCIO_PROTO_FILES: &[&str] = &[ "src/proto/opentelemetry-proto/opentelemetry/proto/collector/trace/v1/trace_service.proto", "src/proto/opentelemetry-proto/opentelemetry/proto/metrics/v1/metrics.proto", "src/proto/opentelemetry-proto/opentelemetry/proto/collector/metrics/v1/metrics_service.proto", + "src/proto/opentelemetry-proto/opentelemetry/proto/logs/v1/logs.proto", + "src/proto/opentelemetry-proto/opentelemetry/proto/collector/logs/v1/logs_service.proto", "src/proto/tracez.proto", ]; const GRPCIO_INCLUDES: &[&str] = &["src/proto/opentelemetry-proto/", "src/proto"]; diff --git a/opentelemetry-sdk/Cargo.toml b/opentelemetry-sdk/Cargo.toml index 404f3f8a4e..5dc5e3ece4 100644 --- a/opentelemetry-sdk/Cargo.toml +++ b/opentelemetry-sdk/Cargo.toml @@ -45,8 +45,9 @@ crossbeam-queue = "0.3.1" default = ["trace"] trace = 
["opentelemetry_api/trace", "crossbeam-channel", "rand", "async-trait", "percent-encoding"] jaeger_remote_sampler = ["trace", "opentelemetry-http", "http", "serde", "serde_json", "url"] +logs = ["opentelemetry_api/logs", "crossbeam-channel", "async-trait", "serde_json"] metrics = ["opentelemetry_api/metrics", "regex", "async-trait"] -testing = ["opentelemetry_api/testing", "trace", "metrics", "rt-async-std", "rt-tokio", "rt-tokio-current-thread", "tokio/macros", "tokio/rt-multi-thread"] +testing = ["opentelemetry_api/testing", "trace", "metrics", "logs", "rt-async-std", "rt-tokio", "rt-tokio-current-thread", "tokio/macros", "tokio/rt-multi-thread"] rt-tokio = ["tokio", "tokio-stream"] rt-tokio-current-thread = ["tokio", "tokio-stream"] rt-async-std = ["async-std"] diff --git a/opentelemetry-sdk/src/export/logs/mod.rs b/opentelemetry-sdk/src/export/logs/mod.rs new file mode 100644 index 0000000000..3fc13fa8b5 --- /dev/null +++ b/opentelemetry-sdk/src/export/logs/mod.rs @@ -0,0 +1,33 @@ +//! Log exporters +use crate::Resource; +use async_trait::async_trait; +use opentelemetry_api::{ + logs::{LogError, LogRecord, LogResult}, + InstrumentationLibrary, +}; +use std::{borrow::Cow, fmt::Debug}; + +/// `LogExporter` defines the interface that log exporters should implement. +#[async_trait] +pub trait LogExporter: Send + Debug { + /// Exports a batch of `ResourceLogs`. + async fn export(&mut self, batch: Vec) -> LogResult<()>; + /// Shuts down the exporter. + fn shutdown(&mut self) {} +} + +/// `LogData` associates a [`LogRecord`] with a [`Resource`] and +/// [`InstrumentationLibrary`]. +#[derive(Debug)] +#[non_exhaustive] +pub struct LogData { + /// Log record + pub record: LogRecord, + /// Resource for the emitter who produced this `LogData`. + pub resource: Cow<'static, Resource>, + /// Instrumentation details for the emitter who produced this `LogData`. + pub instrumentation: InstrumentationLibrary, +} + +/// Describes the result of an export. 
+pub type ExportResult = Result<(), LogError>; diff --git a/opentelemetry-sdk/src/export/mod.rs b/opentelemetry-sdk/src/export/mod.rs index d32d3cf018..8622bda739 100644 --- a/opentelemetry-sdk/src/export/mod.rs +++ b/opentelemetry-sdk/src/export/mod.rs @@ -1,5 +1,9 @@ //! Telemetry Export +#[cfg(feature = "logs")] +#[cfg_attr(docsrs, doc(cfg(feature = "logs")))] +pub mod logs; + #[cfg(feature = "trace")] #[cfg_attr(docsrs, doc(cfg(feature = "trace")))] pub mod trace; diff --git a/opentelemetry-sdk/src/lib.rs b/opentelemetry-sdk/src/lib.rs index f129a405de..a55f2e823e 100644 --- a/opentelemetry-sdk/src/lib.rs +++ b/opentelemetry-sdk/src/lib.rs @@ -112,6 +112,9 @@ pub(crate) mod attributes; pub mod export; mod instrumentation; +#[cfg(feature = "logs")] +#[cfg_attr(docsrs, doc(cfg(feature = "logs")))] +pub mod logs; #[cfg(feature = "metrics")] #[cfg_attr(docsrs, doc(cfg(feature = "metrics")))] pub mod metrics; @@ -126,6 +129,7 @@ pub mod testing; #[cfg(feature = "trace")] #[cfg_attr(docsrs, doc(cfg(feature = "trace")))] pub mod trace; + #[doc(hidden)] pub mod util; diff --git a/opentelemetry-sdk/src/logs/config.rs b/opentelemetry-sdk/src/logs/config.rs new file mode 100644 index 0000000000..19a3ce0572 --- /dev/null +++ b/opentelemetry-sdk/src/logs/config.rs @@ -0,0 +1,8 @@ +use std::borrow::Cow; + +/// Log emitter configuration. +#[derive(Debug, Default)] +pub struct Config { + /// Contains attributes representing an entity that produces telemetry. 
+ pub resource: Cow<'static, crate::Resource>, +} diff --git a/opentelemetry-sdk/src/logs/log_emitter.rs b/opentelemetry-sdk/src/logs/log_emitter.rs new file mode 100644 index 0000000000..432121a2d1 --- /dev/null +++ b/opentelemetry-sdk/src/logs/log_emitter.rs @@ -0,0 +1,232 @@ +use super::{BatchLogProcessor, Config, LogProcessor, LogRuntime, SimpleLogProcessor}; +use crate::export::logs::{LogData, LogExporter}; +use opentelemetry_api::{ + global::{handle_error, Error}, + logs::{LogRecord, LogResult}, + trace::TraceContextExt, + Context, InstrumentationLibrary, +}; +use std::{ + borrow::Cow, + sync::{Arc, Weak}, +}; + +#[derive(Debug, Clone)] +/// Creator for `Logger` instances. +pub struct LoggerProvider { + inner: Arc, +} + +/// Default logger name if empty string is provided. +const DEFAULT_COMPONENT_NAME: &str = "rust.opentelemetry.io/sdk/logger"; + +impl opentelemetry_api::logs::LoggerProvider for LoggerProvider { + type Logger = Logger; + + /// Create a new versioned `Logger` instance. + fn versioned_logger( + &self, + name: impl Into>, + version: Option>, + schema_url: Option>, + attributes: Option>, + include_trace_context: bool, + ) -> Logger { + let name = name.into(); + + let component_name = if name.is_empty() { + Cow::Borrowed(DEFAULT_COMPONENT_NAME) + } else { + name + }; + + Logger::new( + InstrumentationLibrary::new(component_name, version, schema_url, attributes), + Arc::downgrade(&self.inner), + include_trace_context, + ) + } +} + +impl LoggerProvider { + /// Build a new logger provider. + pub(crate) fn new(inner: Arc) -> Self { + LoggerProvider { inner } + } + + /// Create a new `LoggerProvider` builder. + pub fn builder() -> Builder { + Builder::default() + } + + /// Config associated with this provider. + pub fn config(&self) -> &Config { + &self.inner.config + } + + /// Log processors associated with this provider. 
+ pub fn log_processors(&self) -> &Vec> { + &self.inner.processors + } + + /// Force flush all remaining logs in log processors and return results. + pub fn force_flush(&self) -> Vec> { + self.log_processors() + .iter() + .map(|processor| processor.force_flush()) + .collect() + } + + /// Shuts down this `LoggerProvider`, panicking on failure. + pub fn shutdown(&mut self) -> Vec> { + self.try_shutdown() + .expect("cannot shutdown LoggerProvider when child Loggers are still active") + } + + /// Attempts to shutdown this `LoggerProvider`, succeeding only when + /// all cloned `LoggerProvider` values have been dropped. + pub fn try_shutdown(&mut self) -> Option>> { + Arc::get_mut(&mut self.inner).map(|inner| { + inner + .processors + .iter_mut() + .map(|processor| processor.shutdown()) + .collect() + }) + } +} + +impl Drop for LoggerProvider { + fn drop(&mut self) { + match self.try_shutdown() { + None => handle_error(Error::Other( + "cannot shutdown LoggerProvider when child Loggers are still active".into(), + )), + Some(results) => { + for result in results { + if let Err(err) = result { + handle_error(err) + } + } + } + } + } +} + +#[derive(Debug)] +pub(crate) struct LoggerProviderInner { + processors: Vec>, + config: Config, +} + +#[derive(Debug, Default)] +/// Builder for provider attributes. +pub struct Builder { + processors: Vec>, + config: Config, +} + +impl Builder { + /// The `LogExporter` that this provider should use. + pub fn with_simple_exporter(self, exporter: T) -> Self { + let mut processors = self.processors; + processors.push(Box::new(SimpleLogProcessor::new(Box::new(exporter)))); + + Builder { processors, ..self } + } + + /// The `LogExporter` setup using a default `BatchLogProcessor` that this provider should use. 
+ pub fn with_batch_exporter( + self, + exporter: T, + runtime: R, + ) -> Self { + let batch = BatchLogProcessor::builder(exporter, runtime).build(); + self.with_log_processor(batch) + } + + /// The `LogProcessor` that this provider should use. + pub fn with_log_processor(self, processor: T) -> Self { + let mut processors = self.processors; + processors.push(Box::new(processor)); + + Builder { processors, ..self } + } + + /// The `Config` that this provider should use. + pub fn with_config(self, config: Config) -> Self { + Builder { config, ..self } + } + + /// Create a new provider from this configuration. + pub fn build(self) -> LoggerProvider { + LoggerProvider { + inner: Arc::new(LoggerProviderInner { + processors: self.processors, + config: self.config, + }), + } + } +} + +#[derive(Debug)] +/// The object for emitting [`LogRecord`]s. +/// +/// [`LogRecord`]: opentelemetry_api::logs::LogRecord +pub struct Logger { + include_trace_context: bool, + instrumentation_lib: InstrumentationLibrary, + provider: Weak, +} + +impl Logger { + pub(crate) fn new( + instrumentation_lib: InstrumentationLibrary, + provider: Weak, + include_trace_context: bool, + ) -> Self { + Logger { + include_trace_context, + instrumentation_lib, + provider, + } + } + + /// LoggerProvider associated with this logger. + pub fn provider(&self) -> Option { + self.provider.upgrade().map(LoggerProvider::new) + } + + /// Instrumentation library information of this logger. + pub fn instrumentation_library(&self) -> &InstrumentationLibrary { + &self.instrumentation_lib + } +} + +impl opentelemetry_api::logs::Logger for Logger { + /// Emit a `LogRecord`. 
+ fn emit(&self, record: LogRecord) {
+ let provider = match self.provider() {
+ Some(provider) => provider,
+ None => return,
+ };
+
+ let config = provider.config();
+ for processor in provider.log_processors() {
+ let mut record = record.clone();
+ if self.include_trace_context {
+ let ctx = Context::current();
+ if ctx.has_active_span() {
+ let span = ctx.span();
+ record.trace_context = Some(span.span_context().into());
+ }
+ }
+ let data = LogData {
+ record,
+ resource: config.resource.clone(),
+ instrumentation: self.instrumentation_lib.clone(),
+ };
+ processor.emit(data);
+ }
+ }
+}
diff --git a/opentelemetry-sdk/src/logs/log_processor.rs b/opentelemetry-sdk/src/logs/log_processor.rs
new file mode 100644
index 0000000000..62c1c151de
--- /dev/null
+++ b/opentelemetry-sdk/src/logs/log_processor.rs
@@ -0,0 +1,368 @@
+use super::LogRuntime;
+use crate::{
+ export::logs::{ExportResult, LogData, LogExporter},
+ logs::TrySend,
+};
+use futures_channel::oneshot;
+use futures_util::{
+ future::{self, Either},
+ {pin_mut, stream, StreamExt as _},
+};
+use opentelemetry_api::{
+ global,
+ logs::{LogError, LogResult},
+};
+use std::thread;
+use std::{
+ fmt::{self, Debug, Formatter},
+ time::Duration,
+};
+
+/// The interface for plugging into a [`Logger`].
+///
+/// [`Logger`]: crate::logs::Logger
+pub trait LogProcessor: Send + Sync + Debug {
+ /// Called when a log record is ready to be processed and exported.
+ fn emit(&self, data: LogData);
+ /// Force the logs lying in the cache to be exported.
+ fn force_flush(&self) -> LogResult<()>;
+ /// Shuts down the processor.
+ fn shutdown(&mut self) -> LogResult<()>;
+}
+
+/// A [`LogProcessor`] that exports synchronously when logs are emitted.
+///
+/// # Examples
+///
+/// Note that the simple processor exports synchronously every time a log is
+/// emitted. If you find this limiting, consider the batch processor instead.
+#[derive(Debug)]
+pub struct SimpleLogProcessor {
+ sender: crossbeam_channel::Sender>,
+ shutdown: crossbeam_channel::Receiver<()>,
+}
+
+impl SimpleLogProcessor {
+ pub(crate) fn new(mut exporter: Box) -> Self {
+ let (log_tx, log_rx) = crossbeam_channel::unbounded();
+ let (shutdown_tx, shutdown_rx) = crossbeam_channel::bounded(0);
+
+ let _ = thread::Builder::new()
+ .name("opentelemetry-log-exporter".to_string())
+ .spawn(move || {
+ while let Ok(Some(log)) = log_rx.recv() {
+ if let Err(err) = futures_executor::block_on(exporter.export(vec![log])) {
+ global::handle_error(err);
+ }
+ }
+
+ exporter.shutdown();
+
+ if let Err(err) = shutdown_tx.send(()) {
+ global::handle_error(LogError::from(format!(
+ "could not send shutdown: {:?}",
+ err
+ )));
+ }
+ });
+
+ SimpleLogProcessor {
+ sender: log_tx,
+ shutdown: shutdown_rx,
+ }
+ }
+}
+
+impl LogProcessor for SimpleLogProcessor {
+ fn emit(&self, data: LogData) {
+ if let Err(err) = self.sender.send(Some(data)) {
+ global::handle_error(LogError::from(format!("error processing log {:?}", err)));
+ }
+ }
+
+ fn force_flush(&self) -> LogResult<()> {
+ // Ignored since all logs in Simple Processor will be exported as they are emitted.
+ Ok(())
+ }
+
+ fn shutdown(&mut self) -> LogResult<()> {
+ if self.sender.send(None).is_ok() {
+ if let Err(err) = self.shutdown.recv() {
+ global::handle_error(LogError::from(format!(
+ "error shutting down log processor: {:?}",
+ err
+ )))
+ }
+ }
+
+ Ok(())
+ }
+}
+
+/// A [`LogProcessor`] that asynchronously buffers log records and reports
+/// them at a preconfigured interval.
+pub struct BatchLogProcessor { + message_sender: R::Sender, +} + +impl Debug for BatchLogProcessor { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("BatchLogProcessor") + .field("message_sender", &self.message_sender) + .finish() + } +} + +impl LogProcessor for BatchLogProcessor { + fn emit(&self, data: LogData) { + let result = self.message_sender.try_send(BatchMessage::ExportLog(data)); + + if let Err(err) = result { + global::handle_error(err); + } + } + + fn force_flush(&self) -> LogResult<()> { + let (res_sender, res_receiver) = oneshot::channel(); + self.message_sender + .try_send(BatchMessage::Flush(Some(res_sender)))?; + + futures_executor::block_on(res_receiver) + .map_err(|err| LogError::Other(err.into())) + .and_then(std::convert::identity) + } + + fn shutdown(&mut self) -> LogResult<()> { + let (res_sender, res_receiver) = oneshot::channel(); + self.message_sender + .try_send(BatchMessage::Shutdown(res_sender))?; + + futures_executor::block_on(res_receiver) + .map_err(|err| LogError::Other(err.into())) + .and_then(std::convert::identity) + } +} + +impl BatchLogProcessor { + pub(crate) fn new(mut exporter: Box, config: BatchConfig, runtime: R) -> Self { + let (message_sender, message_receiver) = + runtime.batch_message_channel(config.max_queue_size); + let ticker = runtime + .interval(config.scheduled_delay) + .map(|_| BatchMessage::Flush(None)); + let timeout_runtime = runtime.clone(); + + // Spawn worker process via user-defined spawn function. + runtime.spawn(Box::pin(async move { + let mut logs = Vec::new(); + let mut messages = Box::pin(stream::select(message_receiver, ticker)); + + while let Some(message) = messages.next().await { + match message { + // Log has finished, add to buffer of pending logs. 
+ BatchMessage::ExportLog(log) => { + logs.push(log); + + if logs.len() == config.max_export_batch_size { + let result = export_with_timeout( + config.max_export_timeout, + exporter.as_mut(), + &timeout_runtime, + logs.split_off(0), + ) + .await; + + if let Err(err) = result { + global::handle_error(err); + } + } + } + // Log batch interval time reached or a force flush has been invoked, export current spans. + BatchMessage::Flush(res_channel) => { + let result = export_with_timeout( + config.max_export_timeout, + exporter.as_mut(), + &timeout_runtime, + logs.split_off(0), + ) + .await; + + if let Some(channel) = res_channel { + if let Err(result) = channel.send(result) { + global::handle_error(LogError::from(format!( + "failed to send flush result: {:?}", + result + ))); + } + } else if let Err(err) = result { + global::handle_error(err); + } + } + // Stream has terminated or processor is shutdown, return to finish execution. + BatchMessage::Shutdown(ch) => { + let result = export_with_timeout( + config.max_export_timeout, + exporter.as_mut(), + &timeout_runtime, + logs.split_off(0), + ) + .await; + + exporter.shutdown(); + + if let Err(result) = ch.send(result) { + global::handle_error(LogError::from(format!( + "failed to send batch processor shutdown result: {:?}", + result + ))); + } + + break; + } + } + } + })); + + // Return batch processor with link to worker + BatchLogProcessor { message_sender } + } + + /// Create a new batch processor builder + pub fn builder(exporter: E, runtime: R) -> BatchLogProcessorBuilder + where + E: LogExporter, + { + BatchLogProcessorBuilder { + exporter, + config: BatchConfig::default(), + runtime, + } + } +} + +async fn export_with_timeout( + time_out: Duration, + exporter: &mut E, + runtime: &R, + batch: Vec, +) -> ExportResult +where + R: LogRuntime, + E: LogExporter + ?Sized, +{ + if batch.is_empty() { + return Ok(()); + } + + let export = exporter.export(batch); + let timeout = runtime.delay(time_out); + pin_mut!(export); + 
pin_mut!(timeout); + match future::select(export, timeout).await { + Either::Left((export_res, _)) => export_res, + Either::Right((_, _)) => ExportResult::Err(LogError::ExportTimedOut(time_out)), + } +} + +/// Batch log processor configuration +#[derive(Debug)] +pub struct BatchConfig { + /// The maximum queue size to buffer logs for delayed processing. If the + /// queue gets full it drops the logs. The default value of is 2048. + max_queue_size: usize, + + /// The delay interval in milliseconds between two consecutive processing + /// of batches. The default value is 1 second. + scheduled_delay: Duration, + + /// The maximum number of logs to process in a single batch. If there are + /// more than one batch worth of logs then it processes multiple batches + /// of logs one batch after the other without any delay. The default value + /// is 512. + max_export_batch_size: usize, + + /// The maximum duration to export a batch of data. + max_export_timeout: Duration, +} + +impl Default for BatchConfig { + fn default() -> Self { + BatchConfig { + max_queue_size: 2_048, + scheduled_delay: Duration::from_millis(1_000), + max_export_batch_size: 512, + max_export_timeout: Duration::from_millis(30_000), + } + } +} + +/// A builder for creating [`BatchLogProcessor`] instances. +/// +#[derive(Debug)] +pub struct BatchLogProcessorBuilder { + exporter: E, + config: BatchConfig, + runtime: R, +} + +impl BatchLogProcessorBuilder +where + E: LogExporter + 'static, + R: LogRuntime, +{ + /// Set max queue size for batches + pub fn with_max_queue_size(self, size: usize) -> Self { + let mut config = self.config; + config.max_queue_size = size; + + BatchLogProcessorBuilder { config, ..self } + } + + /// Set scheduled delay for batches + pub fn with_scheduled_delay(self, delay: Duration) -> Self { + let mut config = self.config; + config.scheduled_delay = delay; + + BatchLogProcessorBuilder { config, ..self } + } + + /// Set max timeout for exporting. 
+ pub fn with_max_timeout(self, timeout: Duration) -> Self { + let mut config = self.config; + config.max_export_timeout = timeout; + + BatchLogProcessorBuilder { config, ..self } + } + + /// Set max export size for batches, should always less than or equals to max queue size. + /// + /// If input is larger than max queue size, will lower it to be equal to max queue size + pub fn with_max_export_batch_size(self, size: usize) -> Self { + let mut config = self.config; + if size > config.max_queue_size { + config.max_export_batch_size = config.max_queue_size; + } else { + config.max_export_batch_size = size; + } + + BatchLogProcessorBuilder { config, ..self } + } + + /// Build a batch processor + pub fn build(self) -> BatchLogProcessor { + BatchLogProcessor::new(Box::new(self.exporter), self.config, self.runtime) + } +} + +/// Messages sent between application thread and batch log processor's work thread. +#[allow(clippy::large_enum_variant)] +#[derive(Debug)] +pub enum BatchMessage { + /// Export logs, usually called when the log is emitted. + ExportLog(LogData), + /// Flush the current buffer to the backend, it can be triggered by + /// pre configured interval or a call to `force_push` function. + Flush(Option>), + /// Shut down the worker thread, push all logs in buffer to the backend. + Shutdown(oneshot::Sender), +} diff --git a/opentelemetry-sdk/src/logs/mod.rs b/opentelemetry-sdk/src/logs/mod.rs new file mode 100644 index 0000000000..3aaa607910 --- /dev/null +++ b/opentelemetry-sdk/src/logs/mod.rs @@ -0,0 +1,14 @@ +//! 
# OpenTelemetry Log SDK + +mod config; +mod log_emitter; +mod log_processor; +mod runtime; + +pub use config::Config; +pub use log_emitter::{Builder, Logger, LoggerProvider}; +pub use log_processor::{ + BatchConfig, BatchLogProcessor, BatchLogProcessorBuilder, BatchMessage, LogProcessor, + SimpleLogProcessor, +}; +pub use runtime::{LogRuntime, TrySend}; diff --git a/opentelemetry-sdk/src/logs/runtime.rs b/opentelemetry-sdk/src/logs/runtime.rs new file mode 100644 index 0000000000..bc15b5cdf8 --- /dev/null +++ b/opentelemetry-sdk/src/logs/runtime.rs @@ -0,0 +1,119 @@ +//! # Log Runtime +//! Log runtime is an extension to [`Runtime`]. Currently it provides a channel that used +//! by [`BatchLogProcessor`]. +//! +//! [`BatchLogProcessor`]: crate::logs::BatchLogProcessor +//! [`Runtime`]: crate::runtime::Runtime +use crate::logs::BatchMessage; +#[cfg(feature = "rt-async-std")] +use crate::runtime::AsyncStd; +use crate::runtime::Runtime; +#[cfg(feature = "rt-tokio")] +use crate::runtime::Tokio; +#[cfg(feature = "rt-tokio-current-thread")] +use crate::runtime::TokioCurrentThread; +use futures_util::stream::Stream; +use opentelemetry_api::logs::LogError; +use std::fmt::Debug; + +#[cfg(any( + feature = "rt-tokio", + feature = "rt-tokio-current-thread", + feature = "rt-async-std" +))] +const CHANNEL_FULL_ERROR: &str = + "cannot send log record to the batch log processor because the channel is full"; +#[cfg(any( + feature = "rt-tokio", + feature = "rt-tokio-current-thread", + feature = "rt-async-std" +))] +const CHANNEL_CLOSED_ERROR: &str = + "cannot send log record to the batch log processor because the channel is closed"; + +/// Log runtime is an extension to [`Runtime`]. Currently it provides a channel that used +/// by [`BatchLogProcessor`]. +/// +/// [`BatchLogProcessor`]: crate::logs::BatchLogProcessor +/// [`Runtime`]: crate::runtime::Runtime +pub trait LogRuntime: Runtime { + /// A future stream to receive the batch messages from channels. 
+ type Receiver: Stream + Send; + + /// A batch messages sender that could be sent across thread safely. + type Sender: TrySend + Debug; + + /// Return the sender and receiver used to send batch message between tasks. + fn batch_message_channel(&self, capacity: usize) -> (Self::Sender, Self::Receiver); +} + +/// TrySend is an abstraction of sender that is capable to send BatchMessage with reference. +pub trait TrySend: Sync + Send { + /// Try to send one batch message to worker thread. + /// + /// It can fail because either the receiver has closed or the buffer is full. + fn try_send(&self, item: BatchMessage) -> Result<(), LogError>; +} + +#[cfg(any(feature = "rt-tokio", feature = "rt-tokio-current-thread"))] +impl TrySend for tokio::sync::mpsc::Sender { + fn try_send(&self, item: BatchMessage) -> Result<(), LogError> { + self.try_send(item).map_err(|err| match err { + tokio::sync::mpsc::error::TrySendError::Full(_) => LogError::from(CHANNEL_FULL_ERROR), + tokio::sync::mpsc::error::TrySendError::Closed(_) => { + LogError::from(CHANNEL_CLOSED_ERROR) + } + }) + } +} + +#[cfg(feature = "rt-tokio")] +#[cfg_attr(docsrs, doc(cfg(feature = "rt-tokio")))] +impl LogRuntime for Tokio { + type Receiver = tokio_stream::wrappers::ReceiverStream; + type Sender = tokio::sync::mpsc::Sender; + + fn batch_message_channel(&self, capacity: usize) -> (Self::Sender, Self::Receiver) { + let (sender, receiver) = tokio::sync::mpsc::channel(capacity); + ( + sender, + tokio_stream::wrappers::ReceiverStream::new(receiver), + ) + } +} + +#[cfg(feature = "rt-tokio-current-thread")] +#[cfg_attr(docsrs, doc(cfg(feature = "rt-tokio-current-thread")))] +impl LogRuntime for TokioCurrentThread { + type Receiver = tokio_stream::wrappers::ReceiverStream; + type Sender = tokio::sync::mpsc::Sender; + + fn batch_message_channel(&self, capacity: usize) -> (Self::Sender, Self::Receiver) { + let (sender, receiver) = tokio::sync::mpsc::channel(capacity); + ( + sender, + 
tokio_stream::wrappers::ReceiverStream::new(receiver), + ) + } +} + +#[cfg(feature = "rt-async-std")] +impl TrySend for async_std::channel::Sender { + fn try_send(&self, item: BatchMessage) -> Result<(), LogError> { + self.try_send(item).map_err(|err| match err { + async_std::channel::TrySendError::Full(_) => LogError::from(CHANNEL_FULL_ERROR), + async_std::channel::TrySendError::Closed(_) => LogError::from(CHANNEL_CLOSED_ERROR), + }) + } +} + +#[cfg(feature = "rt-async-std")] +#[cfg_attr(docsrs, doc(cfg(feature = "rt-async-std")))] +impl LogRuntime for AsyncStd { + type Receiver = async_std::channel::Receiver; + type Sender = async_std::channel::Sender; + + fn batch_message_channel(&self, capacity: usize) -> (Self::Sender, Self::Receiver) { + async_std::channel::bounded(capacity) + } +} diff --git a/opentelemetry-stdout/Cargo.toml b/opentelemetry-stdout/Cargo.toml index 3f16f5ec50..fb36a73b33 100644 --- a/opentelemetry-stdout/Cargo.toml +++ b/opentelemetry-stdout/Cargo.toml @@ -18,14 +18,17 @@ rust-version = "1.60" [features] trace = ["opentelemetry_api/trace", "opentelemetry_sdk/trace", "futures-util"] metrics = ["async-trait", "opentelemetry_api/metrics", "opentelemetry_sdk/metrics"] +logs = ["opentelemetry_api/logs", "opentelemetry_sdk/logs", "async-trait", "thiserror"] [dependencies] async-trait = { version = "0.1", optional = true } +thiserror = { version = "1", optional = true } futures-util = { version = "0.3", optional = true, default-features = false } opentelemetry_api = { version = "0.19", path = "../opentelemetry-api", default_features = false } opentelemetry_sdk = { version = "0.19", path = "../opentelemetry-sdk", default_features = false } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" +ordered-float = "3.4.0" [dev-dependencies] opentelemetry_api = { version = "0.19", path = "../opentelemetry-api", features = ["metrics"] } diff --git a/opentelemetry-stdout/src/common.rs b/opentelemetry-stdout/src/common.rs index 
7dec1cd7e5..1758986941 100644 --- a/opentelemetry-stdout/src/common.rs +++ b/opentelemetry-stdout/src/common.rs @@ -1,9 +1,15 @@ -use std::{borrow::Cow, collections::BTreeMap}; +use std::{ + borrow::Cow, + collections::BTreeMap, + hash::{Hash, Hasher}, + time::{SystemTime, UNIX_EPOCH}, +}; -use serde::Serialize; +use ordered_float::OrderedFloat; +use serde::{Serialize, Serializer}; -#[derive(Debug, Serialize, Clone)] -pub(crate) struct AttributeSet(BTreeMap); +#[derive(Debug, Serialize, Clone, Hash, Eq, PartialEq)] +pub(crate) struct AttributeSet(pub BTreeMap); impl From<&opentelemetry_sdk::AttributeSet> for AttributeSet { fn from(value: &opentelemetry_sdk::AttributeSet) -> Self { @@ -16,6 +22,17 @@ impl From<&opentelemetry_sdk::AttributeSet> for AttributeSet { } } +impl From<&opentelemetry_sdk::Resource> for AttributeSet { + fn from(value: &opentelemetry_sdk::Resource) -> Self { + AttributeSet( + value + .iter() + .map(|(key, value)| (Key::from(key.clone()), Value::from(value.clone()))) + .collect(), + ) + } +} + #[derive(Debug, Clone, Serialize)] #[serde(rename_all = "camelCase")] pub(crate) struct Resource { @@ -43,16 +60,23 @@ impl From<&opentelemetry_sdk::Resource> for Resource { } } -#[derive(Debug, Clone, Serialize, Hash, PartialEq, Eq, PartialOrd, Ord)] +#[derive(Debug, Clone, Serialize, Hash, PartialEq, Eq, Ord, PartialOrd)] pub(crate) struct Key(Cow<'static, str>); +impl From> for Key { + fn from(value: Cow<'static, str>) -> Self { + Key(value) + } +} + impl From for Key { fn from(value: opentelemetry_api::Key) -> Self { Key(value.as_str().to_string().into()) } } -#[derive(Debug, Serialize, PartialEq, Clone)] +#[derive(Debug, Serialize, Clone)] +#[allow(dead_code)] pub(crate) enum Value { #[serde(rename = "boolValue")] Bool(bool), @@ -64,6 +88,38 @@ pub(crate) enum Value { String(String), #[serde(rename = "arrayValue")] Array(Vec), + #[serde(rename = "kvListValue")] + KeyValues(Vec), + #[serde(rename = "bytesValue")] + BytesValue(Vec), +} + +impl 
PartialEq for Value { + fn eq(&self, other: &Self) -> bool { + match (&self, &other) { + (Value::Double(f), Value::Double(of)) => OrderedFloat(*f).eq(&OrderedFloat(*of)), + (non_double, other_non_double) => non_double.eq(other_non_double), + } + } +} + +impl Eq for Value {} + +impl Hash for Value { + fn hash(&self, state: &mut H) { + match &self { + Value::Bool(b) => b.hash(state), + Value::Int(i) => i.hash(state), + Value::Double(f) => OrderedFloat(*f).hash(state), + Value::String(s) => s.hash(state), + Value::Array(a) => a.iter().for_each(|v| v.hash(state)), + Value::KeyValues(kv) => kv.iter().for_each(|kv| { + kv.key.hash(state); + kv.value.hash(state); + }), + Value::BytesValue(b) => b.hash(state), + } + } } impl From for Value { @@ -91,6 +147,30 @@ impl From for Value { } } +#[cfg(feature = "logs")] +impl From for Value { + fn from(value: opentelemetry_api::logs::AnyValue) -> Self { + match value { + opentelemetry_api::logs::AnyValue::Boolean(b) => Value::Bool(b), + opentelemetry_api::logs::AnyValue::Int(i) => Value::Int(i), + opentelemetry_api::logs::AnyValue::Double(d) => Value::Double(d), + opentelemetry_api::logs::AnyValue::String(s) => Value::String(s.into()), + opentelemetry_api::logs::AnyValue::ListAny(a) => { + Value::Array(a.into_iter().map(Into::into).collect()) + } + opentelemetry_api::logs::AnyValue::Map(m) => Value::KeyValues( + m.into_iter() + .map(|(key, value)| KeyValue { + key: key.into(), + value: value.into(), + }) + .collect(), + ), + opentelemetry_api::logs::AnyValue::Bytes(b) => Value::BytesValue(b), + } + } +} + #[derive(Debug, Serialize, PartialEq, Clone)] #[serde(rename_all = "camelCase")] pub(crate) struct KeyValue { @@ -98,6 +178,16 @@ pub(crate) struct KeyValue { value: Value, } +#[cfg(feature = "logs")] +impl From<(opentelemetry_api::Key, opentelemetry_api::logs::AnyValue)> for KeyValue { + fn from((key, value): (opentelemetry_api::Key, opentelemetry_api::logs::AnyValue)) -> Self { + KeyValue { + key: key.into(), + value: 
value.into(), + } + } +} + impl From for KeyValue { fn from(value: opentelemetry_api::KeyValue) -> Self { KeyValue { @@ -148,3 +238,15 @@ impl From for Scope { } } } + +pub(crate) fn as_unix_nano(time: &SystemTime, serializer: S) -> Result +where + S: Serializer, +{ + let nanos = time + .duration_since(UNIX_EPOCH) + .unwrap_or_default() + .as_nanos(); + + serializer.serialize_u128(nanos) +} diff --git a/opentelemetry-stdout/src/lib.rs b/opentelemetry-stdout/src/lib.rs index 042463223a..7ff59d4a04 100644 --- a/opentelemetry-stdout/src/lib.rs +++ b/opentelemetry-stdout/src/lib.rs @@ -50,3 +50,9 @@ mod trace; #[cfg_attr(docsrs, doc(cfg(feature = "trace")))] #[cfg(feature = "trace")] pub use trace::*; + +#[cfg(feature = "logs")] +mod logs; +#[cfg_attr(docsrs, doc(cfg(feature = "logs")))] +#[cfg(feature = "logs")] +pub use logs::*; diff --git a/opentelemetry-stdout/src/logs/exporter.rs b/opentelemetry-stdout/src/logs/exporter.rs new file mode 100644 index 0000000000..0ea58382f0 --- /dev/null +++ b/opentelemetry-stdout/src/logs/exporter.rs @@ -0,0 +1,139 @@ +use core::fmt; +use std::io::{stdout, Write}; + +use async_trait::async_trait; +use opentelemetry_api::{ + logs::{LogError, LogResult}, + ExportError, +}; +use opentelemetry_sdk::export::logs::{ExportResult, LogData}; + +type Encoder = + Box LogResult<()> + Send + Sync>; + +/// A [`LogExporter`] that writes to [`Stdout`] or other configured [`Write`]. +/// +/// [`LogExporter`]: opentelemetry_sdk::export::logs::LogExporter +/// [`Write`]: std::io::Write +/// [`Stdout`]: std::io::Stdout +pub struct LogExporter { + writer: Option>, + encoder: Encoder, +} + +impl LogExporter { + /// Create a builder to configure this exporter. 
+ pub fn builder() -> LogExporterBuilder { + Default::default() + } +} + +impl Default for LogExporter { + fn default() -> Self { + LogExporterBuilder::default().build() + } +} + +impl fmt::Debug for LogExporter { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("LogsExporter") + } +} + +#[async_trait] +impl opentelemetry_sdk::export::logs::LogExporter for LogExporter { + /// Export spans to stdout + async fn export(&mut self, batch: Vec) -> ExportResult { + if let Some(writer) = &mut self.writer { + let result = (self.encoder)(writer, crate::logs::LogData::from(batch)) as LogResult<()>; + result.and_then(|_| writer.write_all(b"\n").map_err(|e| Error(e).into())) + } else { + Err("exporter is shut down".into()) + } + } + + fn shutdown(&mut self) { + self.writer.take(); + } +} + +/// Stdout exporter's error +#[derive(thiserror::Error, Debug)] +#[error(transparent)] +struct Error(#[from] std::io::Error); + +impl ExportError for Error { + fn exporter_name(&self) -> &'static str { + "stdout" + } +} + +/// Configuration for the stdout log exporter +#[derive(Default)] +pub struct LogExporterBuilder { + writer: Option>, + encoder: Option, +} + +impl fmt::Debug for LogExporterBuilder { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("LogExporterBuilder") + } +} + +impl LogExporterBuilder { + /// Set the writer that the exporter will write to + /// + /// # Examples + /// + /// ``` + /// use opentelemetry_stdout::LogExporterBuilder; + /// + /// let buffer = Vec::new(); // Any type that implements `Write` + /// let exporter = LogExporterBuilder::default().with_writer(buffer).build(); + /// ``` + pub fn with_writer(mut self, writer: W) -> Self + where + W: Write + Send + Sync + 'static, + { + self.writer = Some(Box::new(writer)); + self + } + + /// Set the encoder that the exporter will use. 
+ /// + /// # Examples + /// + /// ``` + /// use opentelemetry_stdout::LogExporterBuilder; + /// use serde_json; + /// + /// let exporter = LogExporterBuilder::default() + /// .with_encoder(|writer, data| + /// Ok(serde_json::to_writer_pretty(writer, &data).unwrap())) + /// .build(); + /// ``` + pub fn with_encoder(mut self, encoder: E) -> Self + where + E: Fn(&mut dyn Write, crate::logs::transform::LogData) -> LogResult<()> + + Send + + Sync + + 'static, + { + self.encoder = Some(Box::new(encoder)); + self + } + + /// Create a log exporter with the current configuration. + pub fn build(self) -> LogExporter { + LogExporter { + writer: Some(self.writer.unwrap_or_else(|| Box::new(stdout()))), + encoder: self.encoder.unwrap_or_else(|| { + Box::new(|writer, logs| { + serde_json::to_writer(writer, &logs) + .map_err(|err| LogError::Other(Box::new(err))) + }) + }), + } + } +} diff --git a/opentelemetry-stdout/src/logs/mod.rs b/opentelemetry-stdout/src/logs/mod.rs new file mode 100644 index 0000000000..0d381e2cf0 --- /dev/null +++ b/opentelemetry-stdout/src/logs/mod.rs @@ -0,0 +1,15 @@ +//! # Stdout Log Exporter +//! +//! The stdout [`LogExporter`] writes debug printed [`LogRecord`]s to its configured +//! [`Write`] instance. By default it will write to [`Stdout`]. +//! +//! [`LogExporter`]: opentelemetry_sdk::export::logs::LogExporter +//! [`LogRecord`]: opentelemetry_api::logs::LogRecord +//! [`Write`]: std::io::Write +//! [`Stdout`]: std::io::Stdout +// TODO: Add an example for using this exporter. 
+mod exporter; +mod transform; + +pub use exporter::*; +pub use transform::*; diff --git a/opentelemetry-stdout/src/logs/transform.rs b/opentelemetry-stdout/src/logs/transform.rs new file mode 100644 index 0000000000..ba0a0ceb78 --- /dev/null +++ b/opentelemetry-stdout/src/logs/transform.rs @@ -0,0 +1,144 @@ +use std::{borrow::Cow, collections::HashMap, time::SystemTime}; + +use crate::common::{as_unix_nano, KeyValue, Resource, Scope, Value}; +use opentelemetry_sdk::AttributeSet; +use serde::{Serialize, Serializer}; + +/// Transformed logs data that can be serialized. +#[derive(Debug, Serialize)] +#[serde(rename_all = "camelCase")] +pub struct LogData { + #[serde(skip_serializing_if = "Vec::is_empty")] + resource_logs: Vec, +} + +impl From> for LogData { + fn from(sdk_logs: Vec) -> LogData { + let mut resource_logs = HashMap::::new(); + + for sdk_log in sdk_logs { + let resource_schema_url = sdk_log.resource.schema_url().map(|s| s.to_string().into()); + let schema_url = sdk_log.instrumentation.schema_url.clone(); + let scope: Scope = sdk_log.instrumentation.clone().into(); + let resource: Resource = sdk_log.resource.as_ref().into(); + + let rl = resource_logs + .entry(sdk_log.resource.as_ref().into()) + .or_insert_with(move || ResourceLogs { + resource, + scope_logs: Vec::with_capacity(1), + schema_url: resource_schema_url, + }); + + match rl.scope_logs.iter_mut().find(|sl| sl.scope == scope) { + Some(sl) => sl.log_records.push(sdk_log.into()), + None => rl.scope_logs.push(ScopeLogs { + scope, + log_records: vec![sdk_log.into()], + schema_url, + }), + } + } + + LogData { + resource_logs: resource_logs.into_values().collect(), + } + } +} + +#[derive(Debug, Serialize)] +#[serde(rename_all = "camelCase")] +struct ResourceLogs { + resource: Resource, + #[serde(skip_serializing_if = "Vec::is_empty")] + scope_logs: Vec, + #[serde(skip_serializing_if = "Option::is_none")] + schema_url: Option>, +} + +#[derive(Debug, Serialize)] +#[serde(rename_all = "camelCase")] +struct 
ScopeLogs { + scope: Scope, + #[serde(skip_serializing_if = "Vec::is_empty")] + log_records: Vec, + #[serde(skip_serializing_if = "Option::is_none")] + schema_url: Option>, +} + +#[derive(Debug, Serialize)] +#[serde(rename_all = "camelCase")] +struct LogRecord { + #[serde( + skip_serializing_if = "Option::is_none", + serialize_with = "opt_as_unix_nano" + )] + time_unix_nano: Option, + #[serde( + skip_serializing_if = "Option::is_none", + serialize_with = "opt_as_unix_nano" + )] + observed_time_unix_nano: Option, + severity_number: u32, + #[serde(skip_serializing_if = "Option::is_none")] + severity_text: Option>, + #[serde(skip_serializing_if = "Option::is_none")] + body: Option, + attributes: Vec, + dropped_attributes_count: u32, + #[serde(skip_serializing_if = "Option::is_none")] + flags: Option, + #[serde(skip_serializing_if = "Option::is_none")] + span_id: Option, + #[serde(skip_serializing_if = "Option::is_none")] + trace_id: Option, +} + +impl From for LogRecord { + fn from(value: opentelemetry_sdk::export::logs::LogData) -> Self { + LogRecord { + trace_id: value + .record + .trace_context + .as_ref() + .map(|c| format!("{:x}", c.trace_id)), + span_id: value + .record + .trace_context + .as_ref() + .map(|c| format!("{:x}", c.span_id)), + flags: value + .record + .trace_context + .map(|c| c.trace_flags.map(|f| f.to_u8())) + .unwrap_or_default(), + time_unix_nano: value.record.timestamp, + observed_time_unix_nano: value.record.observed_timestamp, + severity_number: value + .record + .severity_number + .map(|u| u as u32) + .unwrap_or_default(), + attributes: value + .record + .attributes + .map(|attrs| { + attrs + .into_iter() + .map(|(key, value)| (key, value).into()) + .collect() + }) + .unwrap_or_default(), + dropped_attributes_count: 0, + severity_text: value.record.severity_text, + body: value.record.body.map(|a| a.into()), + } + } +} + +fn opt_as_unix_nano(time: &Option, serializer: S) -> Result +where + S: Serializer, +{ + 
as_unix_nano(time.as_ref().unwrap(), serializer) +} diff --git a/opentelemetry-stdout/src/trace/transform.rs b/opentelemetry-stdout/src/trace/transform.rs index 1397d7ecc3..34b8b1e167 100644 --- a/opentelemetry-stdout/src/trace/transform.rs +++ b/opentelemetry-stdout/src/trace/transform.rs @@ -1,13 +1,9 @@ -use std::{ - borrow::Cow, - collections::HashMap, - time::{SystemTime, UNIX_EPOCH}, -}; +use std::{borrow::Cow, collections::HashMap, time::SystemTime}; use opentelemetry_sdk::AttributeSet; use serde::{Serialize, Serializer}; -use crate::common::{KeyValue, Resource, Scope}; +use crate::common::{as_unix_nano, KeyValue, Resource, Scope}; /// Transformed trace data that can be serialized #[derive(Debug, Serialize)] @@ -92,18 +88,6 @@ struct Span { status: Status, } -fn as_unix_nano(time: &SystemTime, serializer: S) -> Result -where - S: Serializer, -{ - let nanos = time - .duration_since(UNIX_EPOCH) - .unwrap_or_default() - .as_nanos(); - - serializer.serialize_u128(nanos) -} - impl From for Span { fn from(value: opentelemetry_sdk::export::trace::SpanData) -> Self { Span { diff --git a/opentelemetry/Cargo.toml b/opentelemetry/Cargo.toml index cd7b31db61..953ae608a0 100644 --- a/opentelemetry/Cargo.toml +++ b/opentelemetry/Cargo.toml @@ -31,6 +31,7 @@ opentelemetry-stdout = { path = "../opentelemetry-stdout" } default = ["trace"] trace = ["opentelemetry_api/trace", "opentelemetry_sdk/trace", "opentelemetry-stdout/trace"] metrics = ["opentelemetry_api/metrics", "opentelemetry_sdk/metrics", "opentelemetry-stdout/metrics"] +logs = ["opentelemetry_sdk/logs"] testing = ["opentelemetry_api/testing", "opentelemetry_sdk/testing"] rt-tokio = ["opentelemetry_sdk/rt-tokio"] rt-tokio-current-thread = ["opentelemetry_sdk/rt-tokio-current-thread"]