    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/cloudwatchlogs"
+   "github.com/go-kit/log"
+   "github.com/go-kit/log/level"
    "github.com/prometheus/client_golang/prometheus"
-   "github.com/prometheus/common/log"

    "github.com/percona/rds_exporter/sessions"
)
@@ -25,7 +26,7 @@ type scraper struct {
    testDisallowUnknownFields bool // for tests only
}

-func newScraper(session *session.Session, instances []sessions.Instance) *scraper {
+func newScraper(session *session.Session, instances []sessions.Instance, logger log.Logger) *scraper {
    logStreamNames := make([]string, 0, len(instances))
    for _, instance := range instances {
        logStreamNames = append(logStreamNames, instance.ResourceID)
@@ -36,7 +37,7 @@ func newScraper(session *session.Session, instances []sessions.Instance) *scrape
        logStreamNames: logStreamNames,
        svc:            cloudwatchlogs.New(session),
        nextStartTime:  time.Now().Add(-3 * time.Minute).Round(0), // strip monotonic clock reading
-       logger:         log.With("component", "enhanced"),
+       logger:         log.With(logger, "component", "enhanced"),
    }
}

@@ -82,14 +83,16 @@ func (s *scraper) scrape(ctx context.Context) (map[string][]prometheus.Metric, m
            StartTime:      aws.Int64(aws.TimeUnixMilli(s.nextStartTime)),
        }

-       s.logger.With("next_start", s.nextStartTime.UTC()).With("since_last", time.Since(s.nextStartTime)).Debugf("Requesting metrics")
+       level.Debug(log.With(s.logger, "next_start", s.nextStartTime.UTC(), "since_last", time.Since(s.nextStartTime))).Log("msg", "Requesting metrics")

        // collect all returned events and metrics/messages
        collectAllMetrics := func(output *cloudwatchlogs.FilterLogEventsOutput, lastPage bool) bool {
            for _, event := range output.Events {
-               l := s.logger.With("EventId", *event.EventId).With("LogStreamName", *event.LogStreamName)
-               l = l.With("Timestamp", aws.MillisecondsTimeValue(event.Timestamp).UTC())
-               l = l.With("IngestionTime", aws.MillisecondsTimeValue(event.IngestionTime).UTC())
+               l := log.With(s.logger,
+                   "EventId", *event.EventId,
+                   "LogStreamName", *event.LogStreamName,
+                   "Timestamp", aws.MillisecondsTimeValue(event.Timestamp).UTC(),
+                   "IngestionTime", aws.MillisecondsTimeValue(event.IngestionTime).UTC())

                var instance *sessions.Instance
                for _, i := range s.instances {
@@ -99,15 +102,15 @@ func (s *scraper) scrape(ctx context.Context) (map[string][]prometheus.Metric, m
                    }
                }
                if instance == nil {
-                   l.Errorf("Failed to find instance.")
+                   level.Error(l).Log("msg", "Failed to find instance.")
                    continue
                }

                if instance.DisableEnhancedMetrics {
-                   l.Debugf("Enhanced Metrics are disabled for instance %v.", instance)
+                   level.Debug(l).Log("msg", fmt.Sprintf("Enhanced Metrics are disabled for instance %v.", instance))
                    continue
                }
-               l = l.With("region", instance.Region).With("instance", instance.Instance)
+               l = log.With(l, "region", instance.Region, "instance", instance.Instance)

                // l.Debugf("Message:\n%s", *event.Message)
                osMetrics, err := parseOSMetrics([]byte(*event.Message), s.testDisallowUnknownFields)
@@ -117,13 +120,13 @@ func (s *scraper) scrape(ctx context.Context) (map[string][]prometheus.Metric, m
                        panic(fmt.Sprintf("New metrics should be added: %s", err))
                    }

-                   l.Errorf("Failed to parse metrics: %s.", err)
+                   level.Error(l).Log("msg", "Failed to parse metrics.", "error", err)
                    continue
                }
                // l.Debugf("OS Metrics:\n%#v", osMetrics)

                timestamp := aws.MillisecondsTimeValue(event.Timestamp).UTC()
-               l.Debugf("Timestamp from message: %s; from event: %s.", osMetrics.Timestamp.UTC(), timestamp)
+               level.Debug(l).Log("msg", fmt.Sprintf("Timestamp from message: %s; from event: %s.", osMetrics.Timestamp.UTC(), timestamp))

                if allMetrics[instance.ResourceID] == nil {
                    allMetrics[instance.ResourceID] = make(map[time.Time][]prometheus.Metric)
@@ -139,7 +142,7 @@ func (s *scraper) scrape(ctx context.Context) (map[string][]prometheus.Metric, m
            return true // continue pagination
        }
        if err := s.svc.FilterLogEventsPagesWithContext(ctx, input, collectAllMetrics); err != nil {
-           s.logger.Errorf("Failed to filter log events: %s.", err)
+           level.Error(s.logger).Log("msg", "Failed to filter log events.", "error", err)
        }
    }

    // get better times
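Note on wiring: after this change newScraper no longer creates its own logger; the caller must inject a go-kit log.Logger. A minimal sketch of how such a logger might be constructed and used with the leveled API adopted in this diff (the logfmt/stderr writer and the debug-level filter below are illustrative assumptions, not part of this change):

package main

import (
    "os"

    "github.com/go-kit/log"
    "github.com/go-kit/log/level"
)

func main() {
    // Assumed setup: a logfmt logger writing to stderr, wrapped for safe concurrent use.
    logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
    // Assumed level filter; a real exporter would typically derive this from a flag.
    logger = level.NewFilter(logger, level.AllowDebug())

    // The constructor in this diff attaches constant context the same way.
    logger = log.With(logger, "component", "enhanced")

    // Leveled, structured calls replace the old Debugf/Errorf helpers.
    level.Debug(logger).Log("msg", "Requesting metrics", "since_last", "3m")
    level.Error(logger).Log("msg", "Failed to filter log events", "error", "example error")
}

A logger built this way would then be passed as the new third argument to newScraper.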