Commit 6c48fbb

chore: Fix linter findings for revive:enforce-map-style in plugins/inputs/[a-m]* (influxdata#16042)
1 parent e257c14 commit 6c48fbb
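
The diffs below satisfy revive's enforce-map-style lint rule; judging by the direction of the changes, the rule is configured for the "make" style, which flags empty map literals such as map[string]string{} and prefers make(map[string]string). Populated literals are left untouched, and make also accepts an optional capacity hint when the eventual size is known. A minimal sketch of the forms involved, using made-up identifiers rather than Telegraf code:

// Hypothetical example, not Telegraf code: what revive's enforce-map-style
// rule (make style) flags and what it accepts.
package main

func main() {
	tags := map[string]string{}         // flagged: empty map literal
	fields := make(map[string]string)   // preferred: make without a size hint
	sized := make(map[string]string, 4) // also fine: make with a capacity hint

	// Non-empty literals are not flagged; they cannot be written with make.
	defaults := map[string]string{"source": "localhost"}

	_, _, _, _ = tags, fields, sized, defaults
}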

File tree: 33 files changed (+80, -89 lines)


plugins/inputs/aliyuncms/aliyuncms.go (+3, -3)

@@ -148,7 +148,7 @@ func (s *AliyunCMS) Init() error {
 		if metric.Dimensions == "" {
 			continue
 		}
-		metric.dimensionsUdObj = map[string]string{}
+		metric.dimensionsUdObj = make(map[string]string)
 		metric.dimensionsUdArr = []map[string]string{}
 
 		// first try to unmarshal as an object
@@ -295,9 +295,9 @@ func (s *AliyunCMS) gatherMetric(acc telegraf.Accumulator, metricName string, me
 
 NextDataPoint:
 	for _, datapoint := range datapoints {
-		fields := map[string]interface{}{}
+		fields := make(map[string]interface{}, len(datapoint))
+		tags := make(map[string]string, len(datapoint))
 		datapointTime := int64(0)
-		tags := map[string]string{}
 		for key, value := range datapoint {
 			switch key {
 			case "instanceId", "BucketName":

plugins/inputs/aliyuncms/discovery.go (+8, -12)

@@ -97,8 +97,6 @@ func newDiscoveryTool(
 	discoveryInterval time.Duration,
 ) (*discoveryTool, error) {
 	var (
-		dscReq = map[string]discoveryRequest{}
-		cli = map[string]aliyunSdkClient{}
 		responseRootKey string
 		responseObjectIDKey string
 		err error
@@ -115,6 +113,8 @@ func newDiscoveryTool(
 		rateLimit = 1
 	}
 
+	dscReq := make(map[string]discoveryRequest, len(regions))
+	cli := make(map[string]aliyunSdkClient, len(regions))
 	for _, region := range regions {
 		switch project {
 		case "acs_ecs_dashboard":
@@ -252,7 +252,7 @@ func newDiscoveryTool(
 
 func (dt *discoveryTool) parseDiscoveryResponse(resp *responses.CommonResponse) (*parsedDResp, error) {
 	var (
-		fullOutput = map[string]interface{}{}
+		fullOutput = make(map[string]interface{})
 		data []byte
 		foundDataItem bool
 		foundRootKey bool
@@ -335,8 +335,8 @@ func (dt *discoveryTool) getDiscoveryData(cli aliyunSdkClient, req *requests.Com
 		req.QueryParams["PageNumber"] = strconv.Itoa(pageNumber)
 
 		if len(discoveryData) == totalCount { // All data received
-			// Map data to appropriate shape before return
-			preparedData := map[string]interface{}{}
+			// Map data to the appropriate shape before return
+			preparedData := make(map[string]interface{}, len(discoveryData))
 
 			for _, raw := range discoveryData {
 				elem, ok := raw.(map[string]interface{})
@@ -353,10 +353,7 @@ func (dt *discoveryTool) getDiscoveryData(cli aliyunSdkClient, req *requests.Com
 }
 
 func (dt *discoveryTool) getDiscoveryDataAcrossRegions(lmtr chan bool) (map[string]interface{}, error) {
-	var (
-		data map[string]interface{}
-		resultData = map[string]interface{}{}
-	)
+	resultData := make(map[string]interface{})
 
 	for region, cli := range dt.cli {
 		// Building common request, as the code below is the same no matter
@@ -383,7 +380,7 @@
 		commonRequest.TransToAcsRequest()
 
 		// Get discovery data using common request
-		data, err = dt.getDiscoveryData(cli, commonRequest, lmtr)
+		data, err := dt.getDiscoveryData(cli, commonRequest, lmtr)
 		if err != nil {
 			return nil, err
 		}
@@ -428,8 +425,7 @@ func (dt *discoveryTool) start() {
 			}
 
 			if !reflect.DeepEqual(data, lastData) {
-				lastData = nil
-				lastData = map[string]interface{}{}
+				lastData = make(map[string]interface{}, len(data))
 				for k, v := range data {
 					lastData[k] = v
 				}

plugins/inputs/amd_rocm_smi/amd_rocm_smi.go (+1, -1)

@@ -186,7 +186,6 @@ func genTagsFields(gpus map[string]gpu, system map[string]sysInfo) []metric {
 		tags := map[string]string{
 			"name": cardID,
 		}
-		fields := map[string]interface{}{}
 
 		payload := gpus[cardID]
 		//nolint:errcheck // silently treat as zero if malformed
@@ -202,6 +201,7 @@
 
 		setTagIfUsed(tags, "gpu_unique_id", payload.GpuUniqueID)
 
+		fields := make(map[string]interface{}, 20)
 		setIfUsed("int", fields, "driver_version", strings.ReplaceAll(system["system"].DriverVersion, ".", ""))
 		setIfUsed("int", fields, "fan_speed", payload.GpuFanSpeedPercentage)
 		setIfUsed("int64", fields, "memory_total", payload.GpuVRAMTotalMemory)

plugins/inputs/ceph/ceph.go (+7, -7)

@@ -505,7 +505,7 @@ func decodeStatusFsmap(acc telegraf.Accumulator, data *status) error {
 		"up_standby": data.FSMap.NumUpStandby,
 		"up": data.FSMap.NumUp,
 	}
-	acc.AddFields("ceph_fsmap", fields, map[string]string{})
+	acc.AddFields("ceph_fsmap", fields, make(map[string]string))
 	return nil
 }
 
@@ -521,7 +521,7 @@ func decodeStatusHealth(acc telegraf.Accumulator, data *status) error {
 		"status_code": statusCodes[data.Health.Status],
 		"status": data.Health.Status,
 	}
-	acc.AddFields("ceph_health", fields, map[string]string{})
+	acc.AddFields("ceph_health", fields, make(map[string]string))
 	return nil
 }
 
@@ -530,7 +530,7 @@ func decodeStatusMonmap(acc telegraf.Accumulator, data *status) error {
 	fields := map[string]interface{}{
 		"num_mons": data.MonMap.NumMons,
 	}
-	acc.AddFields("ceph_monmap", fields, map[string]string{})
+	acc.AddFields("ceph_monmap", fields, make(map[string]string))
 	return nil
 }
 
@@ -555,7 +555,7 @@ func decodeStatusOsdmap(acc telegraf.Accumulator, data *status) error {
 		}
 	}
 
-	acc.AddFields("ceph_osdmap", fields, map[string]string{})
+	acc.AddFields("ceph_osdmap", fields, make(map[string]string))
 	return nil
 }
 
@@ -586,7 +586,7 @@ func decodeStatusPgmap(acc telegraf.Accumulator, data *status) error {
 		"write_bytes_sec": data.PGMap.WriteBytesSec,
 		"write_op_per_sec": data.PGMap.WriteOpPerSec,
 	}
-	acc.AddFields("ceph_pgmap", fields, map[string]string{})
+	acc.AddFields("ceph_pgmap", fields, make(map[string]string))
 	return nil
 }
 
@@ -654,14 +654,14 @@ func decodeDf(acc telegraf.Accumulator, input string) error {
 		"total_used_raw_ratio": data.Stats.TotalUsedRawRatio,
 		"total_used": data.Stats.TotalUsed, // pre ceph 0.84
 	}
-	acc.AddFields("ceph_usage", fields, map[string]string{})
+	acc.AddFields("ceph_usage", fields, make(map[string]string))
 
 	// ceph.stats_by_class: records per device-class usage
 	for class, stats := range data.StatsbyClass {
 		tags := map[string]string{
 			"class": class,
 		}
-		fields := map[string]interface{}{}
+		fields := make(map[string]interface{})
 		for key, value := range stats {
 			fields[key] = value
 		}

plugins/inputs/chrony/chrony.go (+2, -2)

@@ -227,7 +227,7 @@ func (c *Chrony) gatherActivity(acc telegraf.Accumulator) error {
 		return fmt.Errorf("got unexpected response type %T while waiting for activity data", r)
 	}
 
-	tags := map[string]string{}
+	tags := make(map[string]string, 1)
 	if c.source != "" {
 		tags["source"] = c.source
 	}
@@ -300,7 +300,7 @@ func (c *Chrony) gatherServerStats(acc telegraf.Accumulator) error {
 		return fmt.Errorf("querying server statistics failed: %w", err)
 	}
 
-	tags := map[string]string{}
+	tags := make(map[string]string, 1)
 	if c.source != "" {
 		tags["source"] = c.source
 	}

plugins/inputs/cloudwatch/cloudwatch.go (+5, -8)

@@ -142,8 +142,7 @@ func (c *CloudWatch) Gather(acc telegraf.Accumulator) error {
 	wg := sync.WaitGroup{}
 	rLock := sync.Mutex{}
 
-	results := map[string][]types.MetricDataResult{}
-
+	results := make(map[string][]types.MetricDataResult)
 	for namespace, namespacedQueries := range queries {
 		var batches [][]types.MetricDataQuery
 
@@ -373,9 +372,8 @@ func (c *CloudWatch) getDataQueries(filteredMetrics []filteredMetric) map[string
 		return c.metricCache.queries
 	}
 
-	c.queryDimensions = map[string]*map[string]string{}
-
-	dataQueries := map[string][]types.MetricDataQuery{}
+	c.queryDimensions = make(map[string]*map[string]string)
+	dataQueries := make(map[string][]types.MetricDataQuery)
 	for i, filtered := range filteredMetrics {
 		for j, singleMetric := range filtered.metrics {
 			id := strconv.Itoa(j) + "_" + strconv.Itoa(i)
@@ -460,8 +458,7 @@ func (c *CloudWatch) aggregateMetrics(acc telegraf.Accumulator, metricDataResult
 	namespace = sanitizeMeasurement(namespace)
 
 	for _, result := range results {
-		tags := map[string]string{}
-
+		tags := make(map[string]string)
 		if dimensions, ok := c.queryDimensions[*result.Id]; ok {
 			tags = *dimensions
 		}
@@ -507,7 +504,7 @@ func snakeCase(s string) string {
 
 // ctod converts cloudwatch dimensions to regular dimensions.
 func ctod(cDimensions []types.Dimension) *map[string]string {
-	dimensions := map[string]string{}
+	dimensions := make(map[string]string, len(cDimensions))
 	for i := range cDimensions {
 		dimensions[snakeCase(*cDimensions[i].Name)] = *cDimensions[i].Value
 	}

plugins/inputs/couchdb/couchdb.go (+1, -2)

@@ -149,8 +149,6 @@ func (c *CouchDB) fetchAndInsertData(accumulator telegraf.Accumulator, host stri
 		return fmt.Errorf("failed to decode stats from couchdb: HTTP body %q", response.Body)
 	}
 
-	fields := map[string]interface{}{}
-
 	// for couchdb 2.0 API changes
 	requestTime := metaData{
 		Current: stats.Couchdb.RequestTime.Current,
@@ -207,6 +205,7 @@
 		httpdStatusCodesStatus500 = stats.Couchdb.HttpdStatusCodes.Status500
 	}
 
+	fields := make(map[string]interface{}, 31)
 	// CouchDB meta stats:
 	c.generateFields(fields, "couchdb_auth_cache_misses", stats.Couchdb.AuthCacheMisses)
 	c.generateFields(fields, "couchdb_database_writes", stats.Couchdb.DatabaseWrites)

plugins/inputs/directory_monitor/directory_monitor.go (+2, -2)

@@ -100,9 +100,9 @@ func (monitor *DirectoryMonitor) Init() error {
 	tags := map[string]string{
 		"directory": monitor.Directory,
 	}
-	monitor.filesDropped = selfstat.Register("directory_monitor", "files_dropped", map[string]string{})
+	monitor.filesDropped = selfstat.Register("directory_monitor", "files_dropped", make(map[string]string))
 	monitor.filesDroppedDir = selfstat.Register("directory_monitor", "files_dropped_per_dir", tags)
-	monitor.filesProcessed = selfstat.Register("directory_monitor", "files_processed", map[string]string{})
+	monitor.filesProcessed = selfstat.Register("directory_monitor", "files_processed", make(map[string]string))
 	monitor.filesProcessedDir = selfstat.Register("directory_monitor", "files_processed_per_dir", tags)
 	monitor.filesQueuedDir = selfstat.Register("directory_monitor", "files_queue_per_dir", tags)
 

plugins/inputs/diskio/diskio.go (+2, -2)

@@ -81,7 +81,7 @@ func (d *DiskIO) Gather(acc telegraf.Accumulator) error {
 			match = true
 		}
 
-		tags := map[string]string{}
+		tags := make(map[string]string)
 		var devLinks []string
 		tags["name"], devLinks = d.diskName(io.Name)
 
@@ -207,7 +207,7 @@ func (d *DiskIO) diskTags(devName string) map[string]string {
 		return nil
 	}
 
-	tags := map[string]string{}
+	tags := make(map[string]string, len(d.DeviceTags))
 	for _, dt := range d.DeviceTags {
 		if v, ok := di[dt]; ok {
 			tags[dt] = v

plugins/inputs/docker/docker.go (+5, -6)

@@ -276,16 +276,15 @@ func (d *Docker) gatherSwarmInfo(acc telegraf.Accumulator) error {
 		return err
 	}
 
-	running := map[string]int{}
-	tasksNoShutdown := map[string]uint64{}
-
 	activeNodes := make(map[string]struct{})
 	for _, n := range nodes {
 		if n.Status.State != swarm.NodeStateDown {
 			activeNodes[n.ID] = struct{}{}
 		}
 	}
 
+	tasksNoShutdown := make(map[string]uint64, len(tasks))
+	running := make(map[string]int, len(tasks))
 	for _, task := range tasks {
 		if task.DesiredState != swarm.TaskStateShutdown {
 			tasksNoShutdown[task.ServiceID]++
@@ -297,8 +296,8 @@
 	}
 
 	for _, service := range services {
-		tags := map[string]string{}
-		fields := make(map[string]interface{})
+		tags := make(map[string]string, 3)
+		fields := make(map[string]interface{}, 2)
 		now := time.Now()
 		tags["service_id"] = service.ID
 		tags["service_name"] = service.Spec.Name
@@ -375,7 +374,7 @@ func (d *Docker) gatherInfo(acc telegraf.Accumulator) error {
 	var (
 		// "docker_devicemapper" measurement fields
 		poolName string
-		deviceMapperFields = map[string]interface{}{}
+		deviceMapperFields = make(map[string]interface{}, len(info.DriverStatus))
 	)
 
 	for _, rawData := range info.DriverStatus {

plugins/inputs/elasticsearch/elasticsearch.go (+4, -4)

@@ -548,11 +548,11 @@ func (e *Elasticsearch) gatherIndicesStats(url string, acc telegraf.Accumulator)
 	now := time.Now()
 
 	// Total Shards Stats
-	shardsStats := map[string]interface{}{}
+	shardsStats := make(map[string]interface{}, len(indicesStats.Shards))
 	for k, v := range indicesStats.Shards {
 		shardsStats[k] = v
 	}
-	acc.AddFields("elasticsearch_indices_stats_shards_total", shardsStats, map[string]string{}, now)
+	acc.AddFields("elasticsearch_indices_stats_shards_total", shardsStats, make(map[string]string), now)
 
 	// All Stats
 	for m, s := range indicesStats.All {
@@ -603,7 +603,7 @@ func (e *Elasticsearch) gatherIndividualIndicesStats(indices map[string]indexSta
 }
 
 func (e *Elasticsearch) categorizeIndices(indices map[string]indexStat) map[string][]string {
-	categorizedIndexNames := map[string][]string{}
+	categorizedIndexNames := make(map[string][]string, len(indices))
 
 	// If all indices are configured to be gathered, bucket them all together.
 	if len(e.IndicesInclude) == 0 || e.IndicesInclude[0] == "_all" {
@@ -768,8 +768,8 @@ func (e *Elasticsearch) gatherJSONData(url string, v interface{}) error {
 }
 
 func (e *Elasticsearch) compileIndexMatchers() (map[string]filter.Filter, error) {
-	indexMatchers := map[string]filter.Filter{}
 	var err error
+	indexMatchers := make(map[string]filter.Filter, len(e.IndicesInclude))
 
 	// Compile each configured index into a glob matcher.
 	for _, configuredIndex := range e.IndicesInclude {

plugins/inputs/elasticsearch_query/aggregation_parser.go (+1, -1)

@@ -23,7 +23,7 @@ func parseSimpleResult(acc telegraf.Accumulator, measurement string, searchResul
 }
 
 func parseAggregationResult(acc telegraf.Accumulator, aggregationQueryList []aggregationQueryData, searchResult *elastic5.SearchResult) error {
-	measurements := map[string]map[string]string{}
+	measurements := make(map[string]map[string]string, len(aggregationQueryList))
 
 	// organize the aggregation query data by measurement
 	for _, aggregationQuery := range aggregationQueryList {

plugins/inputs/ethtool/ethtool_linux.go (+1, -1)

@@ -289,7 +289,7 @@ func (c *CommandEthtool) Interfaces(includeNamespaces bool) ([]NamespacedInterfa
 	// Handles are only used to create namespaced goroutines. We don't prefill
 	// with the handle for the initial namespace because we've already created
 	// its goroutine in Init().
-	handles := map[string]netns.NsHandle{}
+	handles := make(map[string]netns.NsHandle)
 
 	if includeNamespaces {
 		namespaces, err := os.ReadDir(namespaceDirectory)

plugins/inputs/fibaro/hc2/parser.go (+2, -2)

@@ -13,7 +13,7 @@ func Parse(acc telegraf.Accumulator, sectionBytes, roomBytes, deviecsBytes []byt
 		return err
 	}
 
-	sections := map[uint16]string{}
+	sections := make(map[uint16]string, len(tmpSections))
 	for _, v := range tmpSections {
 		sections[v.ID] = v.Name
 	}
@@ -22,7 +22,7 @@ func Parse(acc telegraf.Accumulator, sectionBytes, roomBytes, deviecsBytes []byt
 	if err := json.Unmarshal(roomBytes, &tmpRooms); err != nil {
 		return err
 	}
-	rooms := map[uint16]LinkRoomsSections{}
+	rooms := make(map[uint16]LinkRoomsSections, len(tmpRooms))
 	for _, v := range tmpRooms {
 		rooms[v.ID] = LinkRoomsSections{Name: v.Name, SectionID: v.SectionID}
 	}

plugins/inputs/hugepages/hugepages.go (+1, -1)

@@ -240,7 +240,7 @@ func (h *Hugepages) gatherStatsFromMeminfo(acc telegraf.Accumulator) error {
 		metrics[metricName] = fieldValue
 	}
 
-	acc.AddFields("hugepages_"+meminfoHugepages, metrics, map[string]string{})
+	acc.AddFields("hugepages_"+meminfoHugepages, metrics, make(map[string]string))
 	return nil
 }
 
