Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
[chore][exporter/prometheusremotewriteexporter] run make modernize
Signed-off-by: Paulo Dias <[email protected]>
  • Loading branch information
paulojmdias committed Oct 8, 2025
commit 4c0d2defbc1cc9135f289b9ecc5db5f18573edf1
2 changes: 1 addition & 1 deletion exporter/prometheusremotewriteexporter/exporter.go
Original file line number Diff line number Diff line change
Expand Up @@ -372,7 +372,7 @@ func (prwe *prwExporter) export(ctx context.Context, requests []*prompb.WriteReq
var errs error
// Run concurrencyLimit of workers until there
// are no more requests to execute in the input channel.
for i := 0; i < concurrencyLimit; i++ {
for range concurrencyLimit {
go func() {
defer wg.Done()
err := prwe.handleRequests(ctx, input)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ func Test_PushMetricsConcurrent(t *testing.T) {
n := 1000
ms := make([]pmetric.Metrics, n)
testIDKey := "test_id"
for i := 0; i < n; i++ {
for i := range n {
m := testdata.GenerateMetricsOneMetric()
dps := m.ResourceMetrics().At(0).ScopeMetrics().At(0).Metrics().At(0).Sum().DataPoints()
for j := 0; j < dps.Len(); j++ {
Expand Down
10 changes: 5 additions & 5 deletions exporter/prometheusremotewriteexporter/exporter_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -240,7 +240,7 @@ func Test_Shutdown(t *testing.T) {
err := prwe.Shutdown(t.Context())
require.NoError(t, err)
errChan := make(chan error, 5)
for i := 0; i < 5; i++ {
for range 5 {
wg.Add(1)
go func() {
defer wg.Done()
Expand Down Expand Up @@ -1251,7 +1251,7 @@ func benchmarkExecute(b *testing.B, numSample int) {

generateSamples := func(n int) []prompb.Sample {
samples := make([]prompb.Sample, 0, n)
for i := 0; i < n; i++ {
for i := range n {
samples = append(samples, prompb.Sample{
Timestamp: int64(i),
Value: float64(i),
Expand All @@ -1262,7 +1262,7 @@ func benchmarkExecute(b *testing.B, numSample int) {

generateHistograms := func(n int) []prompb.Histogram {
histograms := make([]prompb.Histogram, 0, n)
for i := 0; i < n; i++ {
for i := range n {
histograms = append(histograms, prompb.Histogram{
Timestamp: int64(i),
Count: &prompb.Histogram_CountInt{CountInt: uint64(i)},
Expand All @@ -1274,7 +1274,7 @@ func benchmarkExecute(b *testing.B, numSample int) {

reqs := make([]*prompb.WriteRequest, 0, b.N)
const labelValue = "abcdefg'hijlmn234!@#$%^&*()_+~`\"{}[],./<>?hello0123hiOlá你好Dzieńdobry9Zd8ra765v4stvuyte"
for n := 0; n < b.N; n++ {
for n := 0; b.Loop(); n++ {
num := strings.Repeat(strconv.Itoa(n), 16)
req := &prompb.WriteRequest{
Metadata: []prompb.MetricMetadata{
Expand Down Expand Up @@ -1373,7 +1373,7 @@ func benchmarkPushMetrics(b *testing.B, numMetrics, numConsumers int) {
require.NoError(b, err)

var metrics []pmetric.Metrics
for n := 0; n < b.N; n++ {
for n := 0; b.Loop(); n++ {
actualNumMetrics := numMetrics
if numMetrics == -1 {
actualNumMetrics = int(math.Pow(10, float64(n%4+1)))
Expand Down
2 changes: 1 addition & 1 deletion exporter/prometheusremotewriteexporter/exporter_v2.go
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ func (prwe *prwExporter) exportV2(ctx context.Context, requests []*writev2.Reque
var errs error
// Run concurrencyLimit of workers until there
// are no more requests to execute in the input channel.
for i := 0; i < concurrencyLimit; i++ {
for range concurrencyLimit {
go func() {
defer wg.Done()
err := prwe.handleRequestsV2(ctx, input)
Expand Down
9 changes: 4 additions & 5 deletions exporter/prometheusremotewriteexporter/helper_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -89,7 +89,7 @@ func Test_batchTimeSeriesUpdatesStateForLargeBatches(t *testing.T) {
// Benchmark for large data sizes
// First allocate 100k time series
tsArray := make([]*prompb.TimeSeries, 0, 100000)
for i := 0; i < 100000; i++ {
for range 100000 {
ts := getTimeSeries(labels, sample1, sample2, sample3)
tsArray = append(tsArray, ts)
}
Expand Down Expand Up @@ -119,19 +119,18 @@ func Benchmark_batchTimeSeries(b *testing.B) {
// Benchmark for large data sizes
// First allocate 100k time series
tsArray := make([]*prompb.TimeSeries, 0, 100000)
for i := 0; i < 100000; i++ {
for range 100000 {
ts := getTimeSeries(labels, sample1, sample2, sample3)
tsArray = append(tsArray, ts)
}

tsMap1 := getTimeseriesMap(tsArray)

b.ReportAllocs()
b.ResetTimer()

state := newBatchTimeServicesState()
// Run batchTimeSeries 100 times with a 1mb max request size
for i := 0; i < b.N; i++ {
for b.Loop() {
requests, err := batchTimeSeries(tsMap1, 1000000, nil, state)
assert.NoError(b, err)
assert.Len(b, requests, 18)
Expand Down Expand Up @@ -240,7 +239,7 @@ func TestEnsureTimeseriesPointsAreSortedByTimestamp(t *testing.T) {
for ti, ts := range got.Timeseries {
for i := range ts.Samples {
si := ts.Samples[i]
for j := 0; j < i; j++ {
for j := range i {
sj := ts.Samples[j]
assert.LessOrEqual(t, sj.Timestamp, si.Timestamp, "Timeseries[%d]: Sample[%d].Timestamp(%d) > Sample[%d].Timestamp(%d)",
ti, j, sj.Timestamp, i, si.Timestamp)
Expand Down
4 changes: 2 additions & 2 deletions exporter/prometheusremotewriteexporter/helper_v2_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -95,7 +95,7 @@ func Test_batchTimeSeriesV2UpdatesStateForLargeBatches(t *testing.T) {
tsArray := make([]*writev2.TimeSeries, 0, 100000)
var smb writev2.SymbolsTable
var ts *writev2.TimeSeries
for i := 0; i < 100000; i++ {
for range 100000 {
ts, smb = getTimeSeriesV2(labels, sample1, sample2, sample3)
tsArray = append(tsArray, ts)
}
Expand Down Expand Up @@ -211,7 +211,7 @@ func TestEnsureTimeseriesPointsAreSortedByTimestampV2(t *testing.T) {
for ti, ts := range got {
for i := range ts.Samples {
si := ts.Samples[i]
for j := 0; j < i; j++ {
for j := range i {
sj := ts.Samples[j]
assert.LessOrEqual(t, sj.Timestamp, si.Timestamp, "Timeseries[%d]: Sample[%d].Timestamp(%d) > Sample[%d].Timestamp(%d)",
ti, j, sj.Timestamp, i, si.Timestamp)
Expand Down
4 changes: 2 additions & 2 deletions exporter/prometheusremotewriteexporter/testutil_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -209,7 +209,7 @@ func getMetricsFromMetricList(metricList ...pmetric.Metric) pmetric.Metrics {
rm := metrics.ResourceMetrics().AppendEmpty()
ilm := rm.ScopeMetrics().AppendEmpty()
ilm.Metrics().EnsureCapacity(len(metricList))
for i := 0; i < len(metricList); i++ {
for i := range metricList {
metricList[i].CopyTo(ilm.Metrics().AppendEmpty())
}

Expand Down Expand Up @@ -420,7 +420,7 @@ func getQuantiles(bounds, values []float64) pmetric.SummaryDataPointValueAtQuant
quantiles := pmetric.NewSummaryDataPointValueAtQuantileSlice()
quantiles.EnsureCapacity(len(bounds))

for i := 0; i < len(bounds); i++ {
for i := range bounds {
quantile := quantiles.AppendEmpty()
quantile.SetQuantile(bounds[i])
quantile.SetValue(values[i])
Expand Down
2 changes: 1 addition & 1 deletion exporter/prometheusremotewriteexporter/wal.go
Original file line number Diff line number Diff line change
Expand Up @@ -442,7 +442,7 @@ func (prweWAL *prweWAL) persistToWAL(ctx context.Context, requests []*prompb.Wri

func (prweWAL *prweWAL) readPrompbFromWAL(ctx context.Context, index uint64) (wreq *prompb.WriteRequest, err error) {
var protoBlob []byte
for i := 0; i < 12; i++ {
for range 12 {
// Firstly check if we've been terminated, then exit if so.
select {
case <-ctx.Done():
Expand Down
2 changes: 1 addition & 1 deletion exporter/prometheusremotewriteexporter/wal_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -109,7 +109,7 @@ func TestWALStopManyTimes(t *testing.T) {
// Ensure that invoking .stop() multiple times doesn't cause a panic.
// The first close should NOT return an error.
require.NoError(t, pwal.stop())
for i := 0; i < 4; i++ {
for range 4 {
// Every invocation to .stop() should return an errAlreadyClosed.
require.ErrorIs(t, pwal.stop(), errAlreadyClosed)
}
Expand Down