You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
Block-builder: adopt concurrent fetcher from ingest storage. (#12222)
Reuse ingest-storage's ConcurrentFetchers inside the block-builder to
consume faster from Kafka when `-ingest-storage.kafka.fetch-concurrency-max` is given in
the flags.
Copy file name to clipboardExpand all lines: CHANGELOG.md
+1Lines changed: 1 addition & 0 deletions
Display the source diff
Display the rich diff
Original file line number
Diff line number
Diff line change
@@ -52,6 +52,7 @@
52
52
*[ENHANCEMENT] Querier: Include more information about inflight queries in the activity tracker. A querier logs this information after it restarts following a crash. #12526
53
53
*[ENHANCEMENT] Ingester: Add experimental `-blocks-storage.tsdb.index-lookup-planning-comparison-portion` flag to enable mirrored chunk querier comparison between queries with and without index lookup planning. #12460
54
54
*[ENHANCEMENT] Ruler: Add native histogram version of `cortex_ruler_sync_rules_duration_seconds`. #12628
55
+
*[ENHANCEMENT] Block-builder: Implement concurrent consumption within a job when `-ingest-storage.kafka.fetch-concurrency-max` is given. #12222
55
56
*[ENHANCEMENT] Query-frontend: Labels query optimizer is no longer experimental and is enabled by default. It can be disabled with `-query-frontend.labels-query-optimizer-enabled=false` CLI flag. #12606
56
57
*[ENHANCEMENT] Distributor: Add value length to "label value too long" error. #12583
57
58
*[ENHANCEMENT] Distributor: The metric `cortex_distributor_uncompressed_request_body_size_bytes` now differentiates by the handler serving the request. #12661
panic("readerMetrics should be non-nil when concurrent fetchers are used")
291
+
}
292
+
293
+
boff:=backoff.New(ctx, backoff.Config{
294
+
MinBackoff: 100*time.Millisecond,
295
+
MaxBackoff: 5*time.Second,
296
+
MaxRetries: 10,
297
+
})
298
+
299
+
varlastErrorerror
300
+
301
+
forboff.Ongoing() {
302
+
f, ferr:=ingest.NewConcurrentFetchers(
303
+
ctx,
304
+
b.kafkaClient,
305
+
logger,
306
+
b.cfg.Kafka.Topic,
307
+
partition,
308
+
startOffset,
309
+
b.cfg.Kafka.FetchConcurrencyMax,
310
+
int32(b.cfg.Kafka.MaxBufferedBytes),
311
+
b.cfg.Kafka.UseCompressedBytesAsFetchMaxBytes,
312
+
b.cfg.Kafka.FetchMaxWait,
313
+
nil, // Don't need a reader since we've provided the start offset.
314
+
ingest.OnRangeErrorAbort,
315
+
nil, // We're aborting on range error, so we don't need an offset reader.
316
+
backoff.Config{
317
+
MinBackoff: 100*time.Millisecond,
318
+
MaxBackoff: 1*time.Second,
319
+
},
320
+
b.readerMetrics)
321
+
ifferr==nil {
322
+
returnf, nil
323
+
}
324
+
level.Warn(b.logger).Log("msg", "failed to create concurrent fetcher, probably retrying...", "err", ferr)
325
+
lastError=ferr
326
+
boff.Wait()
327
+
}
328
+
329
+
returnnil, lastError
330
+
}
331
+
246
332
// consumePartitionSection is for the use of scheduler-based architecture.
247
333
// startOffset is inclusive, endOffset is exclusive, and must be valid offsets and not something in the future (endOffset can be technically 1 offset in the future).
248
334
// All the records and samples between these offsets will be consumed and put into a block.
0 commit comments