Skip to content

Commit 732bef8

Browse files
mknyszek authored and gopherbot committed
sweet: update cockroachdb metrics
The CockroachDB reported metrics are a bit confusing. Namely, read-sec/op and write-sec/op are presented as the inverse of throughput, but they're actually average latency. We also do not have a unified ns/op metric. This change fixes both of those issues. read-sec/op and write-sec/op are reported as read-avg-latency-ns and write-avg-latency-ns now, and we add a ns/op value which sums the read and write ops and divides by the benchmark time. Change-Id: I46c087de53455c0a441a1bb4a2f01e8c7df071f7 Reviewed-on: https://go-review.googlesource.com/c/benchmarks/+/627515 Reviewed-by: Michael Pratt <mpratt@google.com> Auto-Submit: Michael Knyszek <mknyszek@google.com> LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
1 parent 46eaa19 commit 732bef8

File tree

1 file changed

+42
-28
lines changed
  • sweet/benchmarks/cockroachdb

1 file changed

+42
-28
lines changed

sweet/benchmarks/cockroachdb/main.go

Lines changed: 42 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -497,12 +497,19 @@ func reportFromBenchmarkOutput(b *driver.B, cfg *config, output string) (err err
497497
}
498498
}()
499499

500-
for _, metricType := range cfg.bench.metricTypes {
501-
err = getAndReportMetrics(b, metricType, output)
500+
metrics := make([]benchmarkMetrics, len(cfg.bench.metricTypes))
501+
for i, metricType := range cfg.bench.metricTypes {
502+
metrics[i], err = getMetrics(metricType, output)
502503
if err != nil {
503504
return err
504505
}
505506
}
507+
totalOps := float64(0)
508+
for i, metricType := range cfg.bench.metricTypes {
509+
reportMetrics(b, metricType, metrics[i])
510+
totalOps += metrics[i].totalOps
511+
}
512+
b.Report("ns/op", uint64(metrics[0].totalSec/totalOps*1e9))
506513

507514
if err != nil {
508515
return err
@@ -511,13 +518,14 @@ func reportFromBenchmarkOutput(b *driver.B, cfg *config, output string) (err err
511518
}
512519

513520
type benchmarkMetrics struct {
514-
totalOps uint64
515-
opsPerSecond uint64
516-
averageLatency uint64
517-
p50Latency uint64
518-
p95Latency uint64
519-
p99Latency uint64
520-
p100Latency uint64
521+
totalSec float64
522+
totalOps float64
523+
opsPerSecond float64
524+
averageLatency float64
525+
p50Latency float64
526+
p95Latency float64
527+
p99Latency float64
528+
p100Latency float64
521529
}
522530

523531
func getAndReportMetrics(b *driver.B, metricType string, output string) error {
@@ -538,43 +546,49 @@ func getMetrics(metricType string, output string) (benchmarkMetrics, error) {
538546
match = strings.Split(match, "\n")[1]
539547
fields := strings.Fields(match)
540548

541-
stringToUint64 := func(field string) (uint64, error) {
549+
stringToFloat64 := func(field string) (float64, error) {
542550
number, err := strconv.ParseFloat(field, 64)
543551
if err != nil {
544552
return 0, fmt.Errorf("error parsing metrics to uint64: %w", err)
545553
}
546-
return uint64(number), nil
554+
return number, nil
547555
}
548556

549-
uint64Fields := make([]uint64, len(fields[2:])-1)
550-
for i := range uint64Fields {
557+
float64Fields := make([]float64, len(fields[2:])-1)
558+
for i := range float64Fields {
551559
var err error
552-
uint64Fields[i], err = stringToUint64(fields[2+i])
560+
float64Fields[i], err = stringToFloat64(fields[2+i])
553561
if err != nil {
554562
return benchmarkMetrics{}, err
555563
}
556564
}
565+
// Parse benchmark duration.
566+
dur, err := time.ParseDuration(fields[0])
567+
if err != nil {
568+
return benchmarkMetrics{}, err
569+
}
557570

558571
metrics := benchmarkMetrics{
559-
totalOps: uint64Fields[0],
560-
opsPerSecond: uint64Fields[1],
561-
averageLatency: uint64Fields[2] * 1e6,
562-
p50Latency: uint64Fields[3] * 1e6,
563-
p95Latency: uint64Fields[4] * 1e6,
564-
p99Latency: uint64Fields[5] * 1e6,
565-
p100Latency: uint64Fields[6] * 1e6,
572+
totalSec: dur.Seconds(),
573+
totalOps: float64Fields[0],
574+
opsPerSecond: float64Fields[1],
575+
averageLatency: float64Fields[2] * 1e6,
576+
p50Latency: float64Fields[3] * 1e6,
577+
p95Latency: float64Fields[4] * 1e6,
578+
p99Latency: float64Fields[5] * 1e6,
579+
p100Latency: float64Fields[6] * 1e6,
566580
}
567581
return metrics, nil
568582
}
569583

570584
func reportMetrics(b *driver.B, metricType string, metrics benchmarkMetrics) {
571-
b.Report(fmt.Sprintf("%s-ops/sec", metricType), metrics.opsPerSecond)
572-
b.Report(fmt.Sprintf("%s-ops", metricType), metrics.totalOps)
573-
b.Report(fmt.Sprintf("%s-ns/op", metricType), metrics.averageLatency)
574-
b.Report(fmt.Sprintf("%s-p50-latency-ns", metricType), metrics.p50Latency)
575-
b.Report(fmt.Sprintf("%s-p95-latency-ns", metricType), metrics.p95Latency)
576-
b.Report(fmt.Sprintf("%s-p99-latency-ns", metricType), metrics.p99Latency)
577-
b.Report(fmt.Sprintf("%s-p100-latency-ns", metricType), metrics.p100Latency)
585+
b.Report(fmt.Sprintf("%s-ops/sec", metricType), uint64(metrics.opsPerSecond))
586+
b.Report(fmt.Sprintf("%s-ops", metricType), uint64(metrics.totalOps))
587+
b.Report(fmt.Sprintf("%s-avg-latency-ns", metricType), uint64(metrics.averageLatency))
588+
b.Report(fmt.Sprintf("%s-p50-latency-ns", metricType), uint64(metrics.p50Latency))
589+
b.Report(fmt.Sprintf("%s-p95-latency-ns", metricType), uint64(metrics.p95Latency))
590+
b.Report(fmt.Sprintf("%s-p99-latency-ns", metricType), uint64(metrics.p99Latency))
591+
b.Report(fmt.Sprintf("%s-p100-latency-ns", metricType), uint64(metrics.p100Latency))
578592
}
579593

580594
func run(cfg *config) (err error) {

0 commit comments

Comments (0)