Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 0 additions & 3 deletions cli/analyze.go
Original file line number Diff line number Diff line change
Expand Up @@ -120,9 +120,6 @@ func mainAnalyze(ctx *cli.Context) error {
if len(args) == 0 {
console.Fatal("No benchmark data file supplied")
}
if len(args) > 1 {
console.Fatal("Only one benchmark file can be given")
}
monitor := api.NewBenchmarkMonitor(ctx.String(serverFlagName), nil)
defer monitor.Done()
log := func(format string, data ...interface{}) {
Expand Down
137 changes: 113 additions & 24 deletions cli/cmp.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,16 +18,18 @@
package cli

import (
"encoding/json"
"errors"
"io"
"os"
"sort"
"strings"
"time"

"github.com/dustin/go-humanize"
"github.com/fatih/color"
"github.com/minio/cli"
"github.com/minio/mc/pkg/probe"
"github.com/minio/pkg/v3/console"
"github.com/minio/warp/pkg/aggregate"
"github.com/minio/warp/pkg/bench"
)

Expand Down Expand Up @@ -60,35 +62,114 @@ func mainCmp(ctx *cli.Context) error {
if globalQuiet {
log = nil
}
readOps := func(s string) bench.Operations {
rc, isAggreated := openInput(s)
defer rc.Close()
if isAggreated {
fatalIf(probe.NewError(errors.New("aggregated compare not available yet")), "Aggregated compare not available yet")
}
ops, err := bench.OperationsFromCSV(rc, true, ctx.Int("analyze.offset"), ctx.Int("analyze.limit"), log)
fatalIf(probe.NewError(err), "Unable to parse input")
return ops
// Open "before" and "after" files
rc, agg := openInput(args[0])
defer rc.Close()
rc2, agg2 := openInput(args[1])
defer rc2.Close()

if agg != agg2 {
fatalIf(probe.NewError(errors.New("mixed input types")), "mixed input types (aggregated and non-aggregated)")
}

if agg {
var before, after aggregate.Realtime
if err := json.NewDecoder(rc).Decode(&before); err != nil {
fatalIf(probe.NewError(err), "Unable to parse input")
}
if err := json.NewDecoder(rc2).Decode(&after); err != nil {
fatalIf(probe.NewError(err), "Unable to parse input")
}
printCompare(ctx, before, after)
return nil
}

if log != nil {
log("Loading %q", args[0])
}
printCompare(ctx, readOps(args[0]), readOps(args[1]))

beforeOps, err := bench.OperationsFromCSV(rc, true, ctx.Int("analyze.offset"), ctx.Int("analyze.limit"), log)
fatalIf(probe.NewError(err), "Unable to parse input")

if log != nil {
log("Loading %q", args[1])
}
afterOps, err := bench.OperationsFromCSV(rc, true, ctx.Int("analyze.offset"), ctx.Int("analyze.limit"), log)
fatalIf(probe.NewError(err), "Unable to parse input")

printCompareLegacy(ctx, beforeOps, afterOps)
return nil
}

func printCompare(ctx *cli.Context, before, after bench.Operations) {
var wrSegs io.Writer
func printCompare(ctx *cli.Context, before, after aggregate.Realtime) {
afterOps := after.ByOpType
ops := mapKeys(before.ByOpType)
sort.Strings(ops)
for _, typ := range ops {
before := before.ByOpType[typ]
if wantOp := ctx.String("analyze.op"); wantOp != "" {
if strings.ToUpper(wantOp) != typ {
continue
}
}

console.Println("-------------------")
console.SetColor("Print", color.New(color.FgHiWhite))
console.Println("Operation:", typ)
console.SetColor("Print", color.New(color.FgWhite))

if fn := ctx.String("compare.out"); fn != "" {
if fn == "-" {
wrSegs = os.Stdout
} else {
f, err := os.Create(fn)
fatalIf(probe.NewError(err), "Unable to create create analysis output")
defer console.Println("Aggregated data saved to", fn)
defer f.Close()
wrSegs = f
after := afterOps[typ]
cmp, err := aggregate.Compare(before, after, typ)
if err != nil {
console.Println(err)
continue
}

if bErrs, aErrs := before.TotalErrors, after.TotalErrors; bErrs+aErrs > 0 {
console.SetColor("Print", color.New(color.FgHiRed))
console.Println("Errors:", bErrs, "->", aErrs)
console.SetColor("Print", color.New(color.FgWhite))
}
if before.TotalRequests != after.TotalRequests {
console.Println("Operations:", before.TotalRequests, "->", after.TotalRequests)
}
if before.Concurrency != after.Concurrency {
console.Println("Concurrency:", before.Concurrency, "->", after.Concurrency)
}
if len(before.ThroughputByHost) != len(after.ThroughputByHost) {
console.Println("Endpoints:", len(before.ThroughputByHost), "->", len(after.ThroughputByHost))
}
opoB := before.TotalObjects / before.TotalRequests
opoA := after.TotalObjects / after.TotalRequests
if opoB != opoA {
console.Println("Objects per operation:", opoB, "->", opoA)
}
bDur := before.EndTime.Sub(before.StartTime).Round(time.Second)
aDur := after.EndTime.Sub(after.StartTime).Round(time.Second)
if bDur != aDur {
console.Println("Duration:", bDur, "->", aDur)
}
if cmp.Reqs.Before.AvgObjSize != cmp.Reqs.After.AvgObjSize {
console.Printf("Object size: %v -> %v\n",
humanize.Bytes(uint64(cmp.Reqs.Before.AvgObjSize)),
humanize.Bytes(uint64(cmp.Reqs.After.AvgObjSize)))
}
console.Println("* Average:", cmp.Average)
console.Println("* Requests:", cmp.Reqs.String())

if cmp.TTFB != nil {
console.Println("* TTFB:", cmp.TTFB)
}
if cmp.Fastest.ObjPerSec > 0 {
console.SetColor("Print", color.New(color.FgWhite))
console.Println("* Fastest:", cmp.Fastest)
console.Println("* 50% Median:", cmp.Median)
console.Println("* Slowest:", cmp.Slowest)
}
}
_ = wrSegs
}

func printCompareLegacy(ctx *cli.Context, before, after bench.Operations) {
isMultiOp := before.IsMixed()
if isMultiOp != after.IsMixed() {
console.Fatal("Cannot compare multi-operation to single operation.")
Expand Down Expand Up @@ -161,3 +242,11 @@ func checkCmp(ctx *cli.Context) {
console.Fatal("Two data sources must be supplied")
}
}

// mapKeys returns a slice containing every key of m, in unspecified
// (map-iteration) order. The result is never nil, even for an empty map.
func mapKeys[Map ~map[K]V, K comparable, V any](m Map) []K {
	out := make([]K, 0, len(m))
	for key := range m {
		out = append(out, key)
	}
	return out
}
129 changes: 129 additions & 0 deletions pkg/aggregate/compare.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,129 @@
/*
* Warp (C) 2019-2025 MinIO, Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

package aggregate

import (
"fmt"
"time"

"github.com/minio/warp/pkg/bench"
)

// Compare computes the difference between two aggregated benchmark runs
// ("before" and "after") for the operation type op.
//
// It returns an error if either run recorded errors or has no throughput
// segments. The returned Comparison contains median/slowest/fastest segment
// comparisons, an overall average comparison, and — when per-request data is
// available on both sides — request-latency deltas and TTFB statistics.
func Compare(before, after *LiveAggregate, op string) (*bench.Comparison, error) {
	var res bench.Comparison

	if before.TotalErrors > 0 || after.TotalErrors > 0 {
		return nil, fmt.Errorf("errors recorded in benchmark run. before: %d, after %d", before.TotalErrors, after.TotalErrors)
	}
	if after.Throughput.Segmented.Segments == nil || before.Throughput.Segmented.Segments == nil {
		return nil, fmt.Errorf("no segments found in benchmark run. before: %v, after %v", before.Throughput.Segmented.Segments, after.Throughput.Segmented.Segments)
	}
	res.Op = op
	as := after.Throughput.Segmented.Segments
	aDur := time.Duration(after.Throughput.Segmented.SegmentDurationMillis) * time.Millisecond
	// Use the "before" run's own segment duration; it may differ from "after".
	bDur := time.Duration(before.Throughput.Segmented.SegmentDurationMillis) * time.Millisecond
	bs := before.Throughput.Segmented.Segments
	as.SortByObjsPerSec()
	bs.SortByObjsPerSec()
	// Median(0.5) is the median segment, Median(0) the slowest, Median(1) the fastest.
	res.Median.Compare(bs.Median(0.5).LongSeg(bDur), as.Median(0.5).LongSeg(aDur))
	res.Slowest.Compare(bs.Median(0.0).LongSeg(bDur), as.Median(0.0).LongSeg(aDur))
	res.Fastest.Compare(bs.Median(1).LongSeg(bDur), as.Median(1).LongSeg(aDur))

	// Synthesize a single segment spanning each entire run for the
	// overall "average" comparison. Guard divisions so a run with zero
	// requests cannot panic.
	beforeTotals := bench.Segment{
		EndsBefore: before.EndTime,
		Start:      before.StartTime,
		OpType:     op,
		Host:       "",
		OpsStarted: before.TotalRequests,
		PartialOps: 0,
		FullOps:    before.TotalObjects,
		OpsEnded:   before.TotalRequests,
		Objects:    float64(before.TotalObjects),
		Errors:     before.TotalErrors,
		ReqAvg:     float64(before.TotalRequests),
		TotalBytes: before.TotalBytes,
		ObjsPerOp:  before.TotalObjects / max(1, before.TotalRequests),
	}

	afterTotals := bench.Segment{
		EndsBefore: after.EndTime,
		Start:      after.StartTime,
		OpType:     op,
		Host:       "",
		OpsStarted: after.TotalRequests,
		PartialOps: 0,
		FullOps:    after.TotalObjects,
		OpsEnded:   after.TotalRequests,
		Objects:    float64(after.TotalObjects),
		Errors:     after.TotalErrors,
		ReqAvg:     float64(after.TotalRequests),
		TotalBytes: after.TotalBytes,
		ObjsPerOp:  after.TotalObjects / max(1, after.TotalRequests),
	}
	res.Average.Compare(beforeTotals, afterTotals)

	if after.Requests != nil && before.Requests != nil {
		a, _ := mergeRequests(after.Requests)
		b, _ := mergeRequests(before.Requests)
		// TODO: Do multisized?
		if a.Requests > 0 || b.Requests > 0 {
			ms := float64(time.Millisecond)
			const round = 100 * time.Microsecond
			// Merged duration stats are sums over MergedEntries; scale by
			// 1/MergedEntries to get per-entry averages (min 1 avoids 0-div).
			aInv := 1.0 / max(1, float64(a.MergedEntries))
			bInv := 1.0 / max(1, float64(b.MergedEntries))
			res.Reqs.CmpRequests = bench.CmpRequests{
				AvgObjSize: a.ObjSize/max(1, int64(a.MergedEntries)) - b.ObjSize/max(1, int64(b.MergedEntries)),
				Requests:   a.Requests - b.Requests,
				// Delta of averages: after-avg minus before-avg.
				Average: time.Duration((a.DurAvgMillis*aInv - b.DurAvgMillis*bInv) * ms).Round(round),
				Worst:   time.Duration((a.SlowestMillis - b.SlowestMillis) * ms).Round(round),
				Best:    time.Duration((a.FastestMillis - b.FastestMillis) * ms).Round(round),
				Median:  time.Duration((a.DurMedianMillis*aInv - b.DurMedianMillis*bInv) * ms).Round(round),
				P90:     time.Duration((a.Dur90Millis*aInv - b.Dur90Millis*bInv) * ms).Round(round),
				P99:     time.Duration((a.Dur99Millis*aInv - b.Dur99Millis*bInv) * ms).Round(round),
				StdDev:  time.Duration((a.StdDev*aInv - b.StdDev*bInv) * ms).Round(round),
			}
			res.Reqs.Before = bench.CmpRequests{
				AvgObjSize: b.ObjSize / max(1, int64(b.MergedEntries)),
				Requests:   b.Requests,
				Average:    time.Duration(b.DurAvgMillis * bInv * ms).Round(round),
				Worst:      time.Duration(b.SlowestMillis * ms).Round(round),
				Best:       time.Duration(b.FastestMillis * ms).Round(round),
				Median:     time.Duration(b.DurMedianMillis * bInv * ms).Round(round),
				P90:        time.Duration(b.Dur90Millis * bInv * ms).Round(round),
				P99:        time.Duration(b.Dur99Millis * bInv * ms).Round(round),
				StdDev:     time.Duration(b.StdDev * bInv * ms).Round(round),
			}
			res.Reqs.After = bench.CmpRequests{
				AvgObjSize: a.ObjSize / max(1, int64(a.MergedEntries)),
				Requests:   a.Requests,
				Average:    time.Duration(a.DurAvgMillis * aInv * ms).Round(round),
				Worst:      time.Duration(a.SlowestMillis * ms).Round(round),
				Best:       time.Duration(a.FastestMillis * ms).Round(round),
				Median:     time.Duration(a.DurMedianMillis * aInv * ms).Round(round),
				P90:        time.Duration(a.Dur90Millis * aInv * ms).Round(round),
				P99:        time.Duration(a.Dur99Millis * aInv * ms).Round(round),
				StdDev:     time.Duration(a.StdDev * aInv * ms).Round(round),
			}
			if a.FirstByte != nil && b.FirstByte != nil {
				res.TTFB = b.FirstByte.AsBench(b.MergedEntries).Compare(a.FirstByte.AsBench(a.MergedEntries))
			}
		}
	}

	return &res, nil
}
10 changes: 10 additions & 0 deletions pkg/aggregate/throughput.go
Original file line number Diff line number Diff line change
Expand Up @@ -286,6 +286,16 @@ func cloneBenchSegments(s bench.Segments) []SegmentSmall {
return res
}

// LongSeg expands the compact segment s into a full bench.Segment that
// spans segdur, starting at s.Start. Total bytes are derived from the
// stored bytes-per-second rate scaled by the segment duration.
func (s SegmentSmall) LongSeg(segdur time.Duration) bench.Segment {
	end := s.Start.Add(segdur)
	totalBytes := int64(time.Duration(s.BPS) * segdur / time.Second)
	return bench.Segment{
		Start:      s.Start,
		EndsBefore: end,
		TotalBytes: totalBytes,
		Objects:    s.OPS,
		Errors:     s.Errors,
	}
}

func (s *SegmentSmall) add(other SegmentSmall) SegmentSmall {
s.Errors += other.Errors
s.OPS += other.OPS
Expand Down
Loading
Loading