Use burl to compare performance between different endpoints, versions, or configurations.
Compare two endpoints side by side:
#!/bin/bash
echo "Endpoint A (v1):"
burl https://api.example.com/v1/users -c 20 -d 30s -f json | jq '{
  rps: .summary.requests_per_second,
  p50: .latency_ms.p50,
  p99: .latency_ms.p99
}'
echo -e "\nEndpoint B (v2):"
burl https://api.example.com/v2/users -c 20 -d 30s -f json | jq '{
  rps: .summary.requests_per_second,
  p50: .latency_ms.p50,
  p99: .latency_ms.p99
}'
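For a single side-by-side view instead of two separate printouts, you can also write both runs to files and merge them with jq -s. This is a minimal sketch that reuses the same burl JSON fields shown above; a.json and b.json are placeholder file names:
#!/bin/bash
# Sketch: capture each run to a file, then slurp both files into one jq comparison.
burl https://api.example.com/v1/users -c 20 -d 30s -f json -o a.json
burl https://api.example.com/v2/users -c 20 -d 30s -f json -o b.json
jq -s '{
  v1: { rps: .[0].summary.requests_per_second, p99: .[0].latency_ms.p99 },
  v2: { rps: .[1].summary.requests_per_second, p99: .[1].latency_ms.p99 }
}' a.json b.json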
Compare an old and a new version of your API:
#!/bin/bash
V1_URL="https://api-v1.example.com"
V2_URL="https://api-v2.example.com"
CONNECTIONS=50
DURATION="60s"
echo "Benchmarking API v1..."
burl "$V1_URL/users" -c $CONNECTIONS -d $DURATION -f json -o v1_results.json
echo "Benchmarking API v2..."
burl "$V2_URL/users" -c $CONNECTIONS -d $DURATION -f json -o v2_results.json
echo -e "\n📊 Comparison Results:"
echo "================================================"
printf "%-20s %15s %15s\n" "Metric" "v1" "v2"
echo "------------------------------------------------"
v1_rps=$(jq '.summary.requests_per_second' v1_results.json)
v2_rps=$(jq '.summary.requests_per_second' v2_results.json)
printf "%-20s %15.2f %15.2f\n" "Requests/sec" "$v1_rps" "$v2_rps"
v1_p50=$(jq '.latency_ms.p50' v1_results.json)
v2_p50=$(jq '.latency_ms.p50' v2_results.json)
printf "%-20s %13.2fms %13.2fms\n" "P50 Latency" "$v1_p50" "$v2_p50"
v1_p99=$(jq '.latency_ms.p99' v1_results.json)
v2_p99=$(jq '.latency_ms.p99' v2_results.json)
printf "%-20s %13.2fms %13.2fms\n" "P99 Latency" "$v1_p99" "$v2_p99"
echo "------------------------------------------------"
# Calculate improvement
rps_improvement=$(echo "scale=2; (($v2_rps - $v1_rps) / $v1_rps) * 100" | bc)
p99_improvement=$(echo "scale=2; (($v1_p99 - $v2_p99) / $v1_p99) * 100" | bc)
echo -e "\n📈 Improvements:"
echo "Throughput: ${rps_improvement}%"
echo "P99 Latency: ${p99_improvement}%"
Compare performance across different regions:
#!/bin/bash
regions=(
  "us-east-1:https://us-east.api.example.com"
  "us-west-2:https://us-west.api.example.com"
  "eu-west-1:https://eu.api.example.com"
  "ap-southeast-1:https://asia.api.example.com"
)
echo "Multi-Region Performance Comparison"
echo "===================================="
for entry in "${regions[@]}"; do
region="${entry%%:*}"
url="${entry#*:}"
echo -e "\nTesting $region..."
burl "$url/health" -c 20 -d 30s -f json -o "${region}.json"
done
echo -e "\n📊 Results:"
printf "%-20s %12s %12s %12s\n" "Region" "RPS" "P50" "P99"
echo "--------------------------------------------------------"
for entry in "${regions[@]}"; do
region="${entry%%:*}"
rps=$(jq '.summary.requests_per_second' "${region}.json")
p50=$(jq '.latency_ms.p50' "${region}.json")
p99=$(jq '.latency_ms.p99' "${region}.json")
printf "%-20s %12.2f %10.2fms %10.2fms\n" "$region" "$rps" "$p50" "$p99"
done
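To rank regions instead of eyeballing the table, a small follow-on loop can sort the same per-region result files by P99 latency. A sketch, assuming the files written above:
# Sketch: print "region p99" pairs, sort numerically by P99, report the fastest.
for entry in "${regions[@]}"; do
  region="${entry%%:*}"
  echo "$region $(jq '.latency_ms.p99' "${region}.json")"
done | sort -k2 -n | head -1 | awk '{print "Lowest P99: " $1 " (" $2 "ms)"}'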
Compare different database configurations:
#!/bin/bash
configs=(
  "postgres:https://api.example.com/postgres"
  "mysql:https://api.example.com/mysql"
  "mongodb:https://api.example.com/mongodb"
)
for entry in "${configs[@]}"; do
name="${entry%%:*}"
url="${entry#*:}"
echo "Testing $name backend..."
burl "$url/query" \
-m POST \
-b '{"query":"SELECT * FROM users LIMIT 100"}' \
-T application/json \
-c 30 -d 30s \
-f json \
-o "${name}_results.json"
done
# Generate comparison report
echo -e "\nDatabase Backend Comparison"
echo "============================"
for f in *_results.json; do
  name=$(basename "$f" _results.json)
  rps=$(jq '.summary.requests_per_second' "$f")
  p99=$(jq '.latency_ms.p99' "$f")
  echo "$name: ${rps} req/s, P99: ${p99}ms"
done
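Single 30-second runs can be noisy, so backend comparisons are more trustworthy when each configuration is measured several times and the results are averaged. A minimal sketch for one backend, using hypothetical postgres_run*.json file names:
#!/bin/bash
# Run the postgres backend three times, then average requests/sec across runs with jq.
for run in 1 2 3; do
  burl "https://api.example.com/postgres/query" \
    -m POST \
    -b '{"query":"SELECT * FROM users LIMIT 100"}' \
    -T application/json \
    -c 30 -d 30s \
    -f json \
    -o "postgres_run${run}.json"
done
jq -s '[.[].summary.requests_per_second] | add / length' postgres_run*.json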
Measure the impact of optimizations:
#!/bin/bash
ENDPOINT="https://api.example.com/slow-endpoint"
echo "📊 Before Optimization"
echo "======================"
burl "$ENDPOINT" -c 50 -d 60s --llm json -o before.json
echo -e "\n⏳ Apply optimization and press Enter..."
read
echo -e "\n📊 After Optimization"
echo "====================="
burl "$ENDPOINT" -c 50 -d 60s --llm json -o after.json
echo -e "\n📈 Comparison"
echo "============="
before_rps=$(jq '.summary.requests_per_second' before.json)
after_rps=$(jq '.summary.requests_per_second' after.json)
before_p99=$(jq '.latency_ms.p99' before.json)
after_p99=$(jq '.latency_ms.p99' after.json)
rps_change=$(echo "scale=2; (($after_rps - $before_rps) / $before_rps) * 100" | bc)
p99_change=$(echo "scale=2; (($before_p99 - $after_p99) / $before_p99) * 100" | bc)
echo "Throughput: $before_rps → $after_rps req/s (${rps_change}%)"
echo "P99 Latency: ${before_p99}ms → ${after_p99}ms (${p99_change}% improvement)"
Create a detailed markdown report:
#!/bin/bash
cat << EOF > comparison_report.md
# Performance Comparison Report
Generated: $(date)
## Test Configuration
- Connections: 50
- Duration: 60s
- Warmup: 100 requests
## Results
| Endpoint | RPS | P50 | P99 | Errors |
|----------|-----|-----|-----|--------|
EOF
for f in *.json; do
  name=$(basename "$f" .json)
  rps=$(jq '.summary.requests_per_second' "$f")
  p50=$(jq '.latency_ms.p50' "$f")
  p99=$(jq '.latency_ms.p99' "$f")
  errors=$(jq '.summary.failed_requests' "$f")
  echo "| $name | $rps | ${p50}ms | ${p99}ms | $errors |" >> comparison_report.md
done
cat << EOF >> comparison_report.md
## Analysis
$(jq -r '.interpretation.issues[]' *.json 2>/dev/null | sort -u | sed 's/^/- /')
## Recommendations
$(jq -r '.interpretation.recommendations[]' *.json 2>/dev/null | sort -u | sed 's/^/- /')
EOF
echo "Report generated: comparison_report.md"