Common patterns for benchmarking REST APIs with burl. In the examples below, -c sets the number of concurrent connections and -d the test duration.
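Simple GET request: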
burl https://api.example.com/users -c 20 -d 30s
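GET with query parameters (the URL is quoted so the shell does not treat & as a command separator):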
burl "https://api.example.com/users?page=1&limit=50" -c 20 -d 30s
burl https://api.example.com/users \
  -H "Accept: application/json" \
  -H "X-Api-Version: v2" \
  -c 20 -d 30s
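POST with a JSON body: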
burl https://api.example.com/users \
  -m POST \
  -b '{"name":"Test User","email":"test@example.com"}' \
  -T application/json \
  -c 10 -d 30s
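Authenticated POST with a bearer token: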
burl https://api.example.com/orders \
  -m POST \
  -a "bearer:$API_TOKEN" \
  -b '{"product_id":123,"quantity":1}' \
  -T application/json \
  -c 10 -d 30s
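PUT to replace a resource: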
burl https://api.example.com/users/1 \
  -m PUT \
  -b '{"name":"Updated Name","email":"updated@example.com"}' \
  -T application/json \
  -c 10 -d 30s
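PATCH for a partial update: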
burl https://api.example.com/users/1 \
  -m PATCH \
  -b '{"status":"active"}' \
  -T application/json \
  -c 10 -d 30s
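Authenticated DELETE: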
burl https://api.example.com/users/1 \
  -m DELETE \
  -a "bearer:$API_TOKEN" \
  -c 5 -d 30s
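Bearer-token authentication: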
burl https://api.example.com/protected \
  -a "bearer:$OAUTH_TOKEN" \
  -c 20 -d 30s
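API key in a custom header: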
burl https://api.example.com/data \
  -H "X-API-Key: $API_KEY" \
  -c 20 -d 30s
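Basic authentication: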
burl https://api.example.com/admin \
  -a basic:admin:password \
  -c 10 -d 30s
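GraphQL query: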
burl https://api.example.com/graphql \
  -m POST \
  -b '{"query":"{ users { id name email } }"}' \
  -T application/json \
  -c 20 -d 30s
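GraphQL mutation (inner quotes escaped inside the JSON body):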
burl https://api.example.com/graphql \
  -m POST \
  -b '{"query":"mutation { createUser(name: \"Test\") { id } }"}' \
  -T application/json \
  -c 10 -d 30s
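GraphQL query with variables: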
burl https://api.example.com/graphql \
  -m POST \
  -b '{"query":"query GetUser($id: ID!) { user(id: $id) { name } }","variables":{"id":"123"}}' \
  -T application/json \
  -c 20 -d 30s
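To find the concurrency level where throughput stops scaling, you can sweep -c against one endpoint. A minimal sketch, reusing the -f json and -o flags shown below; the sweep_c*.json file names are illustrative, and the JSON field names are assumed to match the schema used in the comparison script that follows:
#!/bin/bash
# Sweep connection counts; each run writes one JSON result file.
for c in 5 10 20 50 100; do
  burl https://api.example.com/users \
    -c "$c" -d 15s \
    -f json -o "sweep_c${c}.json"
done
# One line per run: file name, req/s, P99 (field names assumed).
jq -r '[input_filename, .summary.requests_per_second, .latency_ms.p99] | @tsv' sweep_c*.json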
Compare performance of different endpoints:
#!/bin/bash
endpoints=(
  "/users"
  "/products"
  "/orders"
  "/analytics"
)
for endpoint in "${endpoints[@]}"; do
  echo "Testing $endpoint..."
  burl "https://api.example.com$endpoint" \
    -a "bearer:$API_TOKEN" \
    -c 20 -d 30s \
    -f json \
    -o "benchmark_${endpoint//\//_}.json"
done
# Compare results
echo -e "\nResults:"
for f in benchmark_*.json; do
  # Recover the endpoint path from the file name; basename strips the
  # .json suffix exactly, then the underscores become path separators.
  endpoint=$(basename "$f" .json | sed 's/^benchmark_//' | tr '_' '/')
  p99=$(jq '.latency_ms.p99' "$f")
  rps=$(jq '.summary.requests_per_second' "$f")
  printf "%-20s %8.1f req/s P99: %6.1fms\n" "$endpoint" "$rps" "$p99"
done
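To catch regressions between runs, compare a fresh result against a saved baseline. A sketch, assuming baseline_users.json was kept from an earlier run and that .latency_ms.p99 is the field used above; benchmark__users.json is the /users file the loop above produces:
#!/bin/bash
# Exit non-zero if P99 latency grew more than 20% over the saved baseline.
base_p99=$(jq '.latency_ms.p99' baseline_users.json)
curr_p99=$(jq '.latency_ms.p99' benchmark__users.json)
if jq -en --argjson b "$base_p99" --argjson c "$curr_p99" '$c > $b * 1.2' > /dev/null; then
  echo "REGRESSION: P99 ${curr_p99}ms vs baseline ${base_p99}ms"
  exit 1
fi
echo "OK: P99 ${curr_p99}ms (baseline ${base_p99}ms)"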
Simulate a realistic API usage pattern:
#!/bin/bash
BASE_URL="https://api.example.com"
TOKEN="$API_TOKEN"
echo "Simulating user session..."
# 1. Login (once)
echo "1. Authentication..."
burl "$BASE_URL/auth/login" \
-m POST \
-b '{"email":"test@example.com","password":"secret"}' \
-T application/json \
-n 1
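# (Assumption) If the login endpoint returns a JSON body like {"token":"..."},
# you can mint a fresh token with curl and jq instead of relying on a
# pre-provisioned $API_TOKEN; adjust the field name to your API:
if [ -z "$TOKEN" ]; then
  TOKEN=$(curl -s "$BASE_URL/auth/login" \
    -H 'Content-Type: application/json' \
    -d '{"email":"test@example.com","password":"secret"}' | jq -r '.token')
fi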
# 2. Browse products (heavy read traffic)
echo "2. Product browsing..."
burl "$BASE_URL/products" \
-a bearer:$TOKEN \
-c 50 -d 30s \
-f json -o products.json
# 3. View product details
echo "3. Product details..."
burl "$BASE_URL/products/123" \
-a bearer:$TOKEN \
-c 30 -d 30s \
-f json -o product_detail.json
# 4. Add to cart (write traffic)
echo "4. Cart operations..."
burl "$BASE_URL/cart" \
-m POST \
-a bearer:$TOKEN \
-b '{"product_id":123,"quantity":1}' \
-T application/json \
-c 10 -d 30s \
-f json -o cart.json
# 5. Checkout (critical path)
echo "5. Checkout..."
burl "$BASE_URL/checkout" \
-m POST \
-a bearer:$TOKEN \
-b '{"payment_method":"card"}' \
-T application/json \
-c 5 -d 30s \
-f json -o checkout.json
echo "Benchmark complete!"
Create a reusable benchmark script:
#!/bin/bash
set -e
API_URL="${API_URL:-https://api.example.com}"
CONNECTIONS="${CONNECTIONS:-20}"
DURATION="${DURATION:-30s}"
OUTPUT_DIR="${OUTPUT_DIR:-./benchmarks}"
mkdir -p "$OUTPUT_DIR"
timestamp=$(date +%Y%m%d_%H%M%S)
echo "Benchmarking $API_URL"
echo "Connections: $CONNECTIONS, Duration: $DURATION"
echo "Output: $OUTPUT_DIR"
burl "$API_URL/health" \
-c "$CONNECTIONS" \
-d "$DURATION" \
--llm json \
-o "$OUTPUT_DIR/benchmark_${timestamp}.json"
# Check results
performance=$(jq -r '.interpretation.performance' "$OUTPUT_DIR/benchmark_${timestamp}.json")
echo "Performance: $performance"
if [ "$performance" = "poor" ]; then
echo "WARNING: Poor performance detected!"
jq '.interpretation.issues[]' "$OUTPUT_DIR/benchmark_${timestamp}.json"
exit 1
fi
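Run it with the defaults, or override any of the environment variables; benchmark.sh is a hypothetical name for the script above:
chmod +x benchmark.sh
./benchmark.sh
API_URL=https://staging.example.com CONNECTIONS=50 DURATION=60s ./benchmark.sh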