Integrate burl into your continuous integration and deployment pipelines.
A basic GitHub Actions workflow that benchmarks a staging endpoint on every push and pull request, fails the build when P99 latency exceeds a threshold, and uploads the raw results as an artifact:

```yaml
name: Performance Test

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

jobs:
  benchmark:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Install burl
        run: |
          curl -fsSL https://github.com/ZainW/burl/releases/latest/download/burl-linux-x64 -o burl
          chmod +x burl
          sudo mv burl /usr/local/bin/

      - name: Run benchmark
        run: |
          burl https://api.staging.example.com/health \
            -c 20 -d 30s \
            --llm json \
            -o results.json

      - name: Check performance
        run: |
          p99=$(jq '.latency_ms.p99' results.json)
          if (( $(echo "$p99 > 200" | bc -l) )); then
            echo "Performance regression: P99 latency is ${p99}ms (threshold: 200ms)"
            exit 1
          fi

      - name: Upload results
        if: always()  # keep the results even when the check above fails
        uses: actions/upload-artifact@v4
        with:
          name: benchmark-results
          path: results.json
```
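The threshold checks on this page query burl's `--llm json` output with jq. Pieced together from the fields used in these examples (values illustrative, not a complete schema), the shape they assume looks like:

```json
{
  "summary": {
    "total_requests": 12000,
    "failed_requests": 3,
    "requests_per_second": 401.5
  },
  "latency_ms": {
    "p50": 41.2,
    "p99": 158.7
  },
  "interpretation": {
    "performance": "good"
  }
}
```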
For stricter gating on pull requests, define the thresholds once as environment variables, validate several metrics after deploying to staging, and post the results back to the PR:

```yaml
name: Performance Gates

on:
  pull_request:
    branches: [main]

env:
  P99_THRESHOLD_MS: 200
  ERROR_RATE_THRESHOLD: 0.01
  MIN_RPS: 100

jobs:
  performance-gate:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Install burl
        run: |
          curl -fsSL https://github.com/ZainW/burl/releases/latest/download/burl-linux-x64 -o burl
          chmod +x burl
          sudo mv burl /usr/local/bin/

      - name: Deploy to staging
        run: |
          # Your deployment step here
          echo "Deploying to staging..."

      - name: Wait for deployment
        run: sleep 30

      - name: Run performance tests
        run: |
          burl ${{ secrets.STAGING_URL }}/api/health \
            -c 50 -d 60s \
            -a bearer:${{ secrets.API_TOKEN }} \
            --llm json \
            -o benchmark.json

      - name: Validate performance
        run: |
          p99=$(jq '.latency_ms.p99' benchmark.json)
          rps=$(jq '.summary.requests_per_second' benchmark.json)
          failed_requests=$(jq '.summary.failed_requests' benchmark.json)
          total=$(jq '.summary.total_requests' benchmark.json)
          error_rate=$(echo "scale=4; $failed_requests / $total" | bc)

          echo "P99 Latency: ${p99}ms (threshold: ${P99_THRESHOLD_MS}ms)"
          echo "Requests/sec: $rps (minimum: $MIN_RPS)"
          echo "Error rate: $error_rate (threshold: $ERROR_RATE_THRESHOLD)"

          status=0
          if (( $(echo "$p99 > $P99_THRESHOLD_MS" | bc -l) )); then
            echo "❌ P99 latency exceeds threshold"
            status=1
          fi
          if (( $(echo "$rps < $MIN_RPS" | bc -l) )); then
            echo "❌ Throughput below minimum"
            status=1
          fi
          if (( $(echo "$error_rate > $ERROR_RATE_THRESHOLD" | bc -l) )); then
            echo "❌ Error rate exceeds threshold"
            status=1
          fi
          exit $status

      - name: Comment on PR
        if: always() && github.event_name == 'pull_request'  # post results even when the gate fails
        uses: actions/github-script@v7
        with:
          script: |
            const fs = require('fs');
            const results = JSON.parse(fs.readFileSync('benchmark.json', 'utf8'));
            const body = `## Performance Benchmark Results

            | Metric | Value |
            |--------|-------|
            | Requests/sec | ${results.summary.requests_per_second.toFixed(2)} |
            | P50 Latency | ${results.latency_ms.p50.toFixed(2)}ms |
            | P99 Latency | ${results.latency_ms.p99.toFixed(2)}ms |
            | Error Rate | ${(results.summary.failed_requests / results.summary.total_requests * 100).toFixed(2)}% |
            | Performance | ${results.interpretation.performance} |
            `;

            github.rest.issues.createComment({
              issue_number: context.issue.number,
              owner: context.repo.owner,
              repo: context.repo.repo,
              body: body
            });
```
The same kind of gate in GitLab CI. Note that GitLab's `metrics` report expects an OpenMetrics text file, not JSON, so the script writes the P99 value to `metrics.txt` for the report and keeps `benchmark.json` as a plain artifact:

```yaml
stages:
  - deploy
  - benchmark
  - report

variables:
  P99_THRESHOLD: "200"

benchmark:
  stage: benchmark
  image: ubuntu:latest
  before_script:
    - apt-get update && apt-get install -y curl jq bc
    - curl -fsSL https://github.com/ZainW/burl/releases/latest/download/burl-linux-x64 -o /usr/local/bin/burl
    - chmod +x /usr/local/bin/burl
  script:
    - |
      burl $STAGING_URL/health \
        -c 50 -d 60s \
        --llm json \
        -o benchmark.json
      p99=$(jq '.latency_ms.p99' benchmark.json)
      # GitLab's metrics report requires OpenMetrics text
      echo "p99_latency_ms $p99" > metrics.txt
      if (( $(echo "$p99 > $P99_THRESHOLD" | bc -l) )); then
        echo "Performance regression detected"
        exit 1
      fi
  artifacts:
    paths:
      - benchmark.json
    reports:
      metrics: metrics.txt
```
And in CircleCI:

```yaml
version: 2.1

jobs:
  benchmark:
    docker:
      - image: cimg/base:stable
    steps:
      - checkout
      - run:
          name: Install burl
          command: |
            curl -fsSL https://github.com/ZainW/burl/releases/latest/download/burl-linux-x64 -o burl
            chmod +x burl
            sudo mv burl /usr/local/bin/
      - run:
          name: Run benchmark
          command: |
            burl https://api.staging.example.com/health \
              -c 50 -d 60s \
              --llm json \
              -o results.json
      - run:
          name: Check thresholds
          command: |
            p99=$(jq '.latency_ms.p99' results.json)
            if (( $(echo "$p99 > 200" | bc -l) )); then
              echo "P99 latency ${p99}ms exceeds 200ms threshold"
              exit 1
            fi
      - store_artifacts:
          path: results.json

workflows:
  performance:
    jobs:
      - benchmark
```
Run regular performance checks:
```yaml
name: Scheduled Performance Check

on:
  schedule:
    - cron: '0 */6 * * *'  # every 6 hours
  workflow_dispatch:

jobs:
  benchmark:
    runs-on: ubuntu-latest
    steps:
      - name: Install burl
        run: |
          curl -fsSL https://github.com/ZainW/burl/releases/latest/download/burl-linux-x64 -o burl
          chmod +x burl
          sudo mv burl /usr/local/bin/

      - name: Run benchmark
        run: |
          burl ${{ secrets.PROD_URL }}/health \
            -c 20 -d 60s \
            --llm json \
            -o results.json

      - name: Check and alert
        run: |
          performance=$(jq -r '.interpretation.performance' results.json)
          if [ "$performance" = "poor" ]; then
            echo "Poor performance detected!"
            # Send alert (Slack, PagerDuty, etc.)
            curl -X POST -H 'Content-type: application/json' \
              --data '{"text":"⚠️ Poor API performance detected!"}' \
              ${{ secrets.SLACK_WEBHOOK }}
          fi
```
Tip: Run heavy benchmarks against staging environments, not production, to avoid impacting real users. If you must probe production, keep the load light, as the scheduled example above does with its lower concurrency.
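One way to enforce that is a small guard before the benchmark step. A minimal sketch, where `PROD_HOST` is a hypothetical variable holding your production hostname:

```bash
# Hypothetical guard: abort if the target URL points at production.
# PROD_HOST is an assumed variable, e.g. "api.example.com".
case "$TARGET_URL" in
  *"$PROD_HOST"*)
    echo "Refusing to benchmark production host: $TARGET_URL"
    exit 1
    ;;
esac
```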
Always benchmark the same environment with the same configuration, for example by pinning the parameters as workflow-level variables:

```yaml
env:
  CONNECTIONS: 50
  DURATION: 60s
  TARGET_URL: ${{ secrets.STAGING_URL }}
```
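With the parameters pinned once, every run step can reference them so results stay comparable across runs:

```bash
burl "$TARGET_URL" -c "$CONNECTIONS" -d "$DURATION" --llm json -o results.json
```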
Keep benchmark results for trend analysis:
```yaml
- name: Store results
  run: |
    mkdir -p benchmarks
    cp results.json "benchmarks/$(date +%Y%m%d_%H%M%S).json"

- uses: actions/upload-artifact@v4
  with:
    name: benchmark-history
    path: benchmarks/
    retention-days: 90
```
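Because each stored file is named with its timestamp, a quick sketch like this can tabulate the P99 trend from the accumulated history:

```bash
# Print one line per stored run: timestamp (from the filename) and its P99
for f in benchmarks/*.json; do
  printf '%s\t%s ms\n' "$(basename "$f" .json)" "$(jq '.latency_ms.p99' "$f")"
done
```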
Base thresholds on historical data, not arbitrary numbers:
```bash
# Calculate the threshold from the stored runs (20% buffer over the mean P99)
avg_p99=$(cat benchmarks/*.json | jq -s '[.[].latency_ms.p99] | add / length')
threshold=$(echo "$avg_p99 * 1.2" | bc)
```
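A follow-up check can then gate the current run against the derived threshold, reusing the comparison pattern from the workflows above:

```bash
# Fail the job if the current run exceeds the historical threshold
p99=$(jq '.latency_ms.p99' results.json)
if (( $(echo "$p99 > $threshold" | bc -l) )); then
  echo "P99 ${p99}ms exceeds dynamic threshold ${threshold}ms"
  exit 1
fi
```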