-
Notifications
You must be signed in to change notification settings - Fork 125
199 lines (192 loc) · 6.36 KB
/
ci.yml
File metadata and controls
199 lines (192 loc) · 6.36 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
# CI workflow: lint, cross-version unit tests (against two Redis images),
# and benchmarks with results published to gh-pages via github-action-benchmark.
name: CI

on:
  push:
    branches:
      - main
      - '[0-9].[0-9]+'  # matches to backport branches, e.g. 3.6
    tags: [ 'v*' ]
  pull_request:
    branches:
      - main
      - '[0-9].[0-9]+'
      - 'update/pre-commit-autoupdate'
  schedule:
    - cron: '0 6 * * *'  # Daily 6AM UTC build

env:
  # Default interpreter for jobs that don't use a version matrix.
  pythonversion: "3.10"

jobs:
  lint:
    name: Linter
    runs-on: ubuntu-latest
    timeout-minutes: 5
    steps:
      - name: Checkout
        uses: actions/checkout@v6
      - name: Install uv
        uses: astral-sh/setup-uv@v7
      - name: Setup Python 3.10
        uses: actions/setup-python@v6
        with:
          python-version: ${{ env.pythonversion }}
      - name: Install dependencies
        run: uv sync
      - name: Make sync version of library (redis_om)
        run: make sync
      - name: Run linter
        run: |
          make dist
          make lint

  test-unix:
    name: Test Unix
    needs: lint
    strategy:
      matrix:
        os: [ ubuntu-latest ]
        pyver: [ "3.10", "3.11", "3.12", "3.13", "pypy-3.10" ]
        redisstack: [ "redis/redis-stack:latest", "redis:8.4.0" ]
      fail-fast: false
    services:
      redis:
        image: ${{ matrix.redisstack }}
        ports:
          # Maps port 6379 on service container to the host
          - "6379:6379"
        # Set health checks to wait until redis has started
        options: >-
          --health-cmd "redis-cli ping"
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
    runs-on: ${{ matrix.os }}
    timeout-minutes: 15
    env:
      OS: ${{ matrix.os }}
      INSTALL_DIR: ${{ github.workspace }}/redis
    steps:
      - name: Checkout
        uses: actions/checkout@v6
      - name: Install uv
        uses: astral-sh/setup-uv@v7
      - name: Setup Python ${{ matrix.pyver }}
        uses: actions/setup-python@v6
        with:
          python-version: ${{ matrix.pyver }}
      - name: Install dependencies
        run: uv sync
      - name: Make sync version of library (redis_om)
        run: make sync
      - name: Run unittests (redisstack:${{ matrix.redisstack }}, ${{ matrix.os }})
        env:
          REDIS_OM_URL: "redis://localhost:6379?decode_responses=True"
        run: |
          make test
          uv run coverage xml
      - name: Upload coverage
        uses: codecov/codecov-action@v6
        with:
          # NOTE(review): `file` is the legacy input name for codecov-action;
          # newer majors prefer `files` — confirm against the pinned version.
          file: ./coverage.xml
          flags: unit
          env_vars: OS
          fail_ci_if_error: false

  benchmark:
    name: Benchmarks
    needs: lint
    runs-on: ubuntu-latest
    timeout-minutes: 10
    services:
      redis:
        image: redis/redis-stack:latest
        ports:
          - "6379:6379"
        options: >-
          --health-cmd "redis-cli ping"
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
    steps:
      - name: Checkout
        uses: actions/checkout@v6
      - name: Install uv
        uses: astral-sh/setup-uv@v7
      - name: Setup Python ${{ env.pythonversion }}
        uses: actions/setup-python@v6
        with:
          python-version: ${{ env.pythonversion }}
      - name: Install dependencies
        run: uv sync
      - name: Make sync version of library (redis_om)
        run: make sync
      - name: Run benchmarks
        env:
          REDIS_OM_URL: "redis://localhost:6379?decode_responses=True"
        run: |
          uv run pytest tests/test_benchmarks.py -v --benchmark-only --benchmark-json=benchmark-results.json
      - name: Upload benchmark results
        uses: actions/upload-artifact@v7
        with:
          name: benchmark-results
          path: benchmark-results.json
      - name: Display benchmark summary
        run: |
          # Generate the benchmark table
          uv run python << 'PYTHON_SCRIPT' > benchmark-table.txt
          import json
          print("## Benchmark Results")
          print("")
          print("| Test | Mean | Min | Max | OPS |")
          print("|------|------|-----|-----|-----|")
          with open('benchmark-results.json') as f:
              data = json.load(f)
          for b in data['benchmarks']:
              name = b['name'].replace('test_', '')
              mean = b['stats']['mean'] * 1e6  # convert to microseconds
              min_val = b['stats']['min'] * 1e6
              max_val = b['stats']['max'] * 1e6
              ops = b['stats']['ops']
              if mean < 1:
                  mean_str = f'{mean*1000:.0f}ns'
              elif mean < 1000:
                  mean_str = f'{mean:.1f}us'
              else:
                  mean_str = f'{mean/1000:.1f}ms'
              if min_val < 1:
                  min_str = f'{min_val*1000:.0f}ns'
              elif min_val < 1000:
                  min_str = f'{min_val:.1f}us'
              else:
                  min_str = f'{min_val/1000:.1f}ms'
              if max_val < 1:
                  max_str = f'{max_val*1000:.0f}ns'
              elif max_val < 1000:
                  max_str = f'{max_val:.1f}us'
              else:
                  max_str = f'{max_val/1000:.1f}ms'
              print(f'| {name} | {mean_str} | {min_str} | {max_str} | {ops:.0f}/s |')
          PYTHON_SCRIPT
          # Output to both logs and step summary
          cat benchmark-table.txt
          cat benchmark-table.txt >> $GITHUB_STEP_SUMMARY
      - name: Prepare for benchmark storage
        run: |
          # Save benchmark results outside the repo before stashing
          cp benchmark-results.json /tmp/benchmark-results.json
          # Stash changes from make sync to allow branch switching
          git stash --include-untracked
          # Restore the benchmark results
          cp /tmp/benchmark-results.json benchmark-results.json
      - name: Store benchmark result
        uses: benchmark-action/github-action-benchmark@v1
        with:
          tool: 'pytest'
          output-file-path: benchmark-results.json
          # Store benchmark data in gh-pages branch
          gh-pages-branch: gh-pages
          benchmark-data-dir-path: dev/bench
          # Alert if performance regresses by more than 20%
          alert-threshold: '120%'
          fail-on-alert: false
          # Dependabot PRs cannot create review comments with the default token.
          comment-on-alert: ${{ github.actor != 'dependabot[bot]' }}
          github-token: ${{ secrets.GITHUB_TOKEN }}
          # Only push to gh-pages on main branch
          auto-push: ${{ github.ref == 'refs/heads/main' }}