Skip to content

Commit 7a70e3e

Browse files
Bump levanter+dolma SHAs, pin lm-eval, scalax SHAs (marin-community#1850)
Bump Levanter from c30de5b to 6cd783c (marin-community#1288) and Dolma from fd431d0 to 79ce49d (#2). This removes the `enable_logprobs` parameter from both the `InferenceEngineConfig` and `Request` classes, since the logprobs functionality was removed in Levanter's inference engine refactor (marin-community#1277). 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-authored-by: Claude <noreply@anthropic.com>
1 parent b72ef59 commit 7a70e3e

4 files changed

Lines changed: 33 additions & 38 deletions

File tree

pyproject.toml

Lines changed: 5 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -24,7 +24,7 @@ dependencies = [
2424
"google-cloud-storage-transfer",
2525
"jax==0.6.2",
2626
"haliax>=1.4.dev443",
27-
"levanter[serve] @ git+https://github.com/marin-community/levanter.git@c30de5b9ad706da1ddb5e95cf82ed767692f7f7c",
27+
"levanter[serve] @ git+https://github.com/marin-community/levanter.git@6cd783c82168919b365c1b2266e4a0bde22ba658",
2828
"lz4",
2929
"multiprocess==0.70.16",
3030
"numpy",
@@ -218,13 +218,13 @@ quality_dedup_consolidate = [
218218
"cattrs==24.1.3",
219219
"fsspec>=2025.3.0",
220220
"ddsketch",
221-
"dolma @ git+https://github.com/marin-community/dolma@8b5577fbfb89afc89ea52d20f92e4bc901c5587a",
221+
"dolma @ git+https://github.com/marin-community/dolma@79ce49d9535cfc64f4d96781b03ecd0e2c20cf4e",
222222
]
223223

224224
tokenize_train = [
225225
"multiprocess==0.70.16",
226226
"haliax>=1.4.dev443",
227-
"lm-eval@git+https://github.com/stanford-crfm/lm-evaluation-harness.git",
227+
"lm-eval@git+https://github.com/stanford-crfm/lm-evaluation-harness@d5e3391f22cde186c827674d5c3ec7c5f4fe0cab",
228228
"tblib",
229229
"sentencepiece",
230230
"tiktoken",
@@ -250,12 +250,12 @@ post_training = [
250250
"pylatexenc",
251251
"ipython",
252252
"datasets",
253-
"scalax@git+https://github.com/Sea-Snell/scalax.git",
253+
"scalax@git+https://github.com/Sea-Snell/scalax@8cf9ceabe30d4c4274df3c09aae2f66b59fbce3c",
254254
"swebench ; sys_platform != 'darwin'",
255255
]
256256

257257
eval = [
258-
"lm-eval[math]@git+https://github.com/stanford-crfm/lm-evaluation-harness.git",
258+
"lm-eval[math]@git+https://github.com/stanford-crfm/lm-evaluation-harness@d5e3391f22cde186c827674d5c3ec7c5f4fe0cab",
259259
]
260260

261261

src/marin/rl/rl_job.py

Lines changed: 0 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -253,7 +253,6 @@ def to_worker_configs(self) -> tuple[TrainWorkerConfig, RolloutWorkerConfig]:
253253
max_seq_len=max_tokens,
254254
page_size=128,
255255
hbm_utilization=0.5,
256-
enable_logprobs=True,
257256
),
258257
port=0,
259258
)

tests/rl/integration/config.py

Lines changed: 0 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -267,7 +267,6 @@ def create_test_inference_server_config(model_config: LlamaConfig, output_dir: s
267267
page_size=8,
268268
max_seq_len=64,
269269
max_queued_tokens=8,
270-
enable_logprobs=True,
271270
),
272271
temperature=1.0,
273272
port=find_open_port(),
@@ -362,7 +361,6 @@ def run_inference_with_engine(
362361
tokenizer=None,
363362
max_tokens: int = 64,
364363
temperature: float = 1.0,
365-
enable_logprobs: bool = False,
366364
) -> tuple[list[list[int]], list[str]]:
367365
"""Run inference on prompts using InferenceEngine directly."""
368366
if tokenizer is None:
@@ -374,7 +372,6 @@ def run_inference_with_engine(
374372
page_size=32,
375373
max_pages=8 * len(prompts) * max(1, max_tokens // 32),
376374
compute_dtype=jnp.bfloat16,
377-
enable_logprobs=enable_logprobs,
378375
)
379376

380377
print("Creating inference engine with config:", config)
@@ -402,7 +399,6 @@ def run_inference_with_engine(
402399
request_id=i,
403400
decode_params=decode_params,
404401
n_generations=1,
405-
enable_logprobs=enable_logprobs,
406402
)
407403
requests.append(request)
408404

uv.lock

Lines changed: 28 additions & 28 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

0 commit comments

Comments (0)