---
model:
  model_name: "gpt2"  # you can swap in "gpt2-medium" or "EleutherAI/pythia-70m" etc.
  use_fast_tokenizer: true
  block_size: 128  # maximum sequence length after tokenisation

data:
  dataset_path: "./data/clean/"  # local path, or an HF dataset identifier
  max_len: 32
  train_split: "train"
  validation_split: "val"
  test_split: "test"

training:
  # ── bookkeeping ────────────────────────────────────────────────────────────
  output_dir: "runs/generator/gpt2_sst2"
  overwrite_output_dir: true
  run_name: "generator_sst2_gpt2"

  report_to: "wandb"
  wandb_project: "senti_synth_generator"

  # ── batch size & epochs ────────────────────────────────────────────────────
  per_device_train_batch_size: 32  # fits comfortably on 24 GB VRAM
  per_device_eval_batch_size: 64
  gradient_accumulation_steps: 1
  num_train_epochs: 3  # SST-2 is tiny; 2-3 epochs suffice

  # ── precision & speed ──────────────────────────────────────────────────────
  fp16: true  # enable mixed precision
  bf16: false  # turn off to avoid dual precision modes
  # torch_dtype: "auto"  # (optional) lets HF pick the fastest dtype

  # ── optimiser & scheduler ──────────────────────────────────────────────────
  # NOTE: written as 5.0e-5 (not 5e-5) so YAML 1.1 parsers resolve it as a
  # float rather than a string.
  learning_rate: 5.0e-5  # good starting LR for GPT-2 on small corpora
  warmup_ratio: 0.1

  # ── misc performance knobs ─────────────────────────────────────────────────
  dataloader_num_workers: 4
  gradient_checkpointing: true  # big memory win on GPT-style decoders
  max_grad_norm: 1.0

  # ── logging, saving, early stop ────────────────────────────────────────────
  logging_steps: 100
  eval_steps: 500
  save_steps: 500
  save_total_limit: 3
  load_best_model_at_end: true
  metric_for_best_model: "eval_loss"
  greater_is_better: false

  use_early_stopping: true
  early_stopping_patience: 2
  early_stopping_threshold: 0.0005

  do_test_eval: true