"""
VIVID Evaluation Framework — CLI Entry Point
============================================
Run any VIVID benchmark task from the command line.

Commands:
    generate        Generate idiom explanations (vLLM or API)
    judge           Score explanations with LLM-as-a-Judge (GPT-4.1)
    discriminate    Topic / pattern classification via lm_eval
    full-pipeline   generate → judge in one command

Run `python vivid_eval.py <command> --help` for per-command options.
"""
import argparse
from datetime import datetime
from vivid.constants import DEFAULT_DATASET, DEFAULT_RESULTS_DIR
from vivid.generate import run_generate
from vivid.judge import run_judge
from vivid.discriminate import run_discriminate
from vivid.pipeline import run_full_pipeline
# ── Shared argument group ─────────────────────────────────────────────────────
def _shared_parser():
    p = argparse.ArgumentParser(add_help=False)
    p.add_argument("--results-dir", default=DEFAULT_RESULTS_DIR,
                   help="Directory to save all results (default: ./results)")
    p.add_argument("--api-key", default=None,
                   help="OpenAI API key (or set OPENAI_API_KEY env var)")
    p.add_argument("--gemini-api-key", default=None,
                   help="Google Gemini API key (or set GEMINI_API_KEY env var)")
    p.add_argument("--api-delay", type=float, default=1.0,
                   help="Seconds between API calls for rate-limiting (default: 1.0)")
    return p

# ── vLLM argument group (reused by generate, discriminate, full-pipeline) ─────
def _add_vllm_args(p):
p.add_argument("--tensor-parallel", type=int, default=1,
help="Tensor parallel size for vLLM (default: 1)")
p.add_argument("--gpu-memory-util", type=float, default=0.85,
help="GPU memory utilisation for vLLM (default: 0.85)")
p.add_argument("--dtype", default="auto",
help="Model dtype: auto | float16 | bfloat16 (default: auto)")
p.add_argument("--max-model-len", type=int, default=None,
help="Override context length for vLLM (optional)")
# ── Parser ────────────────────────────────────────────────────────────────────
def build_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser(
        prog="vivid_eval",
        description="VIVID Benchmark Evaluation Framework",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples
--------
# Open-source model — zero-shot generation
python vivid_eval.py generate --model Qwen/Qwen3-14B --prompt zero-shot

# OpenAI model — few-shot generation
python vivid_eval.py generate --model gpt-4o --prompt few-shot --api-key sk-...

# Gemini model — zero-shot generation (uses google-genai library)
python vivid_eval.py generate --model gemini-2.5-flash --prompt zero-shot --gemini-api-key AIza...

# Score with OpenAI judge
python vivid_eval.py judge \\
    --input results/generate_Qwen_Qwen3-14B_zero-shot_<ts>.csv \\
    --model-col Qwen_Qwen3-14B_zero_shot_explanation \\
    --api-key sk-...

# Score with Gemini judge (uses google-genai library)
python vivid_eval.py judge \\
    --input results/generate_Qwen_Qwen3-14B_zero-shot_<ts>.csv \\
    --model-col Qwen_Qwen3-14B_zero_shot_explanation \\
    --judge-model gemini-2.5-flash \\
    --gemini-api-key AIza...

# Discriminative evaluation — open-source (vLLM backend)
python vivid_eval.py discriminate --model Qwen/Qwen3-14B --task both

# Discriminative evaluation — Gemini (uses OpenAI-compat endpoint via lm_eval)
python vivid_eval.py discriminate --model gemini-2.5-flash --task topic --api-key AIza...

# Full generate + judge pipeline
python vivid_eval.py full-pipeline \\
    --model Qwen/Qwen3-14B --prompt zero-shot --api-key sk-...
""",
    )
    sub = parser.add_subparsers(dest="command", required=True)
    shared = _shared_parser()

    # ── generate ─────────────────────────────────────────────────────────────
    p_gen = sub.add_parser(
        "generate", parents=[shared],
        help="Generate idiom/proverb explanations with any model",
    )
    p_gen.add_argument("--model", required=True,
                       help="HuggingFace model ID or API model name")
    p_gen.add_argument("--prompt", choices=["zero-shot", "few-shot"], default="zero-shot",
                       help="Prompting strategy (default: zero-shot)")
    p_gen.add_argument("--dataset", default=DEFAULT_DATASET,
                       help="Path to VIVID_Dataset.csv")
    p_gen.add_argument("--batch-size", type=int, default=64,
                       help="vLLM inference batch size (default: 64)")
    p_gen.add_argument("--max-tokens", type=int, default=150,
                       help="Max tokens to generate (default: 150)")
    p_gen.add_argument("--temperature", type=float, default=0.7,
                       help="Sampling temperature (default: 0.7)")
    _add_vllm_args(p_gen)

    # ── judge ────────────────────────────────────────────────────────────────
    p_jud = sub.add_parser(
        "judge", parents=[shared],
        help="Score explanations with LLM-as-a-Judge (GPT-4.1)",
    )
    p_jud.add_argument("--input", required=True,
                       help="CSV produced by the `generate` command")
    p_jud.add_argument("--model-col", required=True,
                       help="Column name that contains the LLM explanations")
    p_jud.add_argument("--judge-model", default="gpt-4.1",
                       help="Judge model to use (default: gpt-4.1)")

    # ── discriminate ─────────────────────────────────────────────────────────
    p_disc = sub.add_parser(
        "discriminate", parents=[shared],
        help="Run topic / pattern classification via lm_eval",
    )
    p_disc.add_argument("--model", required=True,
                        help="HuggingFace model ID or API model name")
    p_disc.add_argument("--task", choices=["topic", "pattern", "both"], default="both",
                        help="Which classification task to run (default: both)")
    p_disc.add_argument("--dataset", default=DEFAULT_DATASET,
                        help="Path to VIVID_Dataset.csv (used to build eval data)")
    p_disc.add_argument("--rebuild-eval-data", action="store_true",
                        help="Force rebuild of topic.json and pattern.json from dataset")
    p_disc.add_argument("--num-fewshot", type=int, default=3,
                        help="Few-shot examples for lm_eval (default: 3)")
    p_disc.add_argument("--batch-size", type=int, default=32,
                        help="lm_eval batch size for open-source models (default: 32)")
    _add_vllm_args(p_disc)

    # ── full-pipeline ────────────────────────────────────────────────────────
    p_full = sub.add_parser(
        "full-pipeline", parents=[shared],
        help="Run generate → judge in one command",
    )
    p_full.add_argument("--model", required=True,
                        help="HuggingFace model ID or API model name")
    p_full.add_argument("--prompt", choices=["zero-shot", "few-shot"], default="zero-shot",
                        help="Prompting strategy (default: zero-shot)")
    p_full.add_argument("--dataset", default=DEFAULT_DATASET,
                        help="Path to VIVID_Dataset.csv")
    p_full.add_argument("--batch-size", type=int, default=64,
                        help="vLLM inference batch size (default: 64)")
    p_full.add_argument("--max-tokens", type=int, default=150,
                        help="Max tokens to generate (default: 150)")
    p_full.add_argument("--temperature", type=float, default=0.7,
                        help="Sampling temperature (default: 0.7)")
    p_full.add_argument("--judge-model", default="gpt-4.1",
                        help="Judge model (default: gpt-4.1)")
    _add_vllm_args(p_full)

    return parser

# ── Entry point ───────────────────────────────────────────────────────────────
def main():
    parser = build_parser()
    args = parser.parse_args()

    print(f"\n{'─' * 60}")
    print(f" VIVID Evaluation Framework | {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    print(f"{'─' * 60}")

    dispatch = {
        "generate"      : run_generate,
        "judge"         : run_judge,
        "discriminate"  : run_discriminate,
        "full-pipeline" : run_full_pipeline,
    }
    dispatch[args.command](args)


if __name__ == "__main__":
    main()