# src/prompt_constructor_toml.py | TOML-based prompt constructor
"""
TOML-based prompt constructor for managing prompt templates and configurations.
This module provides a way to load and compose prompt templates from a TOML
configuration file. You can inspect the prompt templates we provide and create
your own.
"""
import os
import runpy
from dataclasses import dataclass
from typing import Any, Dict, List, Optional

import tomli

from kernelbench.utils import read_file, get_package_resource_path, resolve_path, REPO_TOP_PATH
# Resolve paths using the helper from utils
PROMPTS_TOML = get_package_resource_path("prompts/prompts.toml")
GPU_SPECS_PY = get_package_resource_path("prompts/hardware/gpu_specs.py")
HARDWARE_COMPONENT_KEYS = [
"hardware_header",
"hardware_specs",
"hardware_definitions",
"hardware_best_practices",
]
@dataclass
class PromptConfig:
"""
Configuration wrapper for prompts.toml data.
This class holds the parsed TOML file data and provides ways to navigate
the nested structure and compose prompt templates.
The TOML file has a structure like:
[backends.cuda]
[options.few_shot]
[templates.common.arch_block]
This class makes it easy to look up values in that hierarchy.
"""
data: Dict[str, Any] # The raw parsed TOML data as nested dictionaries
@classmethod
def from_toml(cls, path: str) -> "PromptConfig":
"""
Load and parse a TOML configuration file.
Args:
path: Filesystem path to the prompts.toml file
Returns:
PromptConfig instance with parsed data
"""
with open(path, "rb") as f:
data = tomli.load(f)
return cls(data)
def compose_blocks(self, keys: List[str]) -> str:
"""
Look up and concatenate multiple template blocks using dotted key paths.
This method navigates the nested TOML structure using dotted notation
(e.g., "templates.common.arch_block") to find template strings, then
concatenates them together with newlines.
Args:
keys: List of dotted key paths (e.g., ["templates.common.arch_block"])
Each key is split on "." and used to traverse the nested dict.
Returns:
Concatenated string of all template blocks, each separated by newlines
"""
text_parts = []
for key in keys:
            # Navigate through the nested dictionary structure
            node: Any = self.data
            for part in key.split("."):
                # A plain `in` check on a non-dict node (e.g. a string leaf) would
                # silently do a substring test, so guard the node type explicitly
                if not isinstance(node, dict) or part not in node:
                    raise KeyError(f"compose key not found: {key}")
                node = node[part]
# Ensure we found a string template, not another dict/list
if not isinstance(node, str):
raise TypeError(f"compose key must resolve to string: {key}")
text_parts.append(node.strip() + "\n")
return "\n".join(text_parts).strip() + "\n"
def _gpu_context_from_gpu_specs(py_path: str, gpu_name: str) -> Dict[str, str]:
"""
Load GPU_* dicts from the GPU specs file (no exec of raw strings; use runpy).
Expected globals:
- GPU_SPEC_INFO: dict[str, dict]
- GPU_DEFINITIONS: dict[str, str]
- GPU_BEST_PRACTICES: list[str] OR {"list": [...]} for compatibility
"""
    mod = runpy.run_path(py_path)
    spec_info = mod.get("GPU_SPEC_INFO", {})
    definitions = mod.get("GPU_DEFINITIONS", {})
    # Default to None (not []) so a missing global is actually caught below
    best = mod.get("GPU_BEST_PRACTICES")
    if not spec_info or not definitions or best is None:
        raise ValueError("GPU_SPEC_INFO / GPU_DEFINITIONS / GPU_BEST_PRACTICES missing in GPU specs .py file")
if isinstance(best, dict) and "list" in best:
best = best["list"]
if gpu_name not in spec_info:
raise KeyError(f"GPU name {gpu_name} not found in GPU_SPEC_INFO")
curr = spec_info[gpu_name]
gpu_architecture = curr.get("GPU Architecture", "Unknown")
specs_bullets = "\n".join([f"- We have {v} of {k}." for k, v in curr.items() if k != "GPU Architecture"])
defs_bullets = "\n".join([f"- {k}: {v}" for k, v in definitions.items()])
best_bullets = "\n".join([f"- {x}" for x in (best or [])])
return {
"gpu_name": gpu_name,
"gpu_architecture": gpu_architecture,
"gpu_specs_bullets": specs_bullets,
"gpu_definitions_bullets": defs_bullets,
"gpu_best_practices_bullets": best_bullets,
}
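
# The GPU specs file is expected to define the three globals documented above.
# Shape sketch (values are illustrative, not the shipped data):
#
#   GPU_SPEC_INFO = {"L40S": {"GPU Architecture": "Ada Lovelace", "GPU Memory": "48 GB"}}
#   GPU_DEFINITIONS = {"SM": "streaming multiprocessor"}
#   GPU_BEST_PRACTICES = ["Coalesce global memory accesses"]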
def render_prompt_by_option(
*,
prompts_toml: str,
backend: str,
option: str,
context: Dict[str, str],
gpu_specs_py: Optional[str] = None,
gpu_name: Optional[str] = None,
precision: Optional[str] = None,
include_hardware: bool = False,
components_override: Optional[List[str]] = None,
) -> str:
"""
Render a prompt using backends.X and options.Y structure from TOML.
Args:
prompts_toml: Path to the prompts.toml file
backend: The kernel backend (triton, cuda, cute, tilelang, cutile)
option: The prompt option (zero_shot, one_shot, few_shot)
- zero_shot: No examples (model learns from description only)
- one_shot: Single example
- few_shot: Multiple examples if available for backend, otherwise falls back to one_shot
context: Variables to fill in the prompt template
gpu_specs_py: Optional path to GPU specs Python file (required if hardware info is included)
gpu_name: Optional GPU name (required if hardware info is included)
precision: Optional precision string (fp32, fp16, bf16) - defaults to fp32 if not provided
include_hardware: Whether to inject hardware guidance blocks after the examples section
        components_override: Optional explicit component ordering; when provided, prompt
            components from the TOML file are rendered in exactly this order.
            Components must exist under templates.common or be hardware_* entries.
Returns:
The rendered prompt string
"""
cfg = PromptConfig.from_toml(prompts_toml)
    # Get backend-specific content
    try:
        backend_data = cfg.data["backends"][backend]
    except KeyError as exc:
        raise KeyError(f"Unknown backend: {backend}") from exc
    # Get option configuration
    try:
        option_data = cfg.data["options"][option]
    except KeyError as exc:
        raise KeyError(f"Unknown option: {option}") from exc
component_sequence = list(components_override or option_data["components"])
if include_hardware:
if components_override is None:
insert_idx = component_sequence.index("arch_block") if "arch_block" in component_sequence else len(component_sequence)
component_sequence[insert_idx:insert_idx] = HARDWARE_COMPONENT_KEYS
        else:
            # Custom component sequences must explicitly include hardware blocks
            # when hardware info is requested.
            if not any(component in HARDWARE_COMPONENT_KEYS for component in component_sequence):
                raise ValueError(
                    "components_override must contain at least one hardware_* entry when include_hardware=True"
                )
# Get shared templates
shared = cfg.data.get("shared", {})
backend_display = backend_data.get("backend_display", backend.upper())
# Fill in shared templates with backend-specific terms
problem_statement = shared.get("problem_statement", "").format(backend_display=backend_display)
instruction = shared.get("instruction", "").format(backend_display=backend_display)
# Add backend-specific content to context
context = {
**context,
"backend": backend.upper() if backend in ["cuda", "cute"] else ("cuTile" if backend == "cutile" else backend.capitalize()),
"backend_display": backend_display,
"problem_statement": problem_statement,
"instruction": instruction,
}
# Load precision details if provided
    if precision:
        try:
            precision_data = cfg.data["precision"][precision]
            context["precision_display"] = precision_data.get("precision_display", precision.upper())
        except KeyError as exc:
            raise KeyError(f"Unknown precision: {precision}. Must be one of: fp32, fp16, bf16") from exc
else:
# Default to fp32 if not specified
default_precision = cfg.data.get("meta", {}).get("default_precision", "fp32")
precision_data = cfg.data["precision"].get(default_precision, {})
context["precision_display"] = precision_data.get("precision_display", "FP32 (32-bit floating point)")
# Load example files if requested. Supports loading one shot or few shot examples.
requires_example = option_data.get("requires_example")
if requires_example:
example_entry_template = cfg.compose_blocks(["templates.common.example_entry_template"]).strip()
intro_one_shot = cfg.compose_blocks(["templates.common.example_intro_one_shot"]).strip()
intro_few_shot = cfg.compose_blocks(["templates.common.example_intro_few_shot"]).strip()
intro_one_shot = intro_one_shot.format(
backend_display=backend_display
)
intro_few_shot = intro_few_shot.format(
backend_display=backend_display
)
def render_example_entry(input_code: str, output_code: str, example_label: str) -> str:
return example_entry_template.format(
example_label=example_label,
input_code=input_code,
output_code=output_code,
backend_display=backend_display,
)
examples_entries: List[str] = []
examples_intro = intro_one_shot
if requires_example == "few_shot":
# Try to load few-shot examples if available
few_shot_examples = backend_data.get("few_shot_examples")
if few_shot_examples and len(few_shot_examples) > 0:
# Use multiple examples (true few-shot)
examples_intro = intro_few_shot
for i, (input_path, output_path) in enumerate(few_shot_examples, 1):
input_code = read_file(resolve_path(input_path))
output_code = read_file(resolve_path(output_path))
examples_entries.append(
render_example_entry(input_code, output_code, f"Example {i}:")
)
else:
# Fall back to one-shot
ex_arch_path = resolve_path(
backend_data.get("few_shot_example_arch") or shared.get("few_shot_example_arch")
)
ex_new_path = resolve_path(backend_data["one_shot_new_arch"])
input_code = read_file(ex_arch_path)
output_code = read_file(ex_new_path)
examples_entries.append(
render_example_entry(input_code, output_code, "Example:")
)
elif requires_example == "one_shot":
# Always use one-shot
ex_arch_path = resolve_path(
backend_data.get("few_shot_example_arch") or shared.get("few_shot_example_arch")
)
ex_new_path = resolve_path(backend_data["one_shot_new_arch"])
input_code = read_file(ex_arch_path)
output_code = read_file(ex_new_path)
examples_entries.append(
render_example_entry(input_code, output_code, "Example:")
)
if not examples_entries:
raise ValueError(f"No example entries could be constructed for option '{option}'.")
context["examples_intro"] = examples_intro
context["examples_entries"] = "\n\n".join(examples_entries).strip()
# Load GPU details if requested
if option_data.get("requires_gpu") or include_hardware:
if not (gpu_specs_py and gpu_name):
raise ValueError(
f"Hardware info requested for option '{option}'; provide gpu_specs_py and gpu_name"
)
context = {**context, **_gpu_context_from_gpu_specs(resolve_path(gpu_specs_py), gpu_name)}
    # Build the prompt from the component sequence defined in the TOML file.
prompt_parts = []
for component in component_sequence:
if component == "problem_statement":
# Use the already-formatted problem_statement from context
prompt_parts.append(context["problem_statement"])
elif component == "instruction":
# Use the already-formatted instruction from context
prompt_parts.append(context["instruction"])
elif component.startswith("hardware_"):
# Hardware components from templates.hardware
template_key = f"templates.hardware.{component}"
prompt_parts.append(cfg.compose_blocks([template_key]))
else:
# Other components from templates.common
template_key = f"templates.common.{component}"
prompt_parts.append(cfg.compose_blocks([template_key]))
prompt_text = "\n".join(prompt_parts).strip() + "\n"
try:
return prompt_text.format(**context).strip() + "\n"
except KeyError as e:
raise KeyError(f"Missing placeholder in context: {e.args[0]}. Available: {list(context.keys())}") from e
# -------------------------------------------------------------------------
# High-level convenience functions
# -------------------------------------------------------------------------
def get_prompt_for_backend(
ref_arch_src: str,
backend: str = "triton",
option: str = "one_shot",
precision: Optional[str] = None,
include_hardware: bool = False,
gpu_name: Optional[str] = None,
) -> str:
"""
Generate a prompt for a specific backend and option.
Args:
ref_arch_src: The reference architecture source code
backend: The kernel backend (triton, cuda, cute, tilelang, cutile)
option: The prompt option (zero_shot, one_shot, few_shot)
precision: Optional precision (fp32, fp16, bf16) - defaults to fp32 if not provided
include_hardware: When True, append hardware guidance blocks (requires gpu_name)
gpu_name: GPU identifier used when include_hardware is True (e.g., "A100")
"""
return render_prompt_by_option(
prompts_toml=PROMPTS_TOML,
backend=backend.lower(),
option=option.lower(),
context={"ref_arch_src": ref_arch_src},
precision=precision,
include_hardware=include_hardware,
gpu_specs_py=GPU_SPECS_PY if include_hardware else None,
gpu_name=gpu_name,
)
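
# Usage sketch (assumes a "triton" backend entry and "few_shot" option in prompts.toml;
# the GPU name is illustrative):
#
#   prompt = get_prompt_for_backend(
#       ref_arch_src=ref_arch_src,
#       backend="triton",
#       option="few_shot",
#       include_hardware=True,
#       gpu_name="L40S",
#   )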
def get_custom_prompt(
custom_key: str,
*,
ref_arch_src: str,
backend: str,
option: str,
precision: Optional[str] = None,
include_hardware: bool = False,
gpu_name: Optional[str] = None,
prompts_toml: str = PROMPTS_TOML,
) -> str:
"""
Render a prompt defined under [custom_prompts.<custom_key>] in prompts.toml.
Must still provide backend/option/precision settings just like
get_prompt_for_backend.
"""
if not ref_arch_src:
raise ValueError(f"Custom prompt '{custom_key}' requires ref_arch_src.")
cfg = PromptConfig.from_toml(prompts_toml)
try:
custom_cfg: Dict[str, Any] = cfg.data["custom_prompts"][custom_key]
except KeyError as exc:
raise KeyError(f"Unknown custom prompt: {custom_key}") from exc
components_override = custom_cfg.get("components")
return render_prompt_by_option(
prompts_toml=prompts_toml,
backend=backend.lower(),
option=option.lower(),
context={"ref_arch_src": ref_arch_src},
precision=precision,
include_hardware=include_hardware,
gpu_specs_py=GPU_SPECS_PY if include_hardware else None,
gpu_name=gpu_name,
components_override=components_override,
)
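
# Illustrative [custom_prompts.*] entry (the key and component list are hypothetical;
# components must exist under templates.common or be hardware_* entries):
#
#   [custom_prompts.custom]
#   components = ["problem_statement", "arch_block", "hardware_specs", "instruction"]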
__all__ = [
    "get_prompt_for_backend",
    "get_custom_prompt",
    "render_prompt_by_option",
    "PromptConfig",
]
def log_prompt(prompt: str, dir_path: str, file_name: str):
    """Write a rendered prompt to dir_path/file_name, creating the directory if needed."""
    os.makedirs(dir_path, exist_ok=True)
    with open(os.path.join(dir_path, file_name), "w") as f:
        f.write(prompt)
def test_prompt():
"""
Demonstrate baseline, few-shot, DSL, hardware-aware, and custom prompt
generation. Customize the reference architecture or custom_prompt_key
if you want to try different inputs.
"""
ref_arch_src = read_file(os.path.join(REPO_TOP_PATH, "KernelBench", "level1", "1_Square_matrix_multiplication_.py"))
assert len(ref_arch_src) > 0, "ref_arch_src is empty"
print("Testing prompt construction...")
scratch_dir = os.path.join(REPO_TOP_PATH, "scratch")
# baseline prompt
baseline_prompt = get_prompt_for_backend(
ref_arch_src=ref_arch_src,
backend="cuda",
option="one_shot",
precision="fp32",
        # kept GPU-platform agnostic for the baseline
    )
    log_prompt(baseline_prompt, scratch_dir, "baseline_prompt.txt")
# few shot prompt
few_shot_prompt = get_prompt_for_backend(
ref_arch_src=ref_arch_src,
backend="cuda",
option="few_shot",
precision="fp32",
)
    log_prompt(few_shot_prompt, scratch_dir, "few_shot_prompt.txt")
# DSL prompt
dsl_prompt = get_prompt_for_backend(
ref_arch_src=ref_arch_src,
backend="triton",
option="one_shot",
precision="fp32",
)
    log_prompt(dsl_prompt, scratch_dir, "dsl_prompt.txt")
# hardware prompt
hardware_prompt = get_prompt_for_backend(
ref_arch_src=ref_arch_src,
backend="cute",
option="one_shot",
precision="fp32",
include_hardware=True,
gpu_name="L40S",
)
    log_prompt(hardware_prompt, scratch_dir, "hardware_prompt.txt")
# custom prompt defined in prompts.toml
custom_prompt = get_custom_prompt(
        # the key matches the prompt's name in the [custom_prompts] section of the TOML file
custom_key="custom",
ref_arch_src=ref_arch_src,
backend="triton",
option="one_shot",
precision="fp32",
include_hardware=True,
gpu_name="L40S",
)
    log_prompt(custom_prompt, scratch_dir, "custom_prompt.txt")
if __name__ == "__main__":
test_prompt()