Skip to content

Commit 729d691

Browse files
author
Atlas
committed
fix(export): prompt in workspace.yaml now describes PR changes
- Prompt shows what the PR does, not "add tests for X" - Remove conventional commit prefixes (fix:, feat:, etc.) - Remove [brackets] like [BUG] or [Feature] - Natural description of the PR change - Tests: 1197 pass
1 parent 5f15e37 commit 729d691

2 files changed

Lines changed: 70 additions & 15 deletions

File tree

src/swe_forge/export/workspace.py

Lines changed: 18 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -21,34 +21,37 @@ def _extract_test_file_names(test_patch: str) -> list[str]:
2121
return [Path(m).name for m in matches]
2222

2323

24-
def _make_natural_prompt(original_prompt: str, repo: str) -> str:
25-
"""Transform technical PR description into natural user prompt.
24+
def _make_natural_prompt(original_prompt: str, repo: str, patch: str = "") -> str:
25+
"""Create a simple natural prompt from PR info.
2626
2727
Args:
28-
original_prompt: Original PR title/description
28+
original_prompt: Original PR title/description
2929
repo: Repository name
30+
patch: The patch diff (optional)
3031
3132
Returns:
32-
Natural prompt like a real user would write
33+
Simple natural prompt describing the changes
3334
"""
34-
# Clean up the prompt
35+
if not original_prompt:
36+
return f"Changes in {repo}" if repo else "Changes"
37+
3538
prompt = original_prompt.strip()
3639

37-
# Remove common prefixes
38-
for prefix in ["fix:", "feat:", "refactor:", "chore:", "docs:", "style:", "test:"]:
40+
# Remove conventional commit prefixes (fix:, feat:, refactor:, etc.)
41+
for prefix in ["fix:", "feat:", "refactor:", "chore:", "docs:", "style:", "test:", "build:", "perf:", "ci:"]:
3942
if prompt.lower().startswith(prefix):
4043
prompt = prompt[len(prefix):].strip()
4144
break
4245

43-
# Truncate if too long
44-
if len(prompt) > 150:
45-
prompt = prompt[:147] + "..."
46+
# Remove leading [brackets] like [BUG] or [Feature]
47+
import re
48+
prompt = re.sub(r"^\[[^\]]+\]\s*", "", prompt)
4649

47-
# Create natural prompt
50+
# Capitalize first letter
4851
if prompt:
49-
return f"Can you add tests for: {prompt}"
50-
else:
51-
return f"Can you add tests for the changes in {repo}?"
52+
prompt = prompt[0].upper() + prompt[1:]
53+
54+
return prompt[:200] if prompt else f"Changes in {repo}"
5255

5356

5457
def export_task_to_workspace(
@@ -106,7 +109,7 @@ def export_task_to_workspace(
106109
},
107110
"language": task.language,
108111
"difficulty_score": task.difficulty_score,
109-
"prompt": _make_natural_prompt(task.prompt, task.repo),
112+
"prompt": _make_natural_prompt(task.prompt, task.repo, task.patch),
110113
"environment": {
111114
"image": docker_image or "ubuntu:24.04",
112115
"language_version": (
Lines changed: 52 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,52 @@
1+
"""Generate natural prompts for SWE tasks using LLM."""
2+
3+
from logging import getLogger
4+
5+
logger = getLogger(__name__)
6+
7+
8+
async def generate_task_prompt(llm_client, task) -> str:
9+
"""Generate a natural prompt describing the PR changes.
10+
11+
Args:
12+
llm_client: LLM client for generation
13+
task: SweTask with PR info
14+
15+
Returns:
16+
Natural prompt like a user would write
17+
"""
18+
if not llm_client:
19+
# Fallback to original prompt
20+
return task.prompt or f"Changes in {task.repo}"
21+
22+
try:
23+
from swe_forge.llm.models import GenerationRequest, GenerationResponse
24+
25+
system = "You write short, natural descriptions of code changes."
26+
27+
user = f"""Describe this GitHub PR in one sentence, like a developer would explain it to a colleague.
28+
29+
PR title: {task.prompt[:200]}
30+
Repo: {task.repo}
31+
32+
Just describe what changed, nothing else. Keep it under 50 words."""
33+
34+
request = GenerationRequest(
35+
model=llm_client.default_model,
36+
messages=[
37+
{"role": "system", "content": system},
38+
{"role": "user", "content": user},
39+
],
40+
temperature=0.3,
41+
max_tokens=100,
42+
)
43+
44+
response = await llm_client.complete(request)
45+
46+
if response.choices and response.choices[0].message:
47+
return response.choices[0].message.content.strip()
48+
except Exception as e:
49+
logger.debug(f"LLM prompt generation failed: {e}")
50+
51+
# Fallback
52+
return task.prompt or f"Changes in {task.repo}"

0 commit comments

Comments
 (0)