-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathrequest.rs
More file actions
55 lines (48 loc) · 1.51 KB
/
request.rs
File metadata and controls
55 lines (48 loc) · 1.51 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
use serde_json::{json, Value};
/// A fully prepared inference HTTP request: the endpoint URL to POST to
/// plus the JSON body to send. Produced by [`build_inference_request`].
pub(super) struct InferenceRequest {
    /// Complete endpoint URL (base URL + API path).
    pub(super) url: String,
    /// JSON request body; its shape depends on the endpoint flavor
    /// (Ollama native vs. OpenAI-compatible).
    pub(super) body: Value,
}
/// System prompt for the probe: instructs the model to act as a code
/// reviewer and reply with a single JSON object.
const SYSTEM_MSG: &str = "You are a code reviewer. Respond with a single JSON object.";
/// User prompt for the probe: a one-line code diff plus the exact JSON
/// reply the model is expected to echo back.
const USER_MSG: &str =
    "Review this code change:\n+fn add(a: i32, b: i32) -> i32 { a + b }\nRespond with: {\"ok\": true}";
/// Builds the probe request for the given endpoint flavor.
///
/// An `endpoint_type` of `"ollama"` targets the native Ollama chat API;
/// any other value falls back to the OpenAI-compatible endpoint.
/// NOTE(review): `endpoint_type` is a stringly-typed flag — an enum would
/// be safer, but changing it here would break the caller's interface.
pub(super) fn build_inference_request(
    base_url: &str,
    model_name: &str,
    endpoint_type: &str,
) -> InferenceRequest {
    let probe = build_probe_messages();
    match endpoint_type {
        "ollama" => build_ollama_request(base_url, model_name, probe),
        _ => build_openai_request(base_url, model_name, probe),
    }
}
fn build_probe_messages() -> Value {
json!([
{"role": "system", "content": SYSTEM_MSG},
{"role": "user", "content": USER_MSG}
])
}
/// Builds a request against the native Ollama chat API (`/api/chat`).
/// Streaming is disabled and the reply is capped at 50 tokens via
/// `options.num_predict`.
fn build_ollama_request(base_url: &str, model_name: &str, messages: Value) -> InferenceRequest {
    let url = format!("{base_url}/api/chat");
    let body = json!({
        "model": model_name,
        "messages": messages,
        "stream": false,
        "options": {"num_predict": 50}
    });
    InferenceRequest { url, body }
}
/// Builds a request against an OpenAI-compatible endpoint
/// (`/v1/chat/completions`), capping the reply at 50 tokens with a low
/// temperature of 0.1 for near-deterministic output.
fn build_openai_request(base_url: &str, model_name: &str, messages: Value) -> InferenceRequest {
    let url = format!("{base_url}/v1/chat/completions");
    let body = json!({
        "model": model_name,
        "messages": messages,
        "max_tokens": 50,
        "temperature": 0.1
    });
    InferenceRequest { url, body }
}