#!/usr/bin/env python3
"""
Simple Benchmark Pipeline for Q-Benchmark
This pipeline reads prompts from converted_prompts CSV files, runs them through
OpenAI and Google Gemini models, evaluates responses, and updates CSV files
with results.
"""
import os
import sys
import csv
import time
import argparse
import logging
from pathlib import Path
from typing import Dict, List, Any, Optional
from dotenv import load_dotenv
# Progress bar - use tqdm if available, otherwise simple counter
try:
    from tqdm import tqdm
    HAS_TQDM = True
except ImportError:
    HAS_TQDM = False
# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
class SimpleLLMClient:
    """Base class for LLM API clients"""

    def __init__(self, api_key: str, model_name: str, provider: str):
        self.api_key = api_key
        self.model_name = model_name
        self.provider = provider

    def generate(self, prompt: str, max_tokens: int = 1000) -> Dict[str, Any]:
        """Generate response from LLM"""
        raise NotImplementedError
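
# Every concrete client below returns a dict of the same shape from generate():
#   {"response": str, "success": bool, "error": Optional[str], "response_time": float}
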
class SimpleOpenAIClient(SimpleLLMClient):
    """Simple OpenAI API Client"""

    def __init__(self, api_key: str, model_name: str = "gpt-4.1-mini"):
        super().__init__(api_key, model_name, "openai")
        try:
            from openai import OpenAI
            self.client = OpenAI(api_key=api_key)
        except ImportError:
            raise ImportError("Please install openai: pip install openai")

    def generate(self, prompt: str, max_tokens: int = 4000) -> Dict[str, Any]:
        """Generate response using OpenAI API"""
        start_time = time.time()
        try:
            # Use max_completion_tokens for newer models (GPT-4, GPT-5, etc.)
            # and max_tokens for legacy models
            # Some models don't support temperature=0, so use default for those
            if self.model_name.startswith(('gpt-4', 'gpt-5', 'o1-')):
                # o1 models don't support temperature parameter at all
                if self.model_name.startswith('o1-'):
                    response = self.client.chat.completions.create(
                        model=self.model_name,
                        messages=[{"role": "user", "content": prompt}],
                        max_completion_tokens=max_tokens
                    )
                else:
                    # GPT-4, GPT-5 models - some may not support temperature=0
                    # GPT-5-mini specifically doesn't support temperature=0
                    if self.model_name in ['gpt-5-mini', 'gpt-5-mini-2025-08-07']:
                        # Use default temperature for GPT-5-mini
                        response = self.client.chat.completions.create(
                            model=self.model_name,
                            messages=[{"role": "user", "content": prompt}],
                            max_completion_tokens=max_tokens
                        )
                    else:
                        try:
                            response = self.client.chat.completions.create(
                                model=self.model_name,
                                messages=[{"role": "user", "content": prompt}],
                                max_completion_tokens=max_tokens,
                                temperature=0
                            )
                        except Exception as e:
                            if "temperature" in str(e):
                                # Retry without temperature parameter
                                response = self.client.chat.completions.create(
                                    model=self.model_name,
                                    messages=[{"role": "user", "content": prompt}],
                                    max_completion_tokens=max_tokens
                                )
                            else:
                                raise
            else:
                response = self.client.chat.completions.create(
                    model=self.model_name,
                    messages=[{"role": "user", "content": prompt}],
                    max_tokens=max_tokens,
                    temperature=0
                )
            end_time = time.time()
            response_content = response.choices[0].message.content or ""
            # Debug logging for empty responses
            if not response_content or not response_content.strip():
                logger.warning("WARNING: Empty response from OpenAI API - content is None or empty")
                logger.warning(f"  Finish reason: {response.choices[0].finish_reason}")
            return {
                "response": response_content.strip(),
                "success": True,
                "error": None,
                "response_time": end_time - start_time
            }
        except Exception as e:
            end_time = time.time()
            logger.error(f"OpenAI API error: {e}")
            return {
                "response": "",
                "success": False,
                "error": str(e),
                "response_time": end_time - start_time
            }
class SimpleGoogleClient(SimpleLLMClient):
    """Simple Google Gemini API Client"""

    def __init__(self, api_key: str, model_name: str = "gemini-1.5-flash"):
        super().__init__(api_key, model_name, "google")
        try:
            import google.generativeai as genai
            genai.configure(api_key=api_key)
            self.model = genai.GenerativeModel(model_name)
        except ImportError:
            raise ImportError("Please install google-generativeai: pip install google-generativeai")

    def generate(self, prompt: str, max_tokens: int = 8192) -> Dict[str, Any]:
        """Generate response using Google Gemini API"""
        start_time = time.time()
        try:
            response = self.model.generate_content(
                prompt,
                generation_config={
                    "max_output_tokens": max_tokens,
                    "temperature": 0,
                    "top_p": 1
                }
            )
            end_time = time.time()
            response_text = response.text if response.text else ""
            return {
                "response": response_text.strip(),
                "success": True,
                "error": None,
                "response_time": end_time - start_time
            }
        except Exception as e:
            end_time = time.time()
            logger.error(f"Google API error: {e}")
            return {
                "response": "",
                "success": False,
                "error": str(e),
                "response_time": end_time - start_time
            }
class SimpleBedrockClient(SimpleLLMClient):
    """Simple AWS Bedrock API Client"""

    def __init__(self, api_key: str, model_name: str = "us.meta.llama3-3-70b-instruct-v1:0", region: str = "us-east-1"):
        super().__init__(api_key, model_name, "bedrock")
        try:
            from utils.bedrock_client import BedrockClient
            self.client = BedrockClient(api_key, model_name, region)
        except ImportError:
            raise ImportError("Bedrock client not found. Make sure utils/bedrock_client.py exists")

    def generate(self, prompt: str, max_tokens: int = 4000) -> Dict[str, Any]:
        """Generate response using Bedrock API"""
        return self.client.generate(prompt, max_tokens)
class SimpleBenchmarkPipeline:
    """Simple benchmark pipeline for processing CSV prompt files"""

    def __init__(self, converted_prompts_dir: str = "converted_prompts", init_clients: bool = True,
                 openai_model: str = "gpt-3.5-turbo", google_model: str = "gemini-1.5-flash",
                 bedrock_model: str = "us.meta.llama3-3-70b-instruct-v1:0",
                 variants: str = None, output_dir_name: str = None):
        load_dotenv()
        self.converted_prompts_dir = Path(converted_prompts_dir)
        self.results_dir = Path("benchmark_results")
        self.openai_model = openai_model
        self.google_model = google_model
        self.bedrock_model = bedrock_model
        self.variants = variants
        self.output_dir_name = output_dir_name
        # Create results directory
        self.results_dir.mkdir(exist_ok=True)
        # Initialize LLM clients only if requested
        if init_clients:
            self.clients = self._initialize_clients()
            logger.info(f"Initialized pipeline with {len(self.clients)} LLM clients")
        else:
            self.clients = {}
    def _initialize_clients(self) -> Dict[str, SimpleLLMClient]:
        """Initialize LLM clients from .env file"""
        clients = {}
        # Initialize OpenAI client
        openai_key = os.getenv("OPENAI_API_KEY")
        if openai_key:
            try:
                clients["openai"] = SimpleOpenAIClient(openai_key, self.openai_model)
                logger.info(f"Initialized OpenAI client with model: {self.openai_model}")
            except Exception as e:
                logger.warning(f"Failed to initialize OpenAI client: {e}")
        else:
            logger.warning("OPENAI_API_KEY not found in .env file")
        # Initialize Google client
        google_key = os.getenv("GOOGLE_API_KEY")
        if google_key:
            try:
                clients["google"] = SimpleGoogleClient(google_key, self.google_model)
                logger.info(f"Initialized Google client with model: {self.google_model}")
            except Exception as e:
                logger.warning(f"Failed to initialize Google client: {e}")
        else:
            logger.warning("GOOGLE_API_KEY not found in .env file")
        # Initialize Bedrock client
        bedrock_key = os.getenv("AWS_BEARER_TOKEN_BEDROCK") or os.getenv("BEDROCK_API_KEY")
        bedrock_region = os.getenv("AWS_REGION", "us-east-1")
        if bedrock_key:
            try:
                clients["bedrock"] = SimpleBedrockClient(bedrock_key, self.bedrock_model, bedrock_region)
                logger.info(f"Initialized Bedrock client with model: {self.bedrock_model}")
            except Exception as e:
                logger.warning(f"Failed to initialize Bedrock client: {e}")
        else:
            logger.warning("AWS_BEARER_TOKEN_BEDROCK or BEDROCK_API_KEY not found in .env file")
        if not clients:
            raise ValueError("No valid clients initialized. Please add API keys to .env file")
        return clients
    def load_csv_prompts(self, csv_file: Path) -> List[Dict[str, str]]:
        """Load prompts from CSV file with corruption detection and recovery"""
        prompts = []
        try:
            with open(csv_file, 'r', encoding='utf-8', newline='') as f:
                reader = csv.DictReader(f)
                corrupted_rows = 0
                for row in reader:
                    # Check for corruption (case_id not starting with 'case_')
                    case_id = row.get('case_id', '').strip()
                    if case_id and not case_id.startswith('case_'):
                        corrupted_rows += 1
                        continue
                    prompts.append(dict(row))
            if corrupted_rows > 0:
                logger.warning(f"Found {corrupted_rows} corrupted rows in {csv_file}, attempting perfect recovery")
                return self.perfect_recovery_csv(csv_file)
            return prompts
        except Exception as e:
            logger.error(f"Error loading CSV file {csv_file}: {e}")
            logger.info(f"Attempting perfect recovery for {csv_file}")
            return self.perfect_recovery_csv(csv_file)
    def perfect_recovery_csv(self, benchmark_file: Path) -> List[Dict[str, str]]:
        """Perform perfect recovery by mapping to converted_prompts source"""
        try:
            # Find corresponding source file in converted_prompts
            source_file = self.find_source_file_for_recovery(benchmark_file)
            if not source_file or not source_file.exists():
                logger.error(f"Source file not found for recovery: {source_file}")
                return []
            # Load source data (authoritative)
            source_data = []
            with open(source_file, 'r', encoding='utf-8', newline='') as f:
                reader = csv.DictReader(f)
                for row in reader:
                    source_data.append(dict(row))
            # Load benchmark data (potentially corrupted) for response merging
            benchmark_data = []
            benchmark_lookup = {}
            try:
                with open(benchmark_file, 'r', encoding='utf-8', newline='') as f:
                    reader = csv.DictReader(f)
                    for row in reader:
                        benchmark_data.append(dict(row))
                        case_id = row.get('case_id', '').strip()
                        # Only trust valid case_ids for response data
                        if case_id and case_id.startswith('case_'):
                            benchmark_lookup[case_id] = row
            except:
                pass  # If benchmark file is completely corrupted, use empty lookup
            # Perform perfect recovery
            recovered_data = []
            for source_row in source_data:
                case_id = source_row.get('case_id', '').strip()
                recovered_row = source_row.copy()
                # Merge response data from benchmark if available and valid
                if case_id in benchmark_lookup:
                    benchmark_row = benchmark_lookup[case_id]
                    for field in ['Response']:
                        if field in benchmark_row and benchmark_row[field]:
                            recovered_row[field] = benchmark_row[field]
                        elif field not in recovered_row:
                            recovered_row[field] = ''
                else:
                    # This case was missing or corrupted in benchmark
                    for field in ['Response']:
                        if field not in recovered_row:
                            recovered_row[field] = ''
                recovered_data.append(recovered_row)
            corrupted_count = len(benchmark_data) - len(benchmark_lookup)
            logger.info(f"Perfect recovery completed: {len(recovered_data)} rows recovered, {corrupted_count} corrupted rows fixed")
            return recovered_data
        except Exception as e:
            logger.error(f"Perfect recovery failed for {benchmark_file}: {e}")
            return []
    def determine_source_directory(self, model_name: str) -> tuple[Path, Optional[str]]:
        """
        Determine the correct source directory based on model name patterns.

        Returns:
            tuple: (source_directory_path, subdirectory_or_none)
        """
        base_dir = Path.cwd()
        # Self-augmentation models: *_critical_values, *_format_explaination, *_structural_info
        self_aug_patterns = ["critical_values", "format_explaination", "structural_info"]
        for pattern in self_aug_patterns:
            if model_name.endswith(f"_{pattern}"):
                return base_dir / "converted_prompts_self_aug", pattern
        # Variant models: *_wo_*
        variant_patterns = ["wo_role_prompting", "wo_partition_mark", "wo_format_explaination",
                            "wo_oneshot", "wo_change_order"]
        for pattern in variant_patterns:
            if model_name.endswith(f"_{pattern}"):
                return base_dir / "converted_prompts_variants", pattern
        # Base models: gpt-5-mini, gemini-2.5-flash (exact matches or base models)
        base_models = ["gpt-5-mini", "gemini-2.5-flash", "gpt-4o-mini", "gpt-4", "gemini-1.5-flash"]
        if model_name in base_models:
            return base_dir / "converted_prompts", None
        # Handle other base model variations (e.g., gpt-4.1-mini, etc.)
        # If it doesn't match any pattern above, it's likely a base model
        logger.info(f"Model '{model_name}' doesn't match known patterns, assuming base model")
        return base_dir / "converted_prompts", None
    def find_source_file_for_recovery(self, benchmark_file: Path) -> Optional[Path]:
        """Find corresponding source file with intelligent directory mapping"""
        try:
            # For benchmark_results files, map back to appropriate source directory
            if "benchmark_results" in str(benchmark_file):
                # Extract relative path from benchmark_results
                benchmark_results_path = self.results_dir
                relative_path = benchmark_file.relative_to(benchmark_results_path)
                # Extract model name and path components
                model_name = relative_path.parts[0]  # First part is model directory
                path_parts = relative_path.parts[1:]  # Rest is dataset/task/filename
                # Determine source directory using intelligent mapping
                source_dir, subdirectory = self.determine_source_directory(model_name)
                # Build source file path
                if subdirectory:
                    # For self-aug and variants: source_dir/subdirectory/dataset/task/filename
                    source_file = source_dir / subdirectory / Path(*path_parts)
                else:
                    # For base models: source_dir/dataset/task/filename
                    source_file = source_dir / Path(*path_parts)
                logger.debug(f"Model '{model_name}' maps to source: {source_file}")
                return source_file
            else:
                # For converted_prompts files being processed directly, use as-is
                return benchmark_file
        except Exception as e:
            logger.error(f"Error finding source file for {benchmark_file}: {e}")
            return None
    def save_csv_results(self, csv_file: Path, prompts: List[Dict[str, str]], provider_name: str, self_aug_type: Optional[str] = None) -> bool:
        """Save prompts results to benchmark_results directory organized by model with smart merging"""
        try:
            # Get actual model name from client
            client = self.clients[provider_name]
            actual_model_name = client.model_name
            # Clean up model name for directory (remove slashes)
            actual_model_name = actual_model_name.replace("/", "-").replace("\\", "-")
            # Create model-specific directory using actual model name
            if self.output_dir_name:
                # Use custom output directory name if provided
                model_dir_name = self.output_dir_name
            elif self_aug_type:
                model_dir_name = f"{actual_model_name}_{self_aug_type}"
            elif self.variants:
                model_dir_name = f"{actual_model_name}_{self.variants}"
            else:
                model_dir_name = actual_model_name
            model_dir = self.results_dir / model_dir_name
            model_dir.mkdir(exist_ok=True)
            # Recreate the same structure as converted_prompts
            relative_path = csv_file.relative_to(self.converted_prompts_dir)
            output_file = model_dir / relative_path
            # Ensure parent directory exists
            output_file.parent.mkdir(parents=True, exist_ok=True)
            # Load existing CSV data if file exists (smart merging with corruption handling)
            existing_data = {}
            if output_file.exists():
                try:
                    corrupted_rows = 0
                    with open(output_file, 'r', newline='', encoding='utf-8') as csvfile:
                        reader = csv.DictReader(csvfile)
                        for row in reader:
                            case_id = row.get('case_id', '').strip()
                            # Check for corruption
                            if case_id and not case_id.startswith('case_'):
                                corrupted_rows += 1
                                continue
                            existing_data[row['case_id']] = row
                    if corrupted_rows > 0:
                        logger.warning(f"Found {corrupted_rows} corrupted rows in existing file {output_file}, using perfect recovery")
                        # Load with perfect recovery
                        recovered_data = self.perfect_recovery_csv(output_file)
                        existing_data = {}
                        for row in recovered_data:
                            existing_data[row['case_id']] = row
                except Exception as e:
                    logger.error(f"Error loading existing file {output_file}: {e}")
                    logger.info("Attempting perfect recovery for existing file")
                    # Try perfect recovery on corrupted existing file
                    recovered_data = self.perfect_recovery_csv(output_file)
                    existing_data = {}
                    for row in recovered_data:
                        existing_data[row['case_id']] = row
            # Update existing data with new prompts (overwrite or add)
            # Skip cases marked with skip='TRUE' - don't write them to benchmark_results
            for prompt in prompts:
                # Skip cases that are marked as skip
                if prompt.get('skip', '').strip().upper() == 'TRUE':
                    logger.info(f"Not writing skip case to results: {prompt['case_id']}")
                    continue
                case_id = prompt['case_id']
                if case_id in existing_data:
                    logger.info(f"Updating existing case_id: {case_id}")
                else:
                    logger.info(f"Adding new case_id: {case_id}")
                existing_data[case_id] = prompt
            # Sort by case_id (natural sorting for case_1, case_2, etc.)
            def natural_sort_key(case_id):
                # Extract number from case_id (e.g., "case_1" -> 1)
                try:
                    if case_id.startswith('case_'):
                        return int(case_id.split('_')[1])
                    else:
                        return float('inf')  # Put non-standard case_ids at the end
                except (ValueError, IndexError):
                    return float('inf')
            sorted_prompts = sorted(existing_data.values(), key=lambda x: natural_sort_key(x['case_id']))
            # Don't include 'skip' column in benchmark_results - only in converted_prompts
            fieldnames = ["case_id", "task", "question", "questionnaire",
                          "expected_answer", "prompt", "Response", "Correct"]
            with open(output_file, 'w', encoding='utf-8', newline='') as f:
                writer = csv.DictWriter(f, fieldnames=fieldnames, extrasaction='ignore')
                writer.writeheader()
                for prompt in sorted_prompts:
                    # Ensure Correct field exists (default to empty)
                    if 'Correct' not in prompt:
                        prompt['Correct'] = ''
                    # Don't write skip field to benchmark_results
                    writer.writerow(prompt)
            return True
        except Exception as e:
            logger.error(f"Error saving results file: {e}")
            return False
    def process_csv_file(self, csv_file: Path, model: str, max_cases: Optional[int] = None, start_case: int = 2, overall_pbar=None, self_aug_type: Optional[str] = None) -> bool:
        """Process a single CSV file with specified model"""
        logger.info(f"Processing {csv_file} with {model}")
        # Load prompts
        prompts = self.load_csv_prompts(csv_file)
        if not prompts:
            logger.warning(f"No prompts found in {csv_file}")
            return False
        # Apply start_case filtering (convert to 0-based index)
        if start_case > 1:
            prompts = prompts[start_case-1:]
            logger.info(f"Starting from case {start_case}, processing {len(prompts)} remaining cases")
        # Limit cases if specified
        if max_cases:
            prompts = prompts[:max_cases]
        # Get client
        if model not in self.clients:
            logger.error(f"Model {model} not available. Available models: {list(self.clients.keys())}")
            return False
        client = self.clients[model]
        # Process each prompt
        processed_count = 0
        skipped_count = 0
        for prompt_data in prompts:
            # Check for skip flag
            if prompt_data.get('skip', '').strip().upper() == 'TRUE':
                case_id = prompt_data.get('case_id', 'unknown')
                logger.info(f"⏭️ Skipping {case_id} (marked as skip)")
                skipped_count += 1
                # Update progress bar
                if overall_pbar:
                    overall_pbar.update(1)
                continue
            # Always process (don't skip based on existing Response field)
            # For self-aug prompts, they are already fully processed during generation
            # No runtime processing needed - use prompt as-is
            processed_prompt = prompt_data["prompt"]
            # Generate response
            result = client.generate(processed_prompt)
            # Check if blocked by safety filter
            if not result["success"] and "finish_reason" in str(result.get("error", "")) and "2" in str(result.get("error", "")):
                logger.warning(f"Safety filter detected for case_id {prompt_data.get('case_id', 'unknown')}, inserting safety filter response")
                # Insert safety filter blocked response
                prompt_data["Response"] = "BLOCKED_BY_SAFETY_FILTER"
                logger.info(f"Inserted safety filter response for case_id {prompt_data.get('case_id', 'unknown')}")
            else:
                # Update prompt data with actual response (successful or failed)
                prompt_data["Response"] = result["response"]
                # Log warning for empty responses
                if not result["response"]:
                    case_id = prompt_data.get('case_id', 'unknown')
                    logger.warning(f"WARNING: Empty response received for {case_id}")
            # No evaluation - just store the response
            processed_count += 1
            # Update overall progress bar
            if overall_pbar:
                overall_pbar.update(1)
            elif processed_count % 5 == 0:  # Log progress every 5 prompts
                logger.info(f"Processed {processed_count}/{len(prompts)} prompts")
            # Rate limiting
            time.sleep(0.5)
        # Save results to benchmark_results directory
        success = self.save_csv_results(csv_file, prompts, model, self_aug_type)
        if success:
            actual_model_name = self.clients[model].model_name
            if skipped_count > 0:
                logger.info(f"Saved results for {actual_model_name} with {processed_count} responses ({skipped_count} cases skipped)")
            else:
                logger.info(f"Saved results for {actual_model_name} with {processed_count} responses")
        return success
    def find_csv_files(self, dataset: Optional[str] = None,
                       task: Optional[str] = None,
                       format_type: Optional[str] = None) -> List[Path]:
        """Find CSV files matching criteria"""
        csv_files = []
        if not self.converted_prompts_dir.exists():
            logger.error(f"Converted prompts directory not found: {self.converted_prompts_dir}")
            return []
        # Build search pattern
        pattern_parts = []
        if dataset:
            pattern_parts.append(dataset)
        else:
            pattern_parts.append("*")
        if task:
            pattern_parts.append(task)
        else:
            pattern_parts.append("*")
        # Search for CSV files
        search_pattern = "/".join(pattern_parts) + "/*.csv"
        for csv_file in self.converted_prompts_dir.glob(search_pattern):
            # Filter by format if specified (case-insensitive)
            if format_type and format_type.lower() not in csv_file.stem.lower():
                continue
            csv_files.append(csv_file)
        return sorted(csv_files)
    def run_benchmark(self,
                      dataset: Optional[str] = None,
                      task: Optional[str] = None,
                      format_type: Optional[str] = None,
                      model: Optional[str] = None,
                      max_cases: Optional[int] = None,
                      start_case: int = 2,
                      self_aug_type: Optional[str] = None) -> bool:
        """Run benchmark on specified criteria"""
        # Find CSV files to process
        csv_files = self.find_csv_files(dataset, task, format_type)
        if not csv_files:
            logger.warning("No CSV files found matching criteria")
            return False
        logger.info(f"Found {len(csv_files)} CSV files to process")
        # Print discovered files
        print(f"DISCOVERED FILES ({len(csv_files)}):")
        for i, csv_file in enumerate(csv_files, 1):
            rel_path = csv_file.relative_to(self.converted_prompts_dir)
            print(f" {i}. {rel_path}")
        print()
        # Determine models to use
        models_to_use = []
        if model and model in self.clients:
            models_to_use = [model]
        elif model:
            logger.error(f"Model {model} not available. Available: {list(self.clients.keys())}")
            return False
        else:
            models_to_use = list(self.clients.keys())
        # Print models that will be used
        print(f"MODELS TO USE: {models_to_use}")
        print()
        # Calculate total operations for progress bar
        total_operations = 0
        for csv_file in csv_files:
            prompts = self.load_csv_prompts(csv_file)
            # Apply start_case and max_cases filtering
            if start_case > 1:
                prompts = prompts[start_case-1:]  # Convert to 0-based index
            if max_cases:
                prompts = prompts[:max_cases]
            total_operations += len(prompts) * len(models_to_use)
        # Create overall progress bar
        if HAS_TQDM:
            overall_pbar = tqdm(total=total_operations, desc="Overall Progress")
        else:
            overall_pbar = None
            logger.info(f"Starting benchmark: {total_operations} total operations")
        # Process each CSV file with each model
        success_count = 0
        total_count = len(csv_files) * len(models_to_use)
        try:
            for csv_file in csv_files:
                for model_name in models_to_use:
                    if self.process_csv_file(csv_file, model_name, max_cases, start_case, overall_pbar, self_aug_type):
                        success_count += 1
        finally:
            if overall_pbar:
                overall_pbar.close()
        logger.info(f"Benchmark completed: {success_count}/{total_count} files processed successfully")
        # Store actual model names used for summary
        self.models_used = [self.clients[model].model_name for model in models_to_use]
        return success_count == total_count
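
# Programmatic use (illustrative sketch; assumes the relevant API keys are set in .env):
#   pipeline = SimpleBenchmarkPipeline("converted_prompts", openai_model="gpt-4o-mini")
#   pipeline.run_benchmark(model="openai", max_cases=2)
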
def main():
    """Command-line interface"""
    parser = argparse.ArgumentParser(
        description="Simple benchmark pipeline for Q-Benchmark converted prompts")
    parser.add_argument("--dataset", help="Dataset to process (default: all)")
    parser.add_argument("--task", help="Task to process (default: all)")
    parser.add_argument("--format", help="Data format to process (default: all)")
    parser.add_argument("--variants",
                        choices=["all", "wo_role_prompting", "wo_partition_mark", "wo_format_explaination",
                                 "wo_oneshot", "wo_change_order"],
                        help="Prompt variant to use instead of standard prompts. Use 'all' to run all variants. Available variants: wo_role_prompting, wo_partition_mark, wo_format_explaination, wo_oneshot, wo_change_order")
    parser.add_argument("--self_aug",
                        choices=["format_explaination", "critical_values", "structural_info"],
                        help="Use self-augmentation prompts with specific request type. Mutually exclusive with --variants.")
    parser.add_argument("--model", choices=["openai", "google", "bedrock"],
                        help="Model provider to use (default: all available)")
    parser.add_argument("--openai-model", default="gpt-3.5-turbo",
                        help="OpenAI model name (default: gpt-3.5-turbo). Examples: gpt-4o-mini, gpt-4, gpt-3.5-turbo")
    parser.add_argument("--google-model", default="gemini-1.5-flash",
                        help="Google model name (default: gemini-1.5-flash)")
    parser.add_argument("--bedrock-model", default="us.meta.llama3-3-70b-instruct-v1:0",
                        help="Bedrock model ID (default: us.meta.llama3-3-70b-instruct-v1:0). Examples: us.deepseek.r1-v1:0, qwen.qwen3-32b-v1:0")
    parser.add_argument("--max-cases", type=int,
                        help="Maximum cases to process per file")
    parser.add_argument("--start-case", type=int, default=2,
                        help="Starting case number (default: 2)")
    parser.add_argument("--converted-prompts-dir", default="converted_prompts",
                        help="Directory containing converted prompt CSV files")
    parser.add_argument("--output-dir-name",
                        help="Custom output directory name under benchmark_results/ (overrides default model name)")
    parser.add_argument("--list", action="store_true",
                        help="List available datasets, tasks, and formats")
    args = parser.parse_args()
    # Validate mutually exclusive arguments
    if args.variants and args.self_aug:
        parser.error("--variants and --self_aug are mutually exclusive. Use one or the other.")
    # Infer --model from specific model arguments if not explicitly set
    if not args.model:
        # Check which model-specific arguments were explicitly provided
        model_args_provided = []
        # Check if openai-model was explicitly provided (not just the default)
        if '--openai-model' in sys.argv:
            model_args_provided.append('openai')
        # Check if google-model was explicitly provided
        if '--google-model' in sys.argv:
            model_args_provided.append('google')
        # Check if bedrock-model was explicitly provided
        if '--bedrock-model' in sys.argv:
            model_args_provided.append('bedrock')
        # If exactly one model was specified, use that
        if len(model_args_provided) == 1:
            args.model = model_args_provided[0]
            logger.info(f"Inferred --model={args.model} from command-line arguments")
        elif len(model_args_provided) > 1:
            logger.warning(f"Multiple model providers specified: {model_args_provided}. Will use all available models.")
        # If no specific model was provided, default behavior is to use all available
    # Define REQUEST messages for self_aug types
    SELF_AUG_REQUESTS = {
        "format_explaination": "Generate short format specification and description of the survey within five sentences.",
        "critical_values": "Identify critical values and ranges of the survey related within five sentences.",
        "structural_info": "Describe structural information, patterns and statistics of the survey within five sentences."
    }
    # Print configuration settings
    if not args.list:
        print("="*60)
        print("BENCHMARK CONFIGURATION")
        print("="*60)
        print(f"Dataset: {args.dataset or 'ALL'}")
        print(f"Task: {args.task or 'ALL'}")
        print(f"Format: {args.format or 'ALL'}")
        if args.self_aug:
            print(f"Self-Augmentation: {args.self_aug}")
        else:
            print(f"Variants: {args.variants or 'STANDARD'}")
        # Only print the model we're actually using
        if args.model == "openai":
            print(f"Model: {args.openai_model}")
        elif args.model == "google":
            print(f"Model: {args.google_model}")
        elif args.model == "bedrock":
            print(f"Model: {args.bedrock_model}")
        else:
            print(f"OpenAI Model: {args.openai_model}")
            print(f"Google Model: {args.google_model}")
            print(f"Bedrock Model: {args.bedrock_model}")
        print(f"Max Cases per File: {args.max_cases or 'UNLIMITED'}")
        print(f"Starting Case: {args.start_case}")
        if args.self_aug:
            print(f"Prompts Directory: converted_prompts_self_aug/{args.self_aug}")
        elif args.variants:
            print(f"Prompts Directory: converted_prompts_variants/{args.variants}")
        else:
            print(f"Prompts Directory: {args.converted_prompts_dir}")
        if args.output_dir_name:
            print(f"Output Directory: benchmark_results/{args.output_dir_name}")
        print("="*60)
        print()
    # Handle --variants all case
    if args.variants == "all":
        available_variants = ["wo_role_prompting", "wo_partition_mark", "wo_format_explaination",
                              "wo_oneshot", "wo_change_order"]
        print(f"\n{'='*60}")
        print(f"RUNNING ALL VARIANTS: {len(available_variants)} variants + standard")
        print(f"{'='*60}")
        all_results = []
        # Run standard version first (no variants)
        print(f"\n[1/{len(available_variants)+1}] Running STANDARD (no variants)")
        try:
            pipeline = SimpleBenchmarkPipeline(
                args.converted_prompts_dir,
                init_clients=True,
                openai_model=args.openai_model,
                google_model=args.google_model,
                bedrock_model=args.bedrock_model,
                variants=None,
                output_dir_name=args.output_dir_name
            )
            success = pipeline.run_benchmark(
                dataset=args.dataset,
                task=args.task,
                format_type=args.format,
                model=args.model,
                max_cases=args.max_cases,
                start_case=args.start_case
            )
            all_results.append(("STANDARD", success))
            print(f"STANDARD: {'SUCCESS' if success else 'FAILED'}")
        except Exception as e:
            logger.error(f"Failed to run standard version: {e}")
            all_results.append(("STANDARD", False))
        # Run each variant
        for i, variant in enumerate(available_variants, 2):
            print(f"\n[{i}/{len(available_variants)+1}] Running variant: {variant}")
            try:
                prompts_dir = f"converted_prompts_variants/{variant}"
                pipeline = SimpleBenchmarkPipeline(
                    prompts_dir,
                    init_clients=True,
                    openai_model=args.openai_model,
                    google_model=args.google_model,
                    bedrock_model=args.bedrock_model,
                    variants=variant,
                    output_dir_name=args.output_dir_name
                )
                success = pipeline.run_benchmark(
                    dataset=args.dataset,
                    task=args.task,
                    format_type=args.format,
                    model=args.model,
                    max_cases=args.max_cases,
                    start_case=args.start_case
                )
                all_results.append((variant, success))
                print(f"{variant}: {'SUCCESS' if success else 'FAILED'}")
            except Exception as e:
                logger.error(f"Failed to run variant {variant}: {e}")
                all_results.append((variant, False))
        # Print final summary for all variants
        print(f"\n{'='*80}")
        print("ALL VARIANTS BENCHMARK COMPLETED")
        print(f"{'='*80}")
        success_count = sum(1 for _, success in all_results if success)
        total_count = len(all_results)
        print(f"Overall Status: {success_count}/{total_count} variants completed successfully")
        print(f"Results:")
        for variant_name, success in all_results:
            status = "SUCCESS" if success else "FAILED"
            print(f" - {variant_name:<25}: {status}")
        print(f"Results Saved To: benchmark_results/ (check individual model directories)")
        print(f"{'='*80}")
        return 0 if success_count == total_count else 1
    # Initialize pipeline for single variant or standard
    try:
        # Determine the prompts directory based on variants or self_aug argument
        if args.self_aug:
            prompts_dir = f"converted_prompts_self_aug/{args.self_aug}"
        elif args.variants:
            prompts_dir = f"converted_prompts_variants/{args.variants}"
        else:
            prompts_dir = args.converted_prompts_dir
        # Don't initialize clients if just listing
        pipeline = SimpleBenchmarkPipeline(
            prompts_dir,
            init_clients=not args.list,
            openai_model=args.openai_model,
            google_model=args.google_model,
            bedrock_model=args.bedrock_model,
            variants=args.variants,
            output_dir_name=args.output_dir_name
        )
    except Exception as e:
        logger.error(f"Failed to initialize pipeline: {e}")
        return 1
    # List available options
    if args.list:
        if args.self_aug:
            print(f"Available options in converted_prompts_self_aug/{args.self_aug} directory:")
        elif args.variants:
            print(f"Available options in converted_prompts_variants/{args.variants} directory:")
        else:
            print("Available options in converted_prompts directory:")
        csv_files = pipeline.find_csv_files()
        datasets = set()
        tasks = set()
        formats = set()
        for csv_file in csv_files:
            parts = csv_file.relative_to(pipeline.converted_prompts_dir).parts
            if len(parts) >= 2:
                datasets.add(parts[0])
                tasks.add(parts[1])
            # Extract format from filename (e.g., answer_lookup_json_converted_prompts.csv)
            stem = csv_file.stem
            if '_' in stem:
                parts = stem.split('_')
                # Look for format in the parts
                for part in parts:
                    if part in ['json', 'xml', 'html', 'md', 'txt', 'ttl']:
                        formats.add(part)
                        break
        print(f"Datasets: {sorted(datasets)}")
        print(f"Tasks: {sorted(tasks)}")
        print(f"Formats: {sorted(formats)}")
        available_models = ["openai", "google", "bedrock"]  # Hardcoded since we didn't init clients
        print(f"Models: {available_models}")
        return 0
    # Run benchmark
    success = pipeline.run_benchmark(
        dataset=args.dataset,
        task=args.task,
        format_type=args.format,
        model=args.model,
        max_cases=args.max_cases,
        start_case=args.start_case,
        self_aug_type=args.self_aug
    )
    # Print final summary
    print("\n" + "="*60)
    print("BENCHMARK COMPLETED")
    print("="*60)
    print(f"Status: {'SUCCESS' if success else 'FAILED'}")
    print(f"Models Used: {getattr(pipeline, 'models_used', list(pipeline.clients.keys()))}")
    if args.variants:
        model_dirs = [f"{model}_{args.variants}" for model in getattr(pipeline, 'models_used', list(pipeline.clients.keys()))]
        print(f"Results Saved To: benchmark_results/ (directories: {', '.join(model_dirs)})")
    else:
        print(f"Results Saved To: benchmark_results/")
    print("="*60)
    return 0 if success else 1
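
# Entry point (assumed standard guard; main() returns a process exit code)
if __name__ == "__main__":
    sys.exit(main())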