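"""apply_vocab_filter.py

Shrink a causal LM's output vocabulary by slicing its lm_head down to a
whitelist of token IDs, then verify the surgery with a small greedy
decoding loop. Assumes TOKEN_FILE is a CSV with a `token_id` column
holding original-vocabulary IDs.
"""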
import gc

import pandas as pd
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# --- Configuration ---
MODEL_ID = "swiss-ai/Apertus-8B-Instruct-2509"
TOKEN_FILE = "apertus_bnc_token_frequencies.csv"
DEVICE = "cuda:0"


def apply_english_vocab():
print(f"1. Loading English Token List from {TOKEN_FILE}...")
df = pd.read_csv(TOKEN_FILE)
# Extract the IDs
english_indices = set(df['token_id'].unique().tolist())
print(f"2. Loading Tokenizer to check special tokens...")
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
# SAFETY CHECK: Ensure EOS token is in the list!
# If we delete the period or the "Stop" button, the model will generate forever.
if tokenizer.eos_token_id not in english_indices:
print(f" WARNING: EOS token ({tokenizer.eos_token_id}) was missing! Adding it.")
english_indices.add(tokenizer.eos_token_id)
# Sort them (Crucial for consistent mapping)
sorted_indices = sorted(list(english_indices))
new_vocab_size = len(sorted_indices)
# Create the Translator (New Index -> Old ID)
index_map = torch.tensor(sorted_indices, device=DEVICE)
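    # e.g. if sorted_indices == [5, 9, 42], reduced-logit index 1 maps
    # back to original token ID 9 via index_map[1].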
print(f" Final Reduced Vocab Size: {new_vocab_size}")
print(f"3. Loading Model: {MODEL_ID}...")
model = AutoModelForCausalLM.from_pretrained(
MODEL_ID,
dtype=torch.bfloat16,
device_map=DEVICE,
trust_remote_code=True
)
print("4. Performing Brain Surgery (Slicing lm_head)...")
original_head = model.lm_head
hidden_size = original_head.in_features
# A. Get full weights
full_weights = original_head.weight.data
# B. Slice only the English rows
# This physically copies the weights we want into a new tensor
reduced_weights = full_weights[sorted_indices, :]
# C. Delete old head
del model.lm_head
gc.collect()
torch.cuda.empty_cache()
# D. Create new lightweight head
model.lm_head = torch.nn.Linear(hidden_size, new_vocab_size, bias=False, device=DEVICE, dtype=torch.bfloat16)
model.lm_head.weight.data = reduced_weights
# E. Update config
model.config.vocab_size = new_vocab_size
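    # NOTE (assumption): this relies on the model NOT tying its input
    # embeddings to lm_head; with tied weights, replacing lm_head alone
    # would silently break the tie. Check model.config.tie_word_embeddings.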
    # --- Verification: Generate Text ---
    print("-" * 60)
    print("5. Verifying Generation (Custom Loop)")
    input_text = "The future of AI is"
    inputs = tokenizer(input_text, return_tensors="pt").to(DEVICE)

    # Standard model.generate() won't work easily because of the ID mismatch
    # between the reduced output space and the original input space,
    # so a simple manual loop proves the sliced head works.
    curr_ids = inputs.input_ids
    print(f"Prompt: '{input_text}'")

    for _ in range(10):  # Generate up to 10 tokens
        with torch.no_grad():
            outputs = model(curr_ids)
            next_token_logits = outputs.logits[0, -1, :]

        # 1. Greedy pick from the REDUCED vocab
        new_id = torch.argmax(next_token_logits).item()

        # 2. Translate back to the ORIGINAL vocab ID
        original_id = index_map[new_id].item()

        # 3. Print the decoded token
        word = tokenizer.decode([original_id])
        print(f"Gen: {word} (Map: {new_id} -> {original_id})")

        # 4. Append the ORIGINAL ID back to the input.
        # The model *inputs* are still original IDs: only the *output* layer
        # was sliced, and the embedding layer at the start still handles the
        # full original range (it could be sliced too if strictly needed).
        curr_ids = torch.cat([curr_ids, torch.tensor([[original_id]], device=DEVICE)], dim=1)

        if original_id == tokenizer.eos_token_id:
            break

    print("\nFull Sequence:", tokenizer.decode(curr_ids[0]))

if __name__ == "__main__":
    apply_english_vocab()