modeling_gpt2_with_pcw.py
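# GPT-2 with parallel context windows (PCW): the subclass below restores the pretrained
# position embedding (wpe) weights to support longer contexts, and overrides
# prepare_inputs_for_generation() so that generate() can seed its cache with the combined
# past key values of independently encoded context windows.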
from abc import ABC
from typing import Tuple, Optional, Dict

import torch
from transformers import GPT2LMHeadModel
from transformers.configuration_utils import PretrainedConfig

from pcw_wrapper import generate_pcw_position_ids


class GPT2LMHeadPCW(GPT2LMHeadModel, ABC):
    def __init__(self, config: PretrainedConfig):
        super().__init__(config)
        self._adapt_weights()

    def _adapt_weights(self):
        # Override the regular loading of the wpe (position embedding) weights,
        # since we add support for longer contexts.
        self.transformer.wpe = GPT2LMHeadModel.from_pretrained(self.config.name_or_path).transformer.wpe

    def prepare_inputs_for_generation(self,
                                      input_ids: torch.LongTensor,
                                      past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
                                      windows_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
                                      max_window_size: Optional[int] = None,
                                      sum_windows_size: Optional[int] = None,
                                      **kwargs
                                      ) -> Dict:
        """input_ids:
            ids of the task tokens.
        attention_mask:
            concatenation of the windows' and the task tokens' attention masks.

        Note (past_key_values vs windows_key_values):
            When generating the first token, past_key_values is None while windows_key_values holds the
            combined past key values of the context windows. In subsequent steps, past_key_values is the
            concatenation of windows_key_values and the previous generations, so windows_key_values is
            effectively ignored.
        """
        token_type_ids = kwargs.get("token_type_ids")
        # keep only the last token of input_ids once past_key_values holds the cache
        if past_key_values:
            input_ids = input_ids[:, -1].unsqueeze(-1)
            if token_type_ids is not None:
                token_type_ids = token_type_ids[:, -1].unsqueeze(-1)

        attention_mask = kwargs.get("attention_mask")
        position_ids = kwargs.get("position_ids")
        if attention_mask is not None and position_ids is None:
            # create PCW's position ids on the fly
            position_ids = generate_pcw_position_ids(attention_mask, max_window_size, past_key_values,
                                                     sum_windows_size, windows_key_values)
        else:
            position_ids = None

        # first generation step: seed the cache with the windows' combined past key values
        if windows_key_values and not past_key_values:
            past_key_values = windows_key_values

        return {
            "input_ids": input_ids,
            "past_key_values": past_key_values,
            "use_cache": kwargs.get("use_cache"),
            "position_ids": position_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
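

if __name__ == "__main__":
    # A minimal usage sketch. In the repository, encoding the windows and building the
    # combined cache and attention mask is handled by pcw_wrapper; here a single context
    # window is used for illustration, so the "combined" cache is simply that window's
    # past_key_values and max_window_size == sum_windows_size. Assumes the tuple-style
    # past_key_values of the transformers version this repository targets.
    from transformers import GPT2Tokenizer

    tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
    model = GPT2LMHeadPCW.from_pretrained("gpt2")

    window_ids = tokenizer("Review: great movie! Sentiment: positive.", return_tensors="pt").input_ids
    task_ids = tokenizer("Review: boring plot. Sentiment:", return_tensors="pt").input_ids

    with torch.no_grad():
        # encode the context window once; its key/values seed the generation cache
        windows_key_values = model(window_ids).past_key_values

    window_len, task_len = window_ids.shape[1], task_ids.shape[1]
    # attention mask covering the cached window tokens plus the task tokens
    attention_mask = torch.ones(1, window_len + task_len, dtype=torch.long)

    # extra kwargs are forwarded by generate() to prepare_inputs_for_generation above
    output = model.generate(task_ids,
                            attention_mask=attention_mask,
                            windows_key_values=windows_key_values,
                            max_window_size=window_len,
                            sum_windows_size=window_len,
                            max_new_tokens=8)
    print(tokenizer.decode(output[0]))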