configuration_bailing_moe_v2.py
"""Bailing MoE model configuration"""
from transformers.configuration_utils import PretrainedConfig
class BailingMoeV2Config(PretrainedConfig):
model_type = "bailing_moe_v2"
def __init__(
self,
vocab_size=30592,
hidden_size=1024,
intermediate_size=None,
num_hidden_layers=24,
num_attention_heads=16,
num_key_value_heads=0,
hidden_act="silu",
use_qkv_bias=False, # bailing only
use_qk_norm=False,
use_bias=True, # bailing only
rms_norm_eps=1e-05,
norm_head=False, # bailing only
tie_word_embeddings=False, # PretrainedConfig key, here change default value.
embedding_dropout=0.1,
attention_dropout=0.1,
output_dropout=0.1,
initializer_range=0.02,
max_position_embeddings=16384,
rope_theta=10000.0,
use_cache=True,
use_sliding_window=False,
sliding_window=81920,
max_window_layers=28,
rope_scaling=None,
pad_token_id=126081,
num_experts=16,
num_shared_experts=0,
num_experts_per_tok=2,
n_group=8,
topk_group=4,
routed_scaling_factor=2.5,
moe_intermediate_size=None,
first_k_dense_replace=0,
head_dim=None,
output_router_logits=False,
partial_rotary_factor=0.5,
router_type="topN",
_attn_implementation="flash_attention_2",
use_interleaved_frame_timestamp=True,
**kwargs,
):
        self.num_hidden_layers = num_hidden_layers
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.use_qkv_bias = use_qkv_bias
        self.use_qk_norm = use_qk_norm
        self.use_bias = use_bias
        self.norm_head = norm_head
        self.rms_norm_eps = rms_norm_eps
        self.embedding_dropout = embedding_dropout
        self.attention_dropout = attention_dropout
        self.output_dropout = output_dropout
        self.initializer_range = initializer_range
        self.max_position_embeddings = max_position_embeddings
        self.rope_theta = rope_theta
        self.use_cache = use_cache
        self.use_sliding_window = use_sliding_window
        self.sliding_window = sliding_window
        self.max_window_layers = max_window_layers
        # Derive the per-head dimension when it is not given explicitly.
        self.head_dim = head_dim or self.hidden_size // self.num_attention_heads
        self.rope_scaling = rope_scaling

        # MoE configs
        self.num_experts = num_experts
        self.num_shared_experts = num_shared_experts
        self.num_experts_per_tok = num_experts_per_tok
        self.n_group = n_group
        self.topk_group = topk_group
        self.moe_intermediate_size = moe_intermediate_size
        self.first_k_dense_replace = first_k_dense_replace
        self.output_router_logits = output_router_logits
        self.routed_scaling_factor = routed_scaling_factor
        self.partial_rotary_factor = partial_rotary_factor
        self.router_type = router_type
        self.use_interleaved_frame_timestamp = use_interleaved_frame_timestamp

        super().__init__(pad_token_id=pad_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
        # Assigned after super().__init__() so the explicit argument takes
        # precedence over any value PretrainedConfig derives from kwargs.
        self._attn_implementation = _attn_implementation
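

# ---------------------------------------------------------------------------
# Illustrative usage sketch, assuming `transformers` is installed: construct
# the config with its defaults, or override the MoE routing hyperparameters.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # All fields keep the defaults from the signature above.
    config = BailingMoeV2Config()
    print(config.model_type)  # bailing_moe_v2
    # head_dim is derived when not passed: hidden_size // num_attention_heads.
    print(config.head_dim)  # 1024 // 16 == 64

    # Routing hyperparameters can be overridden per model variant.
    moe_config = BailingMoeV2Config(num_experts=64, num_experts_per_tok=8)
    print(moe_config.num_experts, moe_config.num_experts_per_tok)  # 64 8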