"""Granite architecture adapter.

Base adapter for the IBM Granite model family. Provides shared config setup and
helper methods used by GraniteMoe and GraniteMoeHybrid variants.
"""

from typing import Any, Dict

from transformer_lens.conversion_utils.conversion_steps import RearrangeTensorConversion
from transformer_lens.conversion_utils.param_processing_conversion import (
    ParamProcessingConversion,
)
from transformer_lens.model_bridge.architecture_adapter import ArchitectureAdapter
from transformer_lens.model_bridge.generalized_components import (
    BlockBridge,
    EmbeddingBridge,
    GatedMLPBridge,
    LinearBridge,
    PositionEmbeddingsAttentionBridge,
    RMSNormalizationBridge,
    RotaryEmbeddingBridge,
    UnembeddingBridge,
)
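

# A minimal construction sketch, assuming only that the base
# `ArchitectureAdapter` stores `cfg` on `self.cfg` and that any attribute-style
# config object is accepted (both are assumptions; the values are hypothetical):
#
#     from types import SimpleNamespace
#
#     cfg = SimpleNamespace(
#         d_model=4096, n_heads=32, n_layers=40, d_vocab=49152,
#         n_key_value_heads=8,
#     )
#     adapter = GraniteArchitectureAdapter(cfg)
#     adapter.component_mapping["embed"]  # EmbeddingBridge over model.embed_tokens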
class GraniteArchitectureAdapter(ArchitectureAdapter):
    """Architecture adapter for dense IBM Granite models.

    Granite is a Llama-like architecture with RMSNorm, rotary position
    embeddings (RoPE), grouped-query attention (GQA), and a gated MLP with SiLU
    activation. Granite-specific scaling multipliers are handled by the HF
    model's native forward pass.

    Optional Parameters (may not exist in state_dict):
    -------------------------------------------------
    Granite models have no biases on the attention or MLP projections, and
    RMSNorm carries no bias term:

    - blocks.{i}.attn.b_Q/b_K/b_V/b_O - no bias on attention projections
    - blocks.{i}.mlp.b_in/b_gate/b_out - no bias on MLP projections
    - blocks.{i}.ln1.b, blocks.{i}.ln2.b, ln_final.b - RMSNorm has no bias
    """
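
    # Consumers of the converted weights should treat the keys listed above as
    # optional, e.g. `state_dict.get(f"blocks.{i}.attn.b_Q")` (a hypothetical
    # access pattern) rather than indexing unconditionally.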

    def __init__(self, cfg: Any) -> None:
        """Initialize the Granite architecture adapter."""
        super().__init__(cfg)

        self._setup_common_config(cfg)
        n_kv_heads = self._get_n_kv_heads()
        self.weight_processing_conversions = self._build_attn_weight_conversions(n_kv_heads)
        self.component_mapping = self._build_component_mapping()
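
    # Sets flags shared by every Granite variant. Worked example with
    # hypothetical values: d_model=4096 and n_heads=32 give
    # d_head = 4096 // 32 = 128; n_key_value_heads is copied onto both
    # default_config and self.cfg only when the checkpoint defines it (GQA).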
    def _setup_common_config(self, cfg: Any) -> None:
        """Set up config variables shared across all Granite variants."""
        self.cfg.normalization_type = "RMS"
        self.cfg.positional_embedding_type = "rotary"
        self.cfg.final_rms = True
        self.cfg.gated_mlp = True
        self.cfg.attn_only = False
        self.cfg.uses_rms_norm = True
        self.cfg.eps_attr = "variance_epsilon"

        self.default_config = {
            "d_model": cfg.d_model,
            "d_head": cfg.d_model // cfg.n_heads,
            "n_heads": cfg.n_heads,
            "n_layers": cfg.n_layers,
            "d_vocab": cfg.d_vocab,
        }

        if hasattr(cfg, "n_key_value_heads") and cfg.n_key_value_heads is not None:
            self.default_config["n_key_value_heads"] = cfg.n_key_value_heads
            self.cfg.n_key_value_heads = cfg.n_key_value_heads
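
    # GQA arithmetic, with hypothetical values: n_heads=32 and
    # n_key_value_heads=8 mean each K/V head is shared by 32 / 8 = 4 query
    # heads; when n_key_value_heads is absent, returning n_heads recovers
    # standard multi-head attention (every query head has its own K/V head).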
    def _get_n_kv_heads(self) -> int:
        """Get the number of key-value heads (for GQA or MHA)."""
        if hasattr(self.cfg, "n_key_value_heads") and self.cfg.n_key_value_heads is not None:
            return self.cfg.n_key_value_heads
        return self.cfg.n_heads
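
    # Shape note: HF stores q_proj.weight as [n_heads * d_head, d_model], so
    # "(n h) m -> n m h" rearranges it into the TransformerLens layout
    # [n_heads, d_model, d_head]. K and V use n_kv_heads so GQA checkpoints
    # keep their smaller head count; o_proj.weight is stored transposed,
    # hence "m (n h) -> n h m" giving [n_heads, d_head, d_model].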
    def _build_attn_weight_conversions(
        self, n_kv_heads: int
    ) -> Dict[str, ParamProcessingConversion | str]:
        """Build weight processing conversions for attention projections."""
        return {
            "blocks.{i}.attn.q.weight": ParamProcessingConversion(
                tensor_conversion=RearrangeTensorConversion("(n h) m -> n m h", n=self.cfg.n_heads),
            ),
            "blocks.{i}.attn.k.weight": ParamProcessingConversion(
                tensor_conversion=RearrangeTensorConversion("(n h) m -> n m h", n=n_kv_heads),
            ),
            "blocks.{i}.attn.v.weight": ParamProcessingConversion(
                tensor_conversion=RearrangeTensorConversion("(n h) m -> n m h", n=n_kv_heads),
            ),
            "blocks.{i}.attn.o.weight": ParamProcessingConversion(
                tensor_conversion=RearrangeTensorConversion("m (n h) -> n h m", n=self.cfg.n_heads),
            ),
        }
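
    # RoPE is applied inside attention rather than added to the residual
    # stream, so the bridge is built with requires_position_embeddings=True;
    # the rotary (cos, sin) tables come from the shared model.rotary_emb module
    # (see setup_component_testing below).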
    def _build_attention_bridge(self) -> PositionEmbeddingsAttentionBridge:
        """Build the standard Granite attention bridge."""
        return PositionEmbeddingsAttentionBridge(
            name="self_attn",
            config=self.cfg,
            submodules={
                "q": LinearBridge(name="q_proj"),
                "k": LinearBridge(name="k_proj"),
                "v": LinearBridge(name="v_proj"),
                "o": LinearBridge(name="o_proj"),
            },
            requires_attention_mask=True,
            requires_position_embeddings=True,
        )
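
    # The wrapped HF MLP is the standard SwiGLU-style gated block:
    # down_proj(SiLU(gate_proj(x)) * up_proj(x)), i.e. out(act(gate(x)) * in(x))
    # in the bridge's naming.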
    def _build_mlp_bridge(self) -> GatedMLPBridge:
        """Build the dense gated MLP bridge."""
        return GatedMLPBridge(
            name="mlp",
            config=self.cfg,
            submodules={
                "gate": LinearBridge(name="gate_proj"),
                "in": LinearBridge(name="up_proj"),
                "out": LinearBridge(name="down_proj"),
            },
        )
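
    # The mapping pairs TransformerLens-style paths with HF module paths, e.g.
    # blocks.{i}.ln1 -> model.layers.{i}.input_layernorm and
    # blocks.{i}.mlp.out -> model.layers.{i}.mlp.down_proj.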
    def _build_component_mapping(self) -> dict:
        """Build the full component mapping for dense Granite."""
        return {
            "embed": EmbeddingBridge(name="model.embed_tokens"),
            "rotary_emb": RotaryEmbeddingBridge(name="model.rotary_emb"),
            "blocks": BlockBridge(
                name="model.layers",
                submodules={
                    "ln1": RMSNormalizationBridge(name="input_layernorm", config=self.cfg),
                    "ln2": RMSNormalizationBridge(name="post_attention_layernorm", config=self.cfg),
                    "attn": self._build_attention_bridge(),
                    "mlp": self._build_mlp_bridge(),
                },
            ),
            "ln_final": RMSNormalizationBridge(name="model.norm", config=self.cfg),
            "unembed": UnembeddingBridge(name="lm_head", config=self.cfg),
        }
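
    # Granite shares one rotary-embedding module across all layers, so testing
    # mirrors that single module into every attention bridge.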
    def setup_component_testing(self, hf_model: Any, bridge_model: Any = None) -> None:
        """Set up rotary embedding references for Granite component testing.

        Args:
            hf_model: The HuggingFace Granite model instance.
            bridge_model: The TransformerBridge model, if available.
        """
        if not hasattr(hf_model.model, "rotary_emb"):
            return

        rotary_emb = hf_model.model.rotary_emb

        if bridge_model is not None and hasattr(bridge_model, "blocks"):
            for block in bridge_model.blocks:
                if hasattr(block, "attn"):
                    block.attn.set_rotary_emb(rotary_emb)

        # Best-effort: the generalized component may not be registered yet, so
        # lookup failures are ignored rather than raised.
        try:
            attn_bridge = self.get_generalized_component("blocks.0.attn")
            attn_bridge.set_rotary_emb(rotary_emb)
        except (AttributeError, KeyError):
            pass