We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
2 parents ecf9ca1 + ee7a536 — commit 88e802c (Copy full SHA for 88e802c)
1 file changed
bitsandbytes/nn/modules.py
@@ -222,6 +222,7 @@ def __new__(
222
quant_storage: torch.dtype = torch.uint8,
223
module: Optional["Linear4bit"] = None,
224
bnb_quantized: bool = False,
225
+ **kwargs,
226
) -> "Params4bit":
227
if data is None:
228
data = torch.empty(0)
@@ -680,6 +681,7 @@ def __new__(
680
681
has_fp16_weights=False,
682
CB: Optional[torch.Tensor] = None,
683
SCB: Optional[torch.Tensor] = None,
684
685
):
686
687
0 commit comments