From dadc2752cd9d7007f5d6408dfb2f8c5cab40958f Mon Sep 17 00:00:00 2001
From: aireenmei
Date: Thu, 5 Feb 2026 05:13:02 +0000
Subject: [PATCH] fix ungrouped-imports

---
 src/MaxText/layers/attention_op.py | 8 ++++----
 src/MaxText/layers/moe.py          | 2 +-
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/src/MaxText/layers/attention_op.py b/src/MaxText/layers/attention_op.py
index 75a868a44..ac967295a 100644
--- a/src/MaxText/layers/attention_op.py
+++ b/src/MaxText/layers/attention_op.py
@@ -68,15 +68,15 @@
     Q_LENGTH,
     Q_LENGTH_NO_EXP,
 )
+from MaxText.layers import nnx_wrappers
+from MaxText.layers.initializers import variable_to_logically_partitioned
+from MaxText.layers.quantizations import AqtQuantization as Quant
+from MaxText.sharding import logical_to_mesh_axes, maybe_shard_with_name
 from maxtext.inference import page_manager
 from maxtext.inference.kvcache import KVQuant, KVTensor
 from maxtext.kernels.attention import jax_flash_attention
 from maxtext.kernels.attention.ragged_attention import ragged_gqa
 from maxtext.kernels.attention.ragged_attention import ragged_mha
-from MaxText.layers import nnx_wrappers
-from MaxText.layers.initializers import variable_to_logically_partitioned
-from MaxText.layers.quantizations import AqtQuantization as Quant
-from MaxText.sharding import logical_to_mesh_axes, maybe_shard_with_name
 from maxtext.utils import max_utils
 import numpy as np
 from tokamax._src.ops.experimental.tpu.splash_attention import splash_attention_kernel as tokamax_splash_kernel

diff --git a/src/MaxText/layers/moe.py b/src/MaxText/layers/moe.py
index da31e841e..9c31e407d 100644
--- a/src/MaxText/layers/moe.py
+++ b/src/MaxText/layers/moe.py
@@ -32,10 +32,10 @@
 from MaxText import common_types as ctypes
 from MaxText.common_types import ShardMode
 from MaxText.sharding import maybe_shard_with_logical, create_sharding
-from maxtext.kernels import megablox as mblx
 from MaxText.sharding import logical_to_mesh_axes
 from MaxText.layers import attentions, linears, nnx_wrappers, quantizations
 from MaxText.layers.initializers import NdInitializer, default_bias_init, nd_dense_init, variable_to_logically_partitioned
+from maxtext.kernels import megablox as mblx
 from maxtext.utils import max_logging
 from maxtext.utils import max_utils
 import numpy as np
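
Context on the lint rule being fixed: pylint's ungrouped-imports check (C0412) fires when imports from the same top-level package are interleaved with imports from another package. In this tree, MaxText and maxtext are distinct (case-sensitive) package names, so the patch only makes each package's imports contiguous; no import targets change. A minimal stand-alone reproduction using stdlib modules (logging, os; chosen for illustration, not taken from this patch):

Before (pylint reports "C0412: Imports from package logging are not grouped"):

    import logging
    import os
    import logging.config  # separated from the other "logging" import by "os"

After (same-package imports are adjacent, so C0412 is no longer emitted):

    import logging
    import logging.config
    import os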