Skip to content
Closed
Changes from 2 commits
Commits
Show all changes
40 commits
Select commit Hold shift + click to select a range
c335f6e
train with only layer distillation losses
oleksost Dec 16, 2025
e06a4b2
unscaled loss logging + training with distillation loss factor = 0
oleksost Dec 16, 2025
179ae25
make logging more explicit
oleksost Dec 17, 2025
af456f0
Merge remote-tracking branch 'origin/main' into train_only_layer_losses
oleksost Dec 17, 2025
9968aac
clean + tests
oleksost Dec 17, 2025
945c5a7
nvm
oleksost Dec 17, 2025
4b6e3d7
forward KL
oleksost Dec 19, 2025
c5fefa0
test forward kl
oleksost Dec 19, 2025
4119596
wip: report unscaled + kl loss
oleksost Dec 19, 2025
b55a0a4
loss config
oleksost Dec 22, 2025
097baeb
wip
oleksost Dec 22, 2025
d773d98
tests
oleksost Dec 22, 2025
35400c1
Merge remote-tracking branch 'origin/main' into train_only_layer_losses
oleksost Dec 22, 2025
282925c
test
oleksost Dec 22, 2025
0f73ea2
tests
oleksost Dec 22, 2025
04a0193
Merge branch 'main' into train_only_layer_losses
oleksost Dec 22, 2025
fa85c41
wip
oleksost Dec 22, 2025
feb416e
Merge branch 'train_only_layer_losses' of https://github.com/ServiceN…
oleksost Dec 22, 2025
31cfb84
wip
oleksost Dec 23, 2025
24fe67b
no grad if factor 0
oleksost Dec 23, 2025
00f6118
Merge remote-tracking branch 'origin/main' into train_only_layer_losses
oleksost Dec 23, 2025
0cadf98
Merge branch 'main' into train_only_layer_losses
oleksost Dec 23, 2025
0e562e9
addressed comments
oleksost Dec 23, 2025
2a474e2
Merge branch 'train_only_layer_losses' of https://github.com/ServiceN…
oleksost Dec 23, 2025
52c1c11
addressed comments
oleksost Dec 23, 2025
406d0a2
Removed Targets class
oleksost Dec 30, 2025
f25380a
fixes
oleksost Dec 30, 2025
8adb7dd
imports
oleksost Dec 30, 2025
1ce641d
polish naming
oleksost Jan 6, 2026
95f14af
addressing comments
oleksost Jan 8, 2026
5ad4c0c
explicit z_loss grads
oleksost Jan 8, 2026
0a66e14
removed z_loss as aux loss
oleksost Jan 8, 2026
f8f7041
move loss configs to the lm config
oleksost Jan 8, 2026
ab9c917
tests
oleksost Jan 8, 2026
89470dc
Merge branch 'main' into train_only_layer_losses
oleksost Jan 9, 2026
6e54c93
comments
oleksost Jan 12, 2026
8137b8c
Merge remote-tracking branch 'origin/main' into train_only_layer_losses
jlamypoirier Jan 13, 2026
3c8f3c2
misc
jlamypoirier Jan 13, 2026
705c482
fix
jlamypoirier Jan 13, 2026
3c8ce50
Merge branch 'main' into train_only_layer_losses
jlamypoirier Jan 16, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
47 changes: 41 additions & 6 deletions fast_llm/layers/language_model/head.py
Original file line number Diff line number Diff line change
Expand Up @@ -370,11 +370,13 @@ def _logits_cross_entropy_forward_backward(
logits_scale_factor=self._config.logits_scale_factor,
target_format=TargetFormat.labels,
)
if self.training and losses is not None:
losses[self._ce_loss_name_unscaled].append(lm_loss.detach())
lm_loss = lm_loss * self._config.language_model_loss_factor
else:
lm_loss, lm_grad = None, None

if distillation_target is not None and self._config.distillation_loss_factor > 0.0:
if distillation_target is not None:
Comment thread
oleksost marked this conversation as resolved.
Outdated
if self._config.distillation_loss_implementation == DistillationLossImpl.reverse_kl:
distillation_loss, distillation_grad = reverse_kl_forward_backward(
logits.flatten(0, -2),
Expand Down Expand Up @@ -405,18 +407,19 @@ def _logits_cross_entropy_forward_backward(
raise ValueError(
f"Invalid distillation loss implementation: {self._config.distillation_loss_implementation}"
)
if self.training and losses is not None: # we keep track of unscaled losses for model comparison purposes
losses[self._distillation_loss_name_unscaled].append(distillation_loss.detach())
Comment thread
oleksost marked this conversation as resolved.
Outdated
distillation_loss = distillation_loss * self._config.distillation_loss_factor
else:
distillation_loss, distillation_grad = None, None

# TODO: de-allocate earlier.
del logits

# TODO: Accumulate grads in-place to reduce memory and compute overhead.
grad = _add_tensors(dpo_grad, lm_grad, distillation_grad)

# TODO: Return individual losses?
loss = _add_tensors(dpo_loss, lm_loss, distillation_loss)

# TODO: de-allocate earlier.
del logits

if self.training and losses is not None:
if dpo_loss is not None:
losses[self._dpo_loss_name].append(dpo_loss.detach())
Expand All @@ -434,6 +437,13 @@ def _loss_name(self) -> str:
name = f"{name}_{self._prediction_distance}"
return name

@functools.cached_property
def _ce_loss_name_unscaled(self) -> str:
name = "language_model_loss_unscaled"
if self._prediction_distance > 0:
name = f"{name}_{self._prediction_distance}"
return name

@functools.cached_property
def _z_loss_name(self) -> str:
name = "z_loss"
Expand Down Expand Up @@ -462,8 +472,24 @@ def _distillation_loss_name(self) -> str:
name = f"{name}_{self._prediction_distance}"
return name

@functools.cached_property
def _distillation_loss_name_unscaled(self) -> str:
name = "distillation_loss_unscaled"
if self._prediction_distance > 0:
name = f"{name}_{self._prediction_distance}"
return name

def get_loss_definitions(self, count: int = 1) -> list[LossDef]:
loss_defs = [LossDef(name=self._loss_name, formatted_name=_format_name(self._loss_name), count=count)]
if self._config.distillation_model is None or self._config.language_model_loss_factor > 0.0:
# unscaled CE loss (NTP)
loss_defs = [
LossDef(
name=self._ce_loss_name_unscaled,
formatted_name=_format_name(self._ce_loss_name_unscaled),
count=count,
)
]
if self._config.logit_z_loss:
loss_defs.append(
LossDef(name=self._z_loss_name, formatted_name=_format_name(self._z_loss_name), count=count)
Expand All @@ -481,6 +507,15 @@ def get_loss_definitions(self, count: int = 1) -> list[LossDef]:
count=count,
)
)
# unscaled distillation loss for comparison purposes
loss_defs.append(
LossDef(
name=self._distillation_loss_name_unscaled,
formatted_name=_format_name(self._distillation_loss_name_unscaled),
count=count,
)
)
# if we mix distillation loss and CE loss for NTP, we want to log both
if self._config.language_model_loss_factor > 0.0:
loss_defs.append(
LossDef(
Expand Down