
Commit fc10040
Merge pull request #1064 from Parallel-in-Time/bibtex-bibbot-1063-18eba15
pint.bib updates
2 parents 18eba15 + 0e1af52

1 file changed: _bibliography/pint.bib (15 additions & 0 deletions)
@@ -8608,6 +8608,21 @@ @article{HeEtAl2026
   year = {2026},
 }
 
+@article{HonEtAl2026,
+  author = {Hon, Sean Y. and Fung, Po Yin and Lin, Xue-lei},
+  doi = {10.1137/24m1702933},
+  issn = {1095-7162},
+  journal = {SIAM Journal on Matrix Analysis and Applications},
+  month = {February},
+  number = {1},
+  pages = {214–243},
+  publisher = {Society for Industrial & Applied Mathematics (SIAM)},
+  title = {An Optimal Diagonalization-Based Preconditioner for Parabolic Optimal Control Problems},
+  url = {http://dx.doi.org/10.1137/24M1702933},
+  volume = {47},
+  year = {2026},
+}
+
 @unpublished{JiangEtAl2026,
   abstract = {We present a new training methodology for transformers using a multilevel, layer-parallel approach. Through a neural ODE formulation of transformers, our application of a multilevel parallel-in-time algorithm for the forward and backpropagation phases of training achieves parallel acceleration over the layer dimension. This dramatically enhances parallel scalability as the network depth increases, which is particularly useful for increasingly large foundational models. However, achieving this introduces errors that cause systematic bias in the gradients, which in turn reduces convergence when closer to the minima. We develop an algorithm to detect this critical transition and either switch to serial training or systematically increase the accuracy of layer-parallel training. Results, including BERT, GPT2, ViT, and machine translation architectures, demonstrate parallel-acceleration as well as accuracy commensurate with serial pre-training while fine-tuning is unaffected.},
   author = {Shuai Jiang and Marc Salvado and Eric C. Cyr and Alena Kopaničáková and Rolf Krause and Jacob B. Schroder},
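
For reference, a minimal sketch of how the newly added entry could be cited from a LaTeX document. This is not part of the commit: it assumes a copy of _bibliography/pint.bib is on the TeX search path as pint.bib, and the document class and bibliography style are illustrative choices.

  \documentclass{article}
  % Assumes _bibliography/pint.bib is available on the TeX search path as pint.bib.
  \begin{document}
  The diagonalization-based preconditioner of \cite{HonEtAl2026}
  targets parabolic optimal control problems.
  \bibliographystyle{plain} % illustrative style; any BibTeX style works
  \bibliography{pint}
  \end{document}

Compiling with the usual latex, bibtex, latex, latex sequence resolves the citation against the updated bibliography.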
