Skip to content

Commit 3375aea

Browse files
committed
Fix export tutorial
1 parent a18358d commit 3375aea

4 files changed

Lines changed: 15 additions & 30 deletions

File tree

.jenkins/validate_tutorials_built.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,6 @@
2323
"intermediate_source/dqn_with_rnn_tutorial", #not working on 2.8 release reenable after 3514
2424
"intermediate_source/mnist_train_nas", # used by ax_multiobjective_nas_tutorial.py
2525
"intermediate_source/torch_compile_conv_bn_fuser",
26-
"intermediate_source/_torch_export_nightly_tutorial", # does not work on release
2726
"advanced_source/usb_semisup_learn", # fails with CUDA OOM error, should try on a different worker
2827
"unstable_source/gpu_direct_storage", # requires specific filesystem + GPUDirect Storage to be set up
2928
"recipes_source/recipes/tensorboard_with_pytorch",

conf.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -181,7 +181,6 @@ def wrapper(*args, **kwargs):
181181
"# https://docs.pytorch.org/tutorials/beginner/colab\n"
182182
"%matplotlib inline"
183183
),
184-
"ignore_pattern": r"_torch_export_nightly_tutorial.py",
185184
"pypandoc": {
186185
"extra_args": ["--mathjax", "--toc"],
187186
"filters": [".jenkins/custom_pandoc_filter.py"],

intermediate_source/torch_export_nightly_tutorial.rst

Lines changed: 0 additions & 10 deletions
This file was deleted.

intermediate_source/torch_export_tutorial.py

Lines changed: 15 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@
1616
# :func:`torch.export` is the PyTorch 2.X way to export PyTorch models into
1717
# standardized model representations, intended
1818
# to be run on different (i.e. Python-less) environments. The official
19-
# documentation can be found `here <https://pytorch.org/docs/main/export.html>`__.
19+
# documentation can be found `here <https://docs.pytorch.org/docs/main/export.html>`__.
2020
#
2121
# In this tutorial, you will learn how to use :func:`torch.export` to extract
2222
# ``ExportedProgram``'s (i.e. single-graph representations) from PyTorch programs.
@@ -79,7 +79,7 @@ def forward(self, x, y):
7979
######################################################################
8080
# Let's review some attributes of ``ExportedProgram`` that are of interest.
8181
#
82-
# The ``graph`` attribute is an `FX graph <https://pytorch.org/docs/stable/fx.html#torch.fx.Graph>`__
82+
# The ``graph`` attribute is an `FX graph <https://docs.pytorch.org/docs/stable/fx.html#torch.fx.Graph>`__
8383
# traced from the function we exported, that is, the computation graph of all PyTorch operations.
8484
# The FX graph is in "ATen IR" meaning that it contains only "ATen-level" operations.
8585
#
@@ -92,7 +92,7 @@ def forward(self, x, y):
9292
print(exported_mod)
9393

9494
######################################################################
95-
# See the ``torch.export`` `documentation <https://pytorch.org/docs/main/export.html#torch.export.export>`__
95+
# See the ``torch.export`` `documentation <https://docs.pytorch.org/docs/main/export.html#torch.export.export>`__
9696
# for more details.
9797

9898
######################################################################
@@ -220,7 +220,7 @@ def false_fn(x):
220220
# - Branch functions cannot access closure variables, except for ``self`` if the function is
221221
# defined in the scope of a method.
222222
#
223-
# For more details about ``cond``, check out the `cond documentation <https://pytorch.org/docs/main/cond.html>`__.
223+
# For more details about ``cond``, check out the `cond documentation <https://docs.pytorch.org/docs/main/cond.html>`__.
224224

225225
######################################################################
226226
# We can also use ``map``, which applies a function across the first dimension
@@ -308,7 +308,7 @@ def forward(
308308
######################################################################
309309
# Before we look at the program that's produced, let's understand what specifying ``dynamic_shapes`` entails,
310310
# and how that interacts with export. For every input dimension where a ``Dim`` object is specified, a symbol is
311-
# `allocated <https://pytorch.org/docs/main/export.programming_model.html#basics-of-symbolic-shapes>`_,
311+
# `allocated <https://docs.pytorch.org/docs/main/user_guide/torch_compiler/export.programming_model.html#basics-of-symbolic-shapes>`_,
312312
# taking on a range of ``[2, inf]`` (why not ``[0, inf]`` or ``[1, inf]``? we'll explain later in the
313313
# 0/1 specialization section).
314314
#
@@ -605,7 +605,7 @@ def forward(self, x, y):
605605
# How are these values represented in the exported program? In the `Constraints/Dynamic Shapes <https://pytorch.org/tutorials/intermediate/torch_export_tutorial.html#constraints-dynamic-shapes>`_
606606
# section, we talked about allocating symbols to represent dynamic input dimensions.
607607
# The same happens here: we allocate symbols for every data-dependent value that appears in the program. The important distinction is that these are "unbacked" symbols,
608-
# in contrast to the "backed" symbols allocated for input dimensions. The `"backed/unbacked" <https://pytorch.org/docs/main/export.programming_model.html#basics-of-symbolic-shapes>`_
608+
# in contrast to the "backed" symbols allocated for input dimensions. The `"backed/unbacked" <https://docs.pytorch.org/docs/main/user_guide/torch_compiler/export.programming_model.html#basics-of-symbolic-shapes>`_
609609
# nomenclature refers to the presence/absence of a "hint" for the symbol: a concrete value backing the symbol, that can inform the compiler on how to proceed.
610610
#
611611
# In the input shape symbol case (backed symbols), these hints are simply the sample input shapes provided, which explains why control-flow branching is determined by the sample input properties.
@@ -637,7 +637,7 @@ def forward(self, x, y):
637637
# ^^^^^^^^^^^^^^^^^^^^^^
638638
#
639639
# But the case above is easy to export, because the concrete values of these symbols aren't used in any compiler decision-making; all that's relevant is that the return values are unbacked symbols.
640-
# The data-dependent errors highlighted in this section are cases like the following, where `data-dependent guards <https://pytorch.org/docs/main/export.programming_model.html#control-flow-static-vs-dynamic>`_ are encountered:
640+
# The data-dependent errors highlighted in this section are cases like the following, where `data-dependent guards <https://docs.pytorch.org/docs/main/user_guide/torch_compiler/export.programming_model.html#control-flow-static-vs-dynamic>`_ are encountered:
641641

642642
class Foo(torch.nn.Module):
643643
def forward(self, x, y):
@@ -779,15 +779,15 @@ def forward(self, x, y):
779779

780780
######################################################################
781781
# Data-dependent errors can be much more involved, and there are many more options in your toolkit to deal with them: ``torch._check_is_size()``, ``guard_size_oblivious()``, or real-tensor tracing, as starters.
782-
# For more in-depth guides, please refer to the `Export Programming Model <https://pytorch.org/docs/main/export.programming_model.html>`_,
782+
# For more in-depth guides, please refer to the `Export Programming Model <https://docs.pytorch.org/docs/main/user_guide/torch_compiler/export.programming_model.html>`_,
783783
# or `Dealing with GuardOnDataDependentSymNode errors <https://docs.google.com/document/d/1HSuTTVvYH1pTew89Rtpeu84Ht3nQEFTYhAX3Ypa_xJs>`_.
784784

785785
######################################################################
786786
# Custom Ops
787787
# ----------
788788
#
789789
# ``torch.export`` can export PyTorch programs with custom operators. Please
790-
# refer to `this page <https://pytorch.org/tutorials/advanced/custom_ops_landing_page.html>`__
790+
# refer to `this page <https://docs.pytorch.org/tutorials/advanced/custom_ops_landing_page.html>`__
791791
# on how to author a custom operator in either C++ or Python.
792792
#
793793
# The following is an example of registering a custom operator in python to be
@@ -843,10 +843,7 @@ def forward(self, x):
843843
print(torch.ops.aten.add_.Tensor._schema.is_mutable)
844844

845845
######################################################################
846-
# This generic IR can be used to train in eager PyTorch Autograd. This IR can be
847-
# more explicitly reached through the API ``torch.export.export_for_training``,
848-
# which was introduced in PyTorch 2.5, but calling ``torch.export.export``
849-
# should produce the same graph as of PyTorch 2.6.
846+
# This generic IR can be used to train in eager PyTorch Autograd.
850847

851848
class DecompExample(torch.nn.Module):
852849
def __init__(self) -> None:
@@ -859,7 +856,7 @@ def forward(self, x):
859856
x = self.bn(x)
860857
return (x,)
861858

862-
ep_for_training = torch.export.export_for_training(DecompExample(), (torch.randn(1, 1, 3, 3),))
859+
ep_for_training = torch.export.export(DecompExample(), (torch.randn(1, 1, 3, 3),))
863860
print(ep_for_training.graph)
864861

865862
######################################################################
@@ -882,7 +879,7 @@ def forward(self, x):
882879
######################################################################
883880
# We can also further lower this exported program to an operator set which only
884881
# contains the
885-
# `Core ATen Operator Set <https://pytorch.org/docs/main/torch.compiler_ir.html#core-aten-ir>`__,
882+
# `Core ATen Operator Set <https://docs.pytorch.org/docs/main/torch.compiler_ir.html#core-aten-ir>`__,
886883
# which is a collection of only ~180 operators. This IR is optimal for backends
887884
# who do not want to reimplement all ATen operators.
888885

@@ -925,7 +922,7 @@ def my_awesome_custom_conv2d_function(x, weight, bias, stride=[1, 1], padding=[0
925922
# rewrite parts of their model code. We have seen examples of this earlier in the tutorial -- for example, rewriting
926923
# if-statements using ``cond``.
927924
#
928-
# `ExportDB <https://pytorch.org/docs/main/generated/exportdb/index.html>`__ is the standard reference that documents
925+
# `ExportDB <https://docs.pytorch.org/docs/main/generated/exportdb/index.html>`__ is the standard reference that documents
929926
# supported and unsupported Python/PyTorch features for ``torch.export``. It is essentially a list of program samples, each
930927
# of which represents the usage of one particular Python/PyTorch feature and its interaction with ``torch.export``.
931928
# Examples are also tagged by category so that they can be more easily searched.
@@ -961,7 +958,7 @@ def cond_predicate(x):
961958
# produced by ``torch.export`` eagerly will be equivalent to running the eager
962959
# module. To optimize the execution of the Exported Program, we can pass this
963960
# exported artifact to backends such as Inductor through ``torch.compile``,
964-
# `AOTInductor <https://pytorch.org/docs/main/torch.compiler_aot_inductor.html>`__,
961+
# `AOTInductor <https://docs.pytorch.org/docs/main/torch.compiler_aot_inductor.html>`__,
965962
# or `TensorRT <https://pytorch.org/TensorRT/dynamo/dynamo_export.html>`__.
966963

967964
class M(torch.nn.Module):
@@ -997,7 +994,7 @@ def forward(self, x):
997994
#
998995
# # Load and run the .so file in Python.
999996
# # To load and run it in a C++ environment, see:
1000-
# # https://pytorch.org/docs/main/torch.compiler_aot_inductor.html
997+
# # https://docs.pytorch.org/docs/main/torch.compiler_aot_inductor.html
1001998
# aoti_compiled = torch._inductor.aoti_load_package(pt2_path)
1002999
# res = aoti_compiled(inp)
10031000

0 commit comments

Comments
 (0)