python.closure

cond_closed_over_variable

Note

Tags: python.closure, torch.cond

Support Level: SUPPORTED

Original source code:

import torch

from functorch.experimental.control_flow import cond


class CondClosedOverVariable(torch.nn.Module):
    """
    torch.cond() supports branches closed over arbitrary variables.
    """

    def forward(self, pred, x):
        def true_fn(val):
            return x * 2

        def false_fn(val):
            return x - 2

        return cond(pred, true_fn, false_fn, [x + 1])

Result:

ExportedProgram:
    class GraphModule(torch.nn.Module):
        def forward(self, arg0_1: b8[], arg1_1: f32[3, 2]):
            #
            sym_size_int = torch.ops.aten.sym_size.int(arg1_1, 0)
            sym_size_int_1 = torch.ops.aten.sym_size.int(arg1_1, 1)
            eq = sym_size_int_1 == 2;  sym_size_int_1 = None
            scalar_tensor_default: f32[] = torch.ops.aten.scalar_tensor.default(eq);  eq = None
            _assert_async_msg = torch.ops.aten._assert_async.msg(scalar_tensor_default, 'Input arg1_1.shape[1] is specialized at 2');  scalar_tensor_default = None
            eq_1 = sym_size_int == 3;  sym_size_int = None
            scalar_tensor_default_1: f32[] = torch.ops.aten.scalar_tensor.default(eq_1);  eq_1 = None
            _assert_async_msg_1 = torch.ops.aten._assert_async.msg(scalar_tensor_default_1, 'Input arg1_1.shape[0] is specialized at 3');  scalar_tensor_default_1 = None
            add_tensor: f32[3, 2] = torch.ops.aten.add.Tensor(arg1_1, 1)
            submodule_0 = self.submodule_0
            submodule_1 = self.submodule_1
            cond: f32[3, 2] = torch.ops.cond(arg0_1, submodule_0, submodule_1, [add_tensor, arg1_1, arg1_1]);  arg0_1 = submodule_0 = submodule_1 = add_tensor = arg1_1 = None
            return (cond,)

        class GraphModule(torch.nn.Module):
            def forward(self, arg0_1: f32[3, 2], arg1_1: f32[3, 2], arg2_1: f32[3, 2]):
                mul_tensor: f32[3, 2] = torch.ops.aten.mul.Tensor(arg2_1, 2);  arg2_1 = None
                return mul_tensor

        class GraphModule(torch.nn.Module):
            def forward(self, arg0_1: f32[3, 2], arg1_1: f32[3, 2], arg2_1: f32[3, 2]):
                sub_tensor: f32[3, 2] = torch.ops.aten.sub.Tensor(arg2_1, 2);  arg2_1 = None
                return sub_tensor

Graph Signature: ExportGraphSignature(parameters=[], buffers=[], user_inputs=['arg0_1', 'arg1_1'], user_outputs=['cond'], inputs_to_parameters={}, inputs_to_buffers={}, buffers_to_mutate={}, backward_signature=None, assertion_dep_token=None)
Symbol to range: {}
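
Note how the closed-over x is lifted into the cond operands: each branch submodule receives it as an explicit argument, which is why arg1_1 appears twice in [add_tensor, arg1_1, arg1_1] (once per branch), while the explicit [x + 1] operand arrives as the unused val. A minimal sketch of how one might produce and run such an ExportedProgram; the torch.export.export entry point and the ep.module() call are assumptions based on recent releases, and the export API has moved between PyTorch versions (older ones spelled it torch._export.export):

import torch
from torch.export import export  # entry point location varies across releases

# Inputs matching the specialized shapes in the graph above:
# a scalar boolean predicate (b8[]) and a [3, 2] float tensor.
pred = torch.tensor(True)
x = torch.randn(3, 2)

ep = export(CondClosedOverVariable(), (pred, x))
print(ep)  # prints an ExportedProgram like the one above

# Run the exported graph; the true branch returns x * 2, using the
# closed-over x rather than the x + 1 operand.
print(ep.module()(pred, x))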

nested_function

Note

Tags: python.closure

Support Level: SUPPORTED

Original source code:

import torch



def nested_function(a, b):
    """
    Nested functions are traced through. Side effects on global captures
    are not supported though.
    """
    x = a + b
    z = a - b

    def closure(y):
        nonlocal x
        x += 1
        return x * y + z

    return closure(x)

Result:

ExportedProgram:
    class GraphModule(torch.nn.Module):
        def forward(self, arg0_1: f32[3, 2], arg1_1: f32[2]):
            #
            sym_size_int = torch.ops.aten.sym_size.int(arg0_1, 0)
            sym_size_int_1 = torch.ops.aten.sym_size.int(arg0_1, 1)
            sym_size_int_2 = torch.ops.aten.sym_size.int(arg1_1, 0)
            eq = sym_size_int_2 == 2;  sym_size_int_2 = None
            scalar_tensor_default: f32[] = torch.ops.aten.scalar_tensor.default(eq);  eq = None
            _assert_async_msg = torch.ops.aten._assert_async.msg(scalar_tensor_default, 'Input arg1_1.shape[0] is specialized at 2');  scalar_tensor_default = None
            eq_1 = sym_size_int_1 == 2;  sym_size_int_1 = None
            scalar_tensor_default_1: f32[] = torch.ops.aten.scalar_tensor.default(eq_1);  eq_1 = None
            _assert_async_msg_1 = torch.ops.aten._assert_async.msg(scalar_tensor_default_1, 'Input arg0_1.shape[1] is specialized at 2');  scalar_tensor_default_1 = None
            eq_2 = sym_size_int == 3;  sym_size_int = None
            scalar_tensor_default_2: f32[] = torch.ops.aten.scalar_tensor.default(eq_2);  eq_2 = None
            _assert_async_msg_2 = torch.ops.aten._assert_async.msg(scalar_tensor_default_2, 'Input arg0_1.shape[0] is specialized at 3');  scalar_tensor_default_2 = None
            add_tensor: f32[3, 2] = torch.ops.aten.add.Tensor(arg0_1, arg1_1)
            sub_tensor: f32[3, 2] = torch.ops.aten.sub.Tensor(arg0_1, arg1_1);  arg0_1 = arg1_1 = None
            add_tensor_1: f32[3, 2] = torch.ops.aten.add.Tensor(add_tensor, 1);  add_tensor = None
            mul_tensor: f32[3, 2] = torch.ops.aten.mul.Tensor(add_tensor_1, add_tensor_1);  add_tensor_1 = None
            add_tensor_2: f32[3, 2] = torch.ops.aten.add.Tensor(mul_tensor, sub_tensor);  mul_tensor = sub_tensor = None
            return (add_tensor_2,)

Graph Signature: ExportGraphSignature(parameters=[], buffers=[], user_inputs=['arg0_1', 'arg1_1'], user_outputs=['add_tensor_2'], inputs_to_parameters={}, inputs_to_buffers={}, buffers_to_mutate={}, backward_signature=None, assertion_dep_token=None)
Symbol to range: {}
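
One subtlety: at the call closure(x), y aliases x, and x += 1 updates the tensor in place, so the closure computes (x + 1) * (x + 1) + z rather than (x + 1) * x + z. That is exactly the mul(add_tensor_1, add_tensor_1) node in the graph above. A minimal eager-mode check, assuming random inputs with the shapes shown in the graph:

import torch

a = torch.randn(3, 2)
b = torch.randn(2)  # broadcasts against a, matching the f32[2] input above

out = nested_function(a, b)
# x += 1 mutates the tensor in place and y aliases x, so the closure
# effectively squares (a + b + 1) before adding z = a - b.
expected = (a + b + 1) * (a + b + 1) + (a - b)
assert torch.allclose(out, expected)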
