github.com/goplus/llgo@v0.8.3/py/torch/gen.go

     1  package torch
     2  
     3  import (
     4  	_ "unsafe"
     5  
     6  	"github.com/goplus/llgo/py"
     7  )
     8  
     9  const LLGoPackage = "py.torch"
    10  
    11  // SymInt-aware utility for logical negation.
    12  //
    13  //	Args:
    14  //	    a (SymBool or bool): Object to negate
    15  //
    16  //go:linkname SymNot py.sym_not
    17  func SymNot(a *py.Object) *py.Object
    18  
    19  // SymInt-aware utility for float casting.
    20  //
    21  //	Args:
    22  //	    a (SymInt, SymFloat, or object): Object to cast
    23  //
    24  //go:linkname SymFloat py.sym_float
    25  func SymFloat(a *py.Object) *py.Object
    26  
    27  // SymInt-aware utility for int casting.
    28  //
    29  //	Args:
    30  //	    a (SymInt, SymFloat, or object): Object to cast
    31  //
    32  //go:linkname SymInt py.sym_int
    33  func SymInt(a *py.Object) *py.Object
    34  
    35  // SymInt-aware utility for max().
    36  //
    37  //go:linkname SymMax py.sym_max
    38  func SymMax(a *py.Object, b *py.Object) *py.Object
    39  
    40  // SymInt-aware utility for min().
    41  //
    42  //go:linkname SymMin py.sym_min
    43  func SymMin(a *py.Object, b *py.Object) *py.Object
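//
// A hedged Go usage sketch (editorial addition, not generated output): how the
// Sym* wrappers above might be called through llgo. py.Float is assumed to be
// a Python-float constructor in github.com/goplus/llgo/py; it is not declared
// in this file.
//
//	a := py.Float(3) // assumed helper: wraps 3.0 as a Python float
//	b := py.Float(5)
//	hi := SymMax(a, b) // *py.Object holding 5.0
//	lo := SymMin(a, b) // *py.Object holding 3.0
//	_, _ = hi, lo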
    44  
    45  // Returns True if `obj` is a PyTorch tensor.
    46  //
    47  //	Note that this function is simply doing ``isinstance(obj, Tensor)``.
    48  //	Using that ``isinstance`` check is better for typechecking with mypy,
    49  //	and more explicit - so it's recommended to use that instead of
    50  //	``is_tensor``.
    51  //
    52  //	Args:
    53  //	    obj (Object): Object to test
    54  //	Example::
    55  //
    56  //	    >>> x = torch.tensor([1, 2, 3])
    57  //	    >>> torch.is_tensor(x)
    58  //	    True
    59  //
    60  //go:linkname IsTensor py.is_tensor
    61  func IsTensor(obj *py.Object) *py.Object
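//
// A hedged Go sketch (editorial): IsTensor returns a Python bool as a
// *py.Object, so the truth value lives on the Python side. py.Float is an
// assumed constructor from github.com/goplus/llgo/py.
//
//	x := py.Float(1.5) // an ordinary Python float, not a tensor
//	ok := IsTensor(x)  // Python False, wrapped in a *py.Object
//	_ = ok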
    62  
    63  // Returns True if `obj` is a PyTorch storage object.
    64  //
    65  //	Args:
    66  //	    obj (Object): Object to test
    67  //
    68  //go:linkname IsStorage py.is_storage
    69  func IsStorage(obj *py.Object) *py.Object
    70  
    71  // Sets the default ``torch.Tensor`` to be allocated on ``device``.  This
    72  //
    73  //	does not affect factory function calls which are called with an explicit
    74  //	``device`` argument.  Factory calls will be performed as if they
    75  //	were passed ``device`` as an argument.
    76  //
    77  //	To only temporarily change the default device instead of setting it
    78  //	globally, use ``with torch.device(device):`` instead.
    79  //
    80  //	The default device is initially ``cpu``.  If you set the default tensor
    81  //	device to another device (e.g., ``cuda``) without a device index, tensors
    82  //	will be allocated on whatever the current device for the device type is,
    83  //	even after :func:`torch.cuda.set_device` is called.
    84  //
    85  //	.. warning::
    86  //
    87  //	    This function imposes a slight performance cost on every Python
    88  //	    call to the torch API (not just factory functions).  If this
    89  //	    is causing problems for you, please comment on
    90  //	    https://github.com/pytorch/pytorch/issues/92701
    91  //
    92  //	.. note::
    93  //
    94  //	    This doesn't affect functions that create tensors that share the same memory as the input, like:
    95  //	    :func:`torch.from_numpy` and :func:`torch.frombuffer`
    96  //
    97  //	Args:
    98  //	    device (device or string): the device to set as default
    99  //
   100  //	Example::
   101  //
   102  //	    >>> # xdoctest: +SKIP("requires cuda, changes global state")
   103  //	    >>> torch.tensor([1.2, 3]).device
   104  //	    device(type='cpu')
   105  //	    >>> torch.set_default_device('cuda')  # current device is 0
   106  //	    >>> torch.tensor([1.2, 3]).device
   107  //	    device(type='cuda', index=0)
   108  //	    >>> torch.set_default_device('cuda:1')
   109  //	    >>> torch.tensor([1.2, 3]).device
   110  //	    device(type='cuda', index=1)
   111  //
   112  //go:linkname SetDefaultDevice py.set_default_device
   113  func SetDefaultDevice(device *py.Object) *py.Object
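//
// A hedged Go sketch (editorial): the device is passed as a Python string
// object. py.Str is assumed to be a str constructor in
// github.com/goplus/llgo/py; it is not declared in this file.
//
//	SetDefaultDevice(py.Str("cpu")) // assumed helper: wraps "cpu" as a Python str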
   114  
   115  // Sets the default ``torch.Tensor`` type to floating point tensor type
   116  //
   117  //	``t``. This type will also be used as default floating point type for
   118  //	type inference in :func:`torch.tensor`.
   119  //
   120  //	The default floating point tensor type is initially ``torch.FloatTensor``.
   121  //
   122  //	Args:
   123  //	    t (type or string): the floating point tensor type or its name
   124  //
   125  //	Example::
   126  //
   127  //	    >>> # xdoctest: +SKIP("Other tests may have changed the default type. Can we reset it?")
   128  //	    >>> torch.tensor([1.2, 3]).dtype    # initial default for floating point is torch.float32
   129  //	    torch.float32
   130  //	    >>> torch.set_default_tensor_type(torch.DoubleTensor)
   131  //	    >>> torch.tensor([1.2, 3]).dtype    # a new floating point tensor
   132  //	    torch.float64
   133  //
   134  //go:linkname SetDefaultTensorType py.set_default_tensor_type
   135  func SetDefaultTensorType(t *py.Object) *py.Object
   136  
   137  // Sets the default floating point dtype to :attr:`d`. Supports torch.float32
   138  // and torch.float64 as inputs. Other dtypes may be accepted without complaint
   139  // but are not supported and are unlikely to work as expected.
   140  //
   141  // When PyTorch is initialized its default floating point dtype is torch.float32,
   142  // and the intent of set_default_dtype(torch.float64) is to facilitate NumPy-like
   143  // type inference. The default floating point dtype is used to:
   144  //
   145  //  1. Implicitly determine the default complex dtype. When the default floating point
   146  //     type is float32 the default complex dtype is complex64, and when the default
   147  //     floating point type is float64 the default complex type is complex128.
   148  //  2. Infer the dtype for tensors constructed using Python floats or complex Python
   149  //     numbers. See examples below.
   150  //  3. Determine the result of type promotion between bool and integer tensors and
   151  //     Python floats and complex Python numbers.
   152  //
   153  // Args:
   154  //
   155  //	d (:class:`torch.dtype`): the floating point dtype to make the default.
   156  //	                          Either torch.float32 or torch.float64.
   157  //
   158  // Example:
   159  //
   160  //	>>> # xdoctest: +SKIP("Other tests may have changed the default type. Can we reset it?")
   161  //	>>> # initial default for floating point is torch.float32
   162  //	>>> # Python floats are interpreted as float32
   163  //	>>> torch.tensor([1.2, 3]).dtype
   164  //	torch.float32
   165  //	>>> # initial default for floating point is torch.complex64
   166  //	>>> # Complex Python numbers are interpreted as complex64
   167  //	>>> torch.tensor([1.2, 3j]).dtype
   168  //	torch.complex64
   169  //
   170  //	>>> torch.set_default_dtype(torch.float64)
   171  //
   172  //	>>> # Python floats are now interpreted as float64
   173  //	>>> torch.tensor([1.2, 3]).dtype    # a new floating point tensor
   174  //	torch.float64
   175  //	>>> # Complex Python numbers are now interpreted as complex128
   176  //	>>> torch.tensor([1.2, 3j]).dtype   # a new complex tensor
   177  //	torch.complex128
   178  //
   179  //go:linkname SetDefaultDtype py.set_default_dtype
   180  func SetDefaultDtype(d *py.Object) *py.Object
   181  
   182  // Sets whether PyTorch operations must use "deterministic"
   183  //
   184  //	algorithms. That is, algorithms which, given the same input, and when
   185  //	run on the same software and hardware, always produce the same output.
   186  //	When enabled, operations will use deterministic algorithms when available,
   187  //	and if only nondeterministic algorithms are available they will throw a
   188  //	:class:`RuntimeError` when called.
   189  //
   190  //	.. note:: This setting alone is not always enough to make an application
   191  //	    reproducible. Refer to :ref:`reproducibility` for more information.
   192  //
   193  //	.. note:: :func:`torch.set_deterministic_debug_mode` offers an alternative
   194  //	    interface for this feature.
   195  //
   196  //	The following normally-nondeterministic operations will act
   197  //	deterministically when ``mode=True``:
   198  //
   199  //	    * :class:`torch.nn.Conv1d` when called on CUDA tensor
   200  //	    * :class:`torch.nn.Conv2d` when called on CUDA tensor
   201  //	    * :class:`torch.nn.Conv3d` when called on CUDA tensor
   202  //	    * :class:`torch.nn.ConvTranspose1d` when called on CUDA tensor
   203  //	    * :class:`torch.nn.ConvTranspose2d` when called on CUDA tensor
   204  //	    * :class:`torch.nn.ConvTranspose3d` when called on CUDA tensor
   205  //	    * :class:`torch.nn.ReplicationPad2d` when attempting to differentiate a CUDA tensor
   206  //	    * :func:`torch.bmm` when called on sparse-dense CUDA tensors
   207  //	    * :func:`torch.Tensor.__getitem__` when attempting to differentiate a CPU tensor
   208  //	      and the index is a list of tensors
   209  //	    * :func:`torch.Tensor.index_put` with ``accumulate=False``
   210  //	    * :func:`torch.Tensor.index_put` with ``accumulate=True`` when called on a CPU
   211  //	      tensor
   212  //	    * :func:`torch.Tensor.put_` with ``accumulate=True`` when called on a CPU
   213  //	      tensor
   214  //	    * :func:`torch.Tensor.scatter_add_` when called on a CUDA tensor
   215  //	    * :func:`torch.gather` when called on a CUDA tensor that requires grad
   216  //	    * :func:`torch.index_add` when called on CUDA tensor
   217  //	    * :func:`torch.index_select` when attempting to differentiate a CUDA tensor
   218  //	    * :func:`torch.repeat_interleave` when attempting to differentiate a CUDA tensor
   219  //	    * :func:`torch.Tensor.index_copy` when called on a CPU or CUDA tensor
   220  //	    * :func:`torch.Tensor.scatter` when `src` type is Tensor and called on CUDA tensor
   221  //	    * :func:`torch.Tensor.scatter_reduce` when ``reduce='sum'`` or ``reduce='mean'`` and called on CUDA tensor
   222  //
   223  //	The following normally-nondeterministic operations will throw a
   224  //	:class:`RuntimeError` when ``mode=True``:
   225  //
   226  //	    * :class:`torch.nn.AvgPool3d` when attempting to differentiate a CUDA tensor
   227  //	    * :class:`torch.nn.AdaptiveAvgPool2d` when attempting to differentiate a CUDA tensor
   228  //	    * :class:`torch.nn.AdaptiveAvgPool3d` when attempting to differentiate a CUDA tensor
   229  //	    * :class:`torch.nn.MaxPool3d` when attempting to differentiate a CUDA tensor
   230  //	    * :class:`torch.nn.AdaptiveMaxPool2d` when attempting to differentiate a CUDA tensor
   231  //	    * :class:`torch.nn.FractionalMaxPool2d` when attempting to differentiate a CUDA tensor
   232  //	    * :class:`torch.nn.FractionalMaxPool3d` when attempting to differentiate a CUDA tensor
   233  //	    * :class:`torch.nn.MaxUnpool1d`
   234  //	    * :class:`torch.nn.MaxUnpool2d`
   235  //	    * :class:`torch.nn.MaxUnpool3d`
   236  //	    * :func:`torch.nn.functional.interpolate` when attempting to differentiate a CUDA tensor
   237  //	      and one of the following modes is used:
   238  //
   239  //	      - ``linear``
   240  //	      - ``bilinear``
   241  //	      - ``bicubic``
   242  //	      - ``trilinear``
   243  //
   244  //	    * :class:`torch.nn.ReflectionPad1d` when attempting to differentiate a CUDA tensor
   245  //	    * :class:`torch.nn.ReflectionPad2d` when attempting to differentiate a CUDA tensor
   246  //	    * :class:`torch.nn.ReflectionPad3d` when attempting to differentiate a CUDA tensor
   247  //	    * :class:`torch.nn.ReplicationPad1d` when attempting to differentiate a CUDA tensor
   248  //	    * :class:`torch.nn.ReplicationPad3d` when attempting to differentiate a CUDA tensor
   249  //	    * :class:`torch.nn.NLLLoss` when called on a CUDA tensor
   250  //	    * :class:`torch.nn.CTCLoss` when attempting to differentiate a CUDA tensor
   251  //	    * :class:`torch.nn.EmbeddingBag` when attempting to differentiate a CUDA tensor when
   252  //	      ``mode='max'``
   253  //	    * :func:`torch.Tensor.put_` when ``accumulate=False``
   254  //	    * :func:`torch.Tensor.put_` when ``accumulate=True`` and called on a CUDA tensor
   255  //	    * :func:`torch.histc` when called on a CUDA tensor
   256  //	    * :func:`torch.bincount` when called on a CUDA tensor and ``weights``
   257  //	      tensor is given
   258  //	    * :func:`torch.kthvalue` when called on a CUDA tensor
   259  //	    * :func:`torch.median` with indices output when called on a CUDA tensor
   260  //	    * :func:`torch.nn.functional.grid_sample` when attempting to differentiate a CUDA tensor
   261  //	    * :func:`torch.cumsum` when called on a CUDA tensor when dtype is floating point or complex
   262  //	    * :func:`torch.Tensor.scatter_reduce` when ``reduce='prod'`` and called on CUDA tensor
   263  //	    * :func:`torch.Tensor.resize_` when called with a quantized tensor
   264  //
   265  //	In addition, several operations fill uninitialized memory when this setting
   266  //	is turned on and when
   267  //	:attr:`torch.utils.deterministic.fill_uninitialized_memory` is turned on.
   268  //	See the documentation for that attribute for more information.
   269  //
   270  //	A handful of CUDA operations are nondeterministic if the CUDA version is
   271  //	10.2 or greater, unless the environment variable ``CUBLAS_WORKSPACE_CONFIG=:4096:8``
   272  //	or ``CUBLAS_WORKSPACE_CONFIG=:16:8`` is set. See the CUDA documentation for more
   273  //	details: `<https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility>`_
   274  //	If one of these environment variable configurations is not set, a :class:`RuntimeError`
   275  //	will be raised from these operations when called with CUDA tensors:
   276  //
   277  //	    * :func:`torch.mm`
   278  //	    * :func:`torch.mv`
   279  //	    * :func:`torch.bmm`
   280  //
   281  //	Note that deterministic operations tend to have worse performance than
   282  //	nondeterministic operations.
   283  //
   284  //	.. note::
   285  //
   286  //	    This flag does not detect or prevent nondeterministic behavior caused
   287  //	    by calling an inplace operation on a tensor with an internal memory
   288  //	    overlap or by giving such a tensor as the :attr:`out` argument for an
   289  //	    operation. In these cases, multiple writes of different data may target
   290  //	    a single memory location, and the order of writes is not guaranteed.
   291  //
   292  //	Args:
   293  //	    mode (:class:`bool`): If True, makes potentially nondeterministic
   294  //	        operations switch to a deterministic algorithm or throw a runtime
   295  //	        error. If False, allows nondeterministic operations.
   296  //
   297  //	Keyword args:
   298  //	    warn_only (:class:`bool`, optional): If True, operations that do not
   299  //	        have a deterministic implementation will throw a warning instead of
   300  //	        an error. Default: ``False``
   301  //
   302  //	Example::
   303  //
   304  //	    >>> # xdoctest: +SKIP
   305  //	    >>> torch.use_deterministic_algorithms(True)
   306  //
   307  //	    # Forward mode nondeterministic error
   308  //	    >>> torch.randn(10, device='cuda').kthvalue(1)
   309  //	    ...
   310  //	    RuntimeError: kthvalue CUDA does not have a deterministic implementation...
   311  //
   312  //	    # Backward mode nondeterministic error
   313  //	    >>> torch.nn.AvgPool3d(1)(torch.randn(3, 4, 5, 6, requires_grad=True).cuda()).sum().backward()
   314  //	    ...
   315  //	    RuntimeError: avg_pool3d_backward_cuda does not have a deterministic implementation...
   316  //
   317  //go:linkname UseDeterministicAlgorithms py.use_deterministic_algorithms
   318  func UseDeterministicAlgorithms(mode *py.Object) *py.Object
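//
// A hedged Go sketch (editorial): the mode argument is a Python bool. py.True
// is an assumed helper in github.com/goplus/llgo/py; the package may expose
// Python booleans under a different name.
//
//	UseDeterministicAlgorithms(py.True())          // assumed helper: Python True
//	enabled := AreDeterministicAlgorithmsEnabled() // Python True as *py.Object
//	_ = enabled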
   319  
   320  // Returns True if the global deterministic flag is turned on. Refer to
   321  //
   322  //	:func:`torch.use_deterministic_algorithms` documentation for more details.
   323  //
   324  //go:linkname AreDeterministicAlgorithmsEnabled py.are_deterministic_algorithms_enabled
   325  func AreDeterministicAlgorithmsEnabled() *py.Object
   326  
   327  // Returns True if the global deterministic flag is set to warn only.
   328  //
   329  //	Refer to :func:`torch.use_deterministic_algorithms` documentation for more
   330  //	details.
   331  //
   332  //go:linkname IsDeterministicAlgorithmsWarnOnlyEnabled py.is_deterministic_algorithms_warn_only_enabled
   333  func IsDeterministicAlgorithmsWarnOnlyEnabled() *py.Object
   334  
   335  // Sets the debug mode for deterministic operations.
   336  //
   337  //	.. note:: This is an alternative interface for
   338  //	    :func:`torch.use_deterministic_algorithms`. Refer to that function's
   339  //	    documentation for details about affected operations.
   340  //
   341  //	Args:
   342  //	    debug_mode(str or int): If "default" or 0, don't error or warn on
   343  //	        nondeterministic operations. If "warn" or 1, warn on
   344  //	        nondeterministic operations. If "error" or 2, error on
   345  //	        nondeterministic operations.
   346  //
   347  //go:linkname SetDeterministicDebugMode py.set_deterministic_debug_mode
   348  func SetDeterministicDebugMode(debugMode *py.Object) *py.Object
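//
// A hedged Go sketch (editorial): debug_mode accepts a string or an int, here
// passed as a Python str. py.Str is an assumed constructor in
// github.com/goplus/llgo/py.
//
//	SetDeterministicDebugMode(py.Str("warn")) // warn on nondeterministic ops
//	mode := GetDeterministicDebugMode()       // current mode as *py.Object
//	_ = mode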
   349  
   350  // Returns the current value of the debug mode for deterministic
   351  //
   352  //	operations. Refer to :func:`torch.set_deterministic_debug_mode`
   353  //	documentation for more details.
   354  //
   355  //go:linkname GetDeterministicDebugMode py.get_deterministic_debug_mode
   356  func GetDeterministicDebugMode() *py.Object
   357  
   358  // Returns the current value of float32 matrix multiplication precision. Refer to
   359  //
   360  //	:func:`torch.set_float32_matmul_precision` documentation for more details.
   361  //
   362  //go:linkname GetFloat32MatmulPrecision py.get_float32_matmul_precision
   363  func GetFloat32MatmulPrecision() *py.Object
   364  
   365  // Sets the internal precision of float32 matrix multiplications.
   366  //
   367  //	Running float32 matrix multiplications in lower precision may significantly increase
   368  //	performance, and in some programs the loss of precision has a negligible impact.
   369  //
   370  //	Supports three settings:
   371  //
   372  //	    * "highest", float32 matrix multiplications use the float32 datatype (24 mantissa
   373  //	      bits) for internal computations.
   374  //	    * "high", float32 matrix multiplications either use the TensorFloat32 datatype (10
   375  //	      mantissa bits) or treat each float32 number as the sum of two bfloat16 numbers
   376  //	      (approximately 16 mantissa bits), if the appropriate fast matrix multiplication
   377  //	      algorithms are available.  Otherwise float32 matrix multiplications are computed
   378  //	      as if the precision is "highest".  See below for more information on the bfloat16
   379  //	      approach.
   380  //	    * "medium", float32 matrix multiplications use the bfloat16 datatype (8 mantissa
   381  //	      bits) for internal computations, if a fast matrix multiplication algorithm
   382  //	      using that datatype internally is available. Otherwise float32
   383  //	      matrix multiplications are computed as if the precision is "high".
   384  //
   385  //	When using "high" precision, float32 multiplications may use a bfloat16-based algorithm
   386  //	that is more complicated than simply truncating to some smaller number mantissa bits
   387  //	(e.g. 10 for TensorFloat32, 8 for bfloat16).  Refer to [Henry2019]_ for a complete
   388  //	description of this algorithm.  To briefly explain here, the first step is to realize
   389  //	that we can perfectly encode a single float32 number as the sum of three bfloat16
   390  //	numbers (because float32 has 24 mantissa bits while bfloat16 has 8, and both have the
   391  //	same number of exponent bits).  This means that the product of two float32 numbers can
   392  //	be exactly given by the sum of nine products of bfloat16 numbers.  We can then trade
   393  //	accuracy for speed by dropping some of these products.  The "high" precision algorithm
   394  //	specifically keeps only the three most significant products, which conveniently excludes
   395  //	all of the products involving the last 8 mantissa bits of either input.  This means that
   396  //	we can represent our inputs as the sum of two bfloat16 numbers rather than three.
   397  //	Because bfloat16 fused-multiply-add (FMA) instructions are typically >10x faster than
   398  //	float32 ones, it's faster to do three multiplications and two additions with bfloat16
   399  //	precision than it is to do a single multiplication with float32 precision.
   400  //
   401  //	.. [Henry2019] http://arxiv.org/abs/1904.06376
   402  //
   403  //	.. note::
   404  //
   405  //	    This does not change the output dtype of float32 matrix multiplications,
   406  //	    it controls how the internal computation of the matrix multiplication is performed.
   407  //
   408  //	.. note::
   409  //
   410  //	    This does not change the precision of convolution operations. Other flags,
   411  //	    like `torch.backends.cudnn.allow_tf32`, may control the precision of convolution
   412  //	    operations.
   413  //
   414  //	.. note::
   415  //
   416  //	    This flag currently only affects one native device type: CUDA.
   417  //	    If "high" or "medium" are set then the TensorFloat32 datatype will be used
   418  //	    when computing float32 matrix multiplications, equivalent to setting
   419  //	    `torch.backends.cuda.matmul.allow_tf32 = True`. When "highest" (the default)
   420  //	    is set then the float32 datatype is used for internal computations, equivalent
   421  //	    to setting `torch.backends.cuda.matmul.allow_tf32 = False`.
   422  //
   423  //	Args:
   424  //	    precision(str): can be set to "highest" (default), "high", or "medium" (see above).
   425  //
   426  //go:linkname SetFloat32MatmulPrecision py.set_float32_matmul_precision
   427  func SetFloat32MatmulPrecision(precision *py.Object) *py.Object
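//
// A hedged Go sketch (editorial): precision is one of "highest", "high", or
// "medium", passed as a Python str. py.Str is an assumed constructor in
// github.com/goplus/llgo/py.
//
//	SetFloat32MatmulPrecision(py.Str("high")) // allow TF32/bfloat16 matmul paths
//	p := GetFloat32MatmulPrecision()          // current setting as *py.Object
//	_ = p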
   428  
   429  // When this flag is False (default) then some PyTorch warnings may only
   430  //
   431  //	appear once per process. This helps avoid excessive warning information.
   432  //	Setting it to True causes these warnings to always appear, which may be
   433  //	helpful when debugging.
   434  //
   435  //	Args:
   436  //	    b (:class:`bool`): If True, force warnings to always be emitted.
   437  //	                       If False, restore the default behaviour.
   438  //
   439  //go:linkname SetWarnAlways py.set_warn_always
   440  func SetWarnAlways(b *py.Object) *py.Object
   441  
   442  // Returns True if the global warn_always flag is turned on. Refer to
   443  //
   444  //	:func:`torch.set_warn_always` documentation for more details.
   445  //
   446  //go:linkname IsWarnAlwaysEnabled py.is_warn_always_enabled
   447  func IsWarnAlwaysEnabled() *py.Object
   448  
   449  // Sets the random number generator state.
   450  //
   451  //	.. note:: This function only works for CPU. For CUDA, please use
   452  //	         torch.manual_seed(seed), which works for both CPU and CUDA.
   453  //
   454  //	Args:
   455  //	    new_state (torch.ByteTensor): The desired state
   456  //
   457  //go:linkname SetRngState py.set_rng_state
   458  func SetRngState(newState *py.Object) *py.Object
   459  
   460  // Returns the random number generator state as a `torch.ByteTensor`.
   461  //
   462  //go:linkname GetRngState py.get_rng_state
   463  func GetRngState() *py.Object
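//
// A hedged Go sketch (editorial): a CPU RNG state round trip that needs no
// extra constructors, since GetRngState already yields the torch.ByteTensor
// expected by SetRngState.
//
//	state := GetRngState() // snapshot the CPU RNG state
//	// ... run code that consumes random numbers ...
//	SetRngState(state)     // restore the earlier state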
   464  
   465  // Sets the seed for generating random numbers. Returns a
   466  //
   467  //	`torch.Generator` object.
   468  //
   469  //	Args:
   470  //	    seed (int): The desired seed. Value must be within the inclusive range
   471  //	        `[-0x8000_0000_0000_0000, 0xffff_ffff_ffff_ffff]`. Otherwise, a RuntimeError
   472  //	        is raised. Negative inputs are remapped to positive values with the formula
   473  //	        `0xffff_ffff_ffff_ffff + seed`.
   474  //
   475  //go:linkname ManualSeed py.manual_seed
   476  func ManualSeed(seed *py.Object) *py.Object
   477  
   478  // Returns the initial seed for generating random numbers as a
   479  //
   480  //	Python `long`.
   481  //
   482  //go:linkname InitialSeed py.initial_seed
   483  func InitialSeed() *py.Object
   484  
   485  // Sets the seed for generating random numbers to a non-deterministic
   486  //
   487  //	random number. Returns a 64 bit number used to seed the RNG.
   488  //
   489  //go:linkname Seed py.seed
   490  func Seed() *py.Object
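//
// A hedged Go sketch (editorial): seeding the RNG. py.Long is assumed to be a
// Python-int constructor in github.com/goplus/llgo/py; its exact name and
// signature may differ.
//
//	gen := ManualSeed(py.Long(42)) // assumed helper: wraps 42 as a Python int
//	_ = gen                        // returned torch.Generator object
//	_ = InitialSeed()              // the seed just set, as a Python int
//	_ = Seed()                     // reseed non-deterministically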
   491  
   492  // save(obj, f, pickle_module=pickle, pickle_protocol=DEFAULT_PROTOCOL, _use_new_zipfile_serialization=True)
   493  //
   494  //	Saves an object to a disk file.
   495  //
   496  //	See also: :ref:`saving-loading-tensors`
   497  //
   498  //	Args:
   499  //	    obj: saved object
   500  //	    f: a file-like object (has to implement write and flush) or a string or
   501  //	       os.PathLike object containing a file name
   502  //	    pickle_module: module used for pickling metadata and objects
   503  //	    pickle_protocol: can be specified to override the default protocol
   504  //
   505  //	.. note::
   506  //	    A common PyTorch convention is to save tensors using .pt file extension.
   507  //
   508  //	.. note::
   509  //	    PyTorch preserves storage sharing across serialization. See
   510  //	    :ref:`preserve-storage-sharing` for more details.
   511  //
   512  //	.. note::
   513  //	    The 1.6 release of PyTorch switched ``torch.save`` to use a new
   514  //	    zipfile-based file format. ``torch.load`` still retains the ability to
   515  //	    load files in the old format. If for any reason you want ``torch.save``
   516  //	    to use the old format, pass the kwarg ``_use_new_zipfile_serialization=False``.
   517  //
   518  //	Example:
   519  //	    >>> # xdoctest: +SKIP("makes cwd dirty")
   520  //	    >>> # Save to file
   521  //	    >>> x = torch.tensor([0, 1, 2, 3, 4])
   522  //	    >>> torch.save(x, 'tensor.pt')
   523  //	    >>> # Save to io.BytesIO buffer
   524  //	    >>> buffer = io.BytesIO()
   525  //	    >>> torch.save(x, buffer)
   526  //
   527  //go:linkname Save py.save
   528  func Save(obj *py.Object, f *py.Object, pickleModule *py.Object, pickleProtocol *py.Object, UseNewZipfileSerialization *py.Object, DisableByteorderRecord *py.Object) *py.Object
   529  
   530  // load(f, map_location=None, pickle_module=pickle, *, weights_only=False, mmap=None, **pickle_load_args)
   531  //
   532  //	Loads an object saved with :func:`torch.save` from a file.
   533  //
   534  //	:func:`torch.load` uses Python's unpickling facilities but treats storages,
   535  //	which underlie tensors, specially. They are first deserialized on the
   536  //	CPU and are then moved to the device they were saved from. If this fails
   537  //	(e.g. because the run time system doesn't have certain devices), an exception
   538  //	is raised. However, storages can be dynamically remapped to an alternative
   539  //	set of devices using the :attr:`map_location` argument.
   540  //
   541  //	If :attr:`map_location` is a callable, it will be called once for each serialized
   542  //	storage with two arguments: storage and location. The storage argument
   543  //	will be the initial deserialization of the storage, residing on the CPU.
   544  //	Each serialized storage has a location tag associated with it which
   545  //	identifies the device it was saved from, and this tag is the second
   546  //	argument passed to :attr:`map_location`. The builtin location tags are ``'cpu'``
   547  //	for CPU tensors and ``'cuda:device_id'`` (e.g. ``'cuda:2'``) for CUDA tensors.
   548  //	:attr:`map_location` should return either ``None`` or a storage. If
   549  //	:attr:`map_location` returns a storage, it will be used as the final deserialized
   550  //	object, already moved to the right device. Otherwise, :func:`torch.load` will
   551  //	fall back to the default behavior, as if :attr:`map_location` wasn't specified.
   552  //
   553  //	If :attr:`map_location` is a :class:`torch.device` object or a string containing
   554  //	a device tag, it indicates the location where all tensors should be loaded.
   555  //
   556  //	Otherwise, if :attr:`map_location` is a dict, it will be used to remap location tags
   557  //	appearing in the file (keys), to ones that specify where to put the
   558  //	storages (values).
   559  //
   560  //	User extensions can register their own location tags and tagging and
   561  //	deserialization methods using :func:`torch.serialization.register_package`.
   562  //
   563  //	Args:
   564  //	    f: a file-like object (has to implement :meth:`read`, :meth:`readline`, :meth:`tell`, and :meth:`seek`),
   565  //	        or a string or os.PathLike object containing a file name
   566  //	    map_location: a function, :class:`torch.device`, string or a dict specifying how to remap storage
   567  //	        locations
   568  //	    pickle_module: module used for unpickling metadata and objects (has to
   569  //	        match the :attr:`pickle_module` used to serialize file)
   570  //	    weights_only: Indicates whether unpickler should be restricted to
   571  //	        loading only tensors, primitive types and dictionaries
   572  //	    mmap: Indicates whether the file should be mmaped rather than loading all the storages into memory.
   573  //	        Typically, tensor storages in the file will first be moved from disk to CPU memory, after which they
   574  //	        are moved to the location that they were tagged with when saving, or specified by ``map_location``. This
   575  //	        second step is a no-op if the final location is CPU. When the ``mmap`` flag is set, instead of copying the
   576  //	        tensor storages from disk to CPU memory in the first step, ``f`` is mmaped.
   577  //	    pickle_load_args: (Python 3 only) optional keyword arguments passed over to
   578  //	        :func:`pickle_module.load` and :func:`pickle_module.Unpickler`, e.g.,
   579  //	        :attr:`errors=...`.
   580  //
   581  //	.. warning::
   582  //	    :func:`torch.load()`, unless the `weights_only` parameter is set to `True`,
   583  //	    implicitly uses the ``pickle`` module, which is known to be insecure.
   584  //	    It is possible to construct malicious pickle data which will execute arbitrary code
   585  //	    during unpickling. Never load data that could have come from an untrusted
   586  //	    source in an unsafe mode, or that could have been tampered with. **Only load data you trust**.
   587  //
   588  //	.. note::
   589  //	    When you call :func:`torch.load()` on a file which contains GPU tensors, those tensors
   590  //	    will be loaded to GPU by default. You can call ``torch.load(.., map_location='cpu')``
   591  //	    and then :meth:`load_state_dict` to avoid GPU RAM surge when loading a model checkpoint.
   592  //
   593  //	.. note::
   594  //	    By default, we decode byte strings as ``utf-8``.  This is to avoid a common error
   595  //	    case ``UnicodeDecodeError: 'ascii' codec can't decode byte 0x...``
   596  //	    when loading files saved by Python 2 in Python 3.  If this default
   597  //	    is incorrect, you may use an extra :attr:`encoding` keyword argument to specify how
   598  //	    these objects should be loaded, e.g., :attr:`encoding='latin1'` decodes them
   599  //	    to strings using ``latin1`` encoding, and :attr:`encoding='bytes'` keeps them
   600  //	    as byte arrays which can be decoded later with ``byte_array.decode(...)``.
   601  //
   602  //	Example:
   603  //	    >>> # xdoctest: +SKIP("undefined filepaths")
   604  //	    >>> torch.load('tensors.pt', weights_only=True)
   605  //	    # Load all tensors onto the CPU
   606  //	    >>> torch.load('tensors.pt', map_location=torch.device('cpu'), weights_only=True)
   607  //	    # Load all tensors onto the CPU, using a function
   608  //	    >>> torch.load('tensors.pt', map_location=lambda storage, loc: storage, weights_only=True)
   609  //	    # Load all tensors onto GPU 1
   610  //	    >>> torch.load('tensors.pt', map_location=lambda storage, loc: storage.cuda(1), weights_only=True)
   611  //	    # Map tensors from GPU 1 to GPU 0
   612  //	    >>> torch.load('tensors.pt', map_location={'cuda:1': 'cuda:0'}, weights_only=True)
   613  //	    # Load tensor from io.BytesIO object
   614  //	    # Loading from a buffer setting weights_only=False, warning this can be unsafe
   615  //	    >>> with open('tensor.pt', 'rb') as f:
   616  //	    ...     buffer = io.BytesIO(f.read())
   617  //	    >>> torch.load(buffer, weights_only=False)
   618  //	    # Load a module with 'ascii' encoding for unpickling
   619  //	    # Loading from a module setting weights_only=False, warning this can be unsafe
   620  //	    >>> torch.load('module.pt', encoding='ascii', weights_only=False)
   621  //
   622  //go:linkname Load py.load
   623  func Load(f *py.Object, mapLocation *py.Object, pickleModule *py.Object) *py.Object
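//
// A hedged Go sketch (editorial): loading a checkpoint by path and mapping all
// storages to CPU. py.Str is an assumed str constructor in
// github.com/goplus/llgo/py, and the pickle module object is assumed to have
// been obtained elsewhere (this binding takes it as an explicit argument).
//
//	var pickle *py.Object // assume: the Python pickle module object, imported elsewhere
//	obj := Load(py.Str("tensors.pt"), py.Str("cpu"), pickle) // map everything to CPU
//	_ = obj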
   624  
   625  // Set options for printing. Items shamelessly taken from NumPy
   626  //
   627  //	Args:
   628  //	    precision: Number of digits of precision for floating point output
   629  //	        (default = 4).
   630  //	    threshold: Total number of array elements which trigger summarization
   631  //	        rather than full `repr` (default = 1000).
   632  //	    edgeitems: Number of array items in summary at beginning and end of
   633  //	        each dimension (default = 3).
   634  //	    linewidth: The number of characters per line for the purpose of
   635  //	        inserting line breaks (default = 80). Thresholded matrices will
   636  //	        ignore this parameter.
   637  //	    profile: Sane defaults for pretty printing. Can override with any of
   638  //	        the above options. (any one of `default`, `short`, `full`)
   639  //	    sci_mode: Enable (True) or disable (False) scientific notation. If
   640  //	        None (default) is specified, the value is defined by
   641  //	        `torch._tensor_str._Formatter`. This value is automatically chosen
   642  //	        by the framework.
   643  //
   644  //	Example::
   645  //
   646  //	    >>> # Limit the precision of elements
   647  //	    >>> torch.set_printoptions(precision=2)
   648  //	    >>> torch.tensor([1.12345])
   649  //	    tensor([1.12])
   650  //	    >>> # Limit the number of elements shown
   651  //	    >>> torch.set_printoptions(threshold=5)
   652  //	    >>> torch.arange(10)
   653  //	    tensor([0, 1, 2, ..., 7, 8, 9])
   654  //	    >>> # Restore defaults
   655  //	    >>> torch.set_printoptions(profile='default')
   656  //	    >>> torch.tensor([1.12345])
   657  //	    tensor([1.1235])
   658  //	    >>> torch.arange(10)
   659  //	    tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
   660  //
   661  //go:linkname SetPrintoptions py.set_printoptions
   662  func SetPrintoptions(precision *py.Object, threshold *py.Object, edgeitems *py.Object, linewidth *py.Object, profile *py.Object, sciMode *py.Object) *py.Object
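//
// A hedged Go sketch (editorial): every option is passed positionally as a
// Python object. py.Long, py.Str, and py.None are assumed helpers in
// github.com/goplus/llgo/py; their exact names may differ.
//
//	SetPrintoptions(
//		py.Long(2),        // precision
//		py.Long(1000),     // threshold
//		py.Long(3),        // edgeitems
//		py.Long(80),       // linewidth
//		py.Str("default"), // profile
//		py.None(),         // sci_mode: let the formatter decide
//	)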
   663  
   664  // Returns a 1-dimensional view of each input tensor with zero dimensions.
   665  // Input tensors with one or more dimensions are returned as-is.
   666  //
   667  // Args:
   668  //
   669  //	input (Tensor or list of Tensors)
   670  //
   671  // Returns:
   672  //
   673  //	output (Tensor or tuple of Tensors)
   674  //
   675  // Example::
   676  //
   677  //	>>> x = torch.arange(2)
   678  //	>>> x
   679  //	tensor([0, 1])
   680  //	>>> torch.atleast_1d(x)
   681  //	tensor([0, 1])
   682  //	>>> x = torch.tensor(1.)
   683  //	>>> x
   684  //	tensor(1.)
   685  //	>>> torch.atleast_1d(x)
   686  //	tensor([1.])
   687  //	>>> x = torch.tensor(0.5)
   688  //	>>> y = torch.tensor(1.)
   689  //	>>> torch.atleast_1d((x, y))
   690  //	(tensor([0.5000]), tensor([1.]))
   691  //
   692  //go:linkname Atleast1d py.atleast_1d
   693  func Atleast1d(__llgo_va_list ...interface{}) *py.Object
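//
// A hedged Go sketch (editorial): Atleast1d is variadic on the Go side,
// mirroring torch.atleast_1d(*tensors). The tensor below is assumed to come
// from other torch bindings not shown in this excerpt.
//
//	var x *py.Object  // assume: a 0-D tensor built elsewhere
//	y := Atleast1d(x) // 1-D view of x
//	_ = y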
   694  
   695  // Returns a 2-dimensional view of each input tensor with zero dimensions.
   696  // Input tensors with two or more dimensions are returned as-is.
   697  //
   698  // Args:
   699  //
   700  //	input (Tensor or list of Tensors)
   701  //
   702  // Returns:
   703  //
   704  //	output (Tensor or tuple of Tensors)
   705  //
   706  // Example::
   707  //
   708  //	>>> x = torch.tensor(1.)
   709  //	>>> x
   710  //	tensor(1.)
   711  //	>>> torch.atleast_2d(x)
   712  //	tensor([[1.]])
   713  //	>>> x = torch.arange(4).view(2, 2)
   714  //	>>> x
   715  //	tensor([[0, 1],
   716  //	        [2, 3]])
   717  //	>>> torch.atleast_2d(x)
   718  //	tensor([[0, 1],
   719  //	        [2, 3]])
   720  //	>>> x = torch.tensor(0.5)
   721  //	>>> y = torch.tensor(1.)
   722  //	>>> torch.atleast_2d((x, y))
   723  //	(tensor([[0.5000]]), tensor([[1.]]))
   724  //
   725  //go:linkname Atleast2d py.atleast_2d
   726  func Atleast2d(__llgo_va_list ...interface{}) *py.Object
   727  
   728  // Returns a 3-dimensional view of each input tensor with zero dimensions.
   729  // Input tensors with three or more dimensions are returned as-is.
   730  //
   731  // Args:
   732  //
   733  //	input (Tensor or list of Tensors)
   734  //
   735  // Returns:
   736  //
   737  //	output (Tensor or tuple of Tensors)
   738  //
   739  // Example:
   740  //
   741  //	>>> x = torch.tensor(0.5)
   742  //	>>> x
   743  //	tensor(0.5000)
   744  //	>>> torch.atleast_3d(x)
   745  //	tensor([[[0.5000]]])
   746  //	>>> y = torch.arange(4).view(2, 2)
   747  //	>>> y
   748  //	tensor([[0, 1],
   749  //	        [2, 3]])
   750  //	>>> torch.atleast_3d(y)
   751  //	tensor([[[0],
   752  //	         [1]],
   753  //	        <BLANKLINE>
   754  //	        [[2],
   755  //	         [3]]])
   756  //	>>> x = torch.tensor(1).view(1, 1, 1)
   757  //	>>> x
   758  //	tensor([[[1]]])
   759  //	>>> torch.atleast_3d(x)
   760  //	tensor([[[1]]])
   761  //	>>> x = torch.tensor(0.5)
   762  //	>>> y = torch.tensor(1.)
   763  //	>>> torch.atleast_3d((x, y))
   764  //	(tensor([[[0.5000]]]), tensor([[[1.]]]))
   765  //
   766  //go:linkname Atleast3d py.atleast_3d
   767  func Atleast3d(__llgo_va_list ...interface{}) *py.Object
   768  
   769  // Create a block diagonal matrix from provided tensors.
   770  //
   771  //	Args:
   772  //	    *tensors: One or more tensors with 0, 1, or 2 dimensions.
   773  //
   774  //	Returns:
   775  //	    Tensor: A 2 dimensional tensor with all the input tensors arranged in
   776  //	    order such that their upper left and lower right corners are
   777  //	    diagonally adjacent. All other elements are set to 0.
   778  //
   779  //	Example::
   780  //
   781  //	    >>> import torch
   782  //	    >>> A = torch.tensor([[0, 1], [1, 0]])
   783  //	    >>> B = torch.tensor([[3, 4, 5], [6, 7, 8]])
   784  //	    >>> C = torch.tensor(7)
   785  //	    >>> D = torch.tensor([1, 2, 3])
   786  //	    >>> E = torch.tensor([[4], [5], [6]])
   787  //	    >>> torch.block_diag(A, B, C, D, E)
   788  //	    tensor([[0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
   789  //	            [1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
   790  //	            [0, 0, 3, 4, 5, 0, 0, 0, 0, 0],
   791  //	            [0, 0, 6, 7, 8, 0, 0, 0, 0, 0],
   792  //	            [0, 0, 0, 0, 0, 7, 0, 0, 0, 0],
   793  //	            [0, 0, 0, 0, 0, 0, 1, 2, 3, 0],
   794  //	            [0, 0, 0, 0, 0, 0, 0, 0, 0, 4],
   795  //	            [0, 0, 0, 0, 0, 0, 0, 0, 0, 5],
   796  //	            [0, 0, 0, 0, 0, 0, 0, 0, 0, 6]])
   797  //
   798  //go:linkname BlockDiag py.block_diag
   799  func BlockDiag(__llgo_va_list ...interface{}) *py.Object
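//
// A hedged Go sketch (editorial): BlockDiag takes its operands variadically,
// mirroring torch.block_diag(*tensors). The tensors are assumed to come from
// other torch bindings not shown in this excerpt.
//
//	var A, B *py.Object  // assume: 0-, 1-, or 2-D tensors built elsewhere
//	D := BlockDiag(A, B) // 2-D block-diagonal result
//	_ = D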
   800  
   801  // broadcast_tensors(*tensors) -> List of Tensors
   802  //
   803  //	Broadcasts the given tensors according to :ref:`broadcasting-semantics`.
   804  //
   805  //	Args:
   806  //	    *tensors: any number of tensors of the same type
   807  //
   808  //	.. warning::
   809  //
   810  //	    More than one element of a broadcasted tensor may refer to a single
   811  //	    memory location. As a result, in-place operations (especially ones that
   812  //	    are vectorized) may result in incorrect behavior. If you need to write
   813  //	    to the tensors, please clone them first.
   814  //
   815  //	Example::
   816  //
   817  //	    >>> x = torch.arange(3).view(1, 3)
   818  //	    >>> y = torch.arange(2).view(2, 1)
   819  //	    >>> a, b = torch.broadcast_tensors(x, y)
   820  //	    >>> a.size()
   821  //	    torch.Size([2, 3])
   822  //	    >>> a
   823  //	    tensor([[0, 1, 2],
   824  //	            [0, 1, 2]])
   825  //
   826  //go:linkname BroadcastTensors py.broadcast_tensors
   827  func BroadcastTensors(__llgo_va_list ...interface{}) *py.Object
   828  
   829  // Does the Cartesian product of the given sequence of tensors. The behavior is similar to
   830  //
   831  //	Python's `itertools.product`.
   832  //
   833  //	Args:
   834  //	    *tensors: any number of 1 dimensional tensors.
   835  //
   836  //	Returns:
   837  //	    Tensor: A tensor equivalent to converting all the input tensors into lists,
   838  //	    doing `itertools.product` on these lists, and finally converting the resulting list
   839  //	    into a tensor.
   840  //
   841  //	Example::
   842  //
   843  //	    >>> import itertools
   844  //	    >>> a = [1, 2, 3]
   845  //	    >>> b = [4, 5]
   846  //	    >>> list(itertools.product(a, b))
   847  //	    [(1, 4), (1, 5), (2, 4), (2, 5), (3, 4), (3, 5)]
   848  //	    >>> tensor_a = torch.tensor(a)
   849  //	    >>> tensor_b = torch.tensor(b)
   850  //	    >>> torch.cartesian_prod(tensor_a, tensor_b)
   851  //	    tensor([[1, 4],
   852  //	            [1, 5],
   853  //	            [2, 4],
   854  //	            [2, 5],
   855  //	            [3, 4],
   856  //	            [3, 5]])
   857  //
   858  //go:linkname CartesianProd py.cartesian_prod
   859  func CartesianProd(__llgo_va_list ...interface{}) *py.Object
   860  
   861  // Computes the batched p-norm distance between each pair of the two collections of row vectors.
   862  //
   863  //	Args:
   864  //	    x1 (Tensor): input tensor of shape :math:`B \times P \times M`.
   865  //	    x2 (Tensor): input tensor of shape :math:`B \times R \times M`.
   866  //	    p: p value for the p-norm distance to calculate between each vector pair
   867  //	        :math:`\in [0, \infty]`.
   868  //	    compute_mode:
   869  //	        'use_mm_for_euclid_dist_if_necessary' - will use matrix multiplication approach to calculate
   870  //	        euclidean distance (p = 2) if P > 25 or R > 25
   871  //	        'use_mm_for_euclid_dist' - will always use matrix multiplication approach to calculate
   872  //	        euclidean distance (p = 2)
   873  //	        'donot_use_mm_for_euclid_dist' - will never use matrix multiplication approach to calculate
   874  //	        euclidean distance (p = 2)
   875  //	        Default: use_mm_for_euclid_dist_if_necessary.
   876  //
   877  //	If x1 has shape :math:`B \times P \times M` and x2 has shape :math:`B \times R \times M` then the
   878  //	output will have shape :math:`B \times P \times R`.
   879  //
   880  //	This function is equivalent to `scipy.spatial.distance.cdist(input,'minkowski', p=p)`
   881  //	if :math:`p \in (0, \infty)`. When :math:`p = 0` it is equivalent to
   882  //	`scipy.spatial.distance.cdist(input, 'hamming') * M`. When :math:`p = \infty`, the closest
   883  //	scipy function is `scipy.spatial.distance.cdist(xn, lambda x, y: np.abs(x - y).max())`.
   884  //
   885  //	Example:
   886  //
   887  //	    >>> a = torch.tensor([[0.9041,  0.0196], [-0.3108, -2.4423], [-0.4821,  1.059]])
   888  //	    >>> a
   889  //	    tensor([[ 0.9041,  0.0196],
   890  //	            [-0.3108, -2.4423],
   891  //	            [-0.4821,  1.0590]])
   892  //	    >>> b = torch.tensor([[-2.1763, -0.4713], [-0.6986,  1.3702]])
   893  //	    >>> b
   894  //	    tensor([[-2.1763, -0.4713],
   895  //	            [-0.6986,  1.3702]])
   896  //	    >>> torch.cdist(a, b, p=2)
   897  //	    tensor([[3.1193, 2.0959],
   898  //	            [2.7138, 3.8322],
   899  //	            [2.2830, 0.3791]])
   900  //
   901  //go:linkname Cdist py.cdist
   902  func Cdist(x1 *py.Object, x2 *py.Object, p *py.Object, computeMode *py.Object) *py.Object
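//
// A hedged Go sketch (editorial): an L2 pairwise-distance call. The input
// tensors are assumed to come from other torch bindings; py.Float and py.Str
// are assumed helpers in github.com/goplus/llgo/py.
//
//	var x1, x2 *py.Object // assume: B x P x M and B x R x M tensors built elsewhere
//	d := Cdist(x1, x2, py.Float(2), py.Str("use_mm_for_euclid_dist_if_necessary"))
//	_ = d // B x P x R distance tensor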
   903  
   904  // Returns the matrix product of the :math:`N` 2-D tensors. This product is efficiently computed
   905  //
   906  //	using the matrix chain order algorithm, which selects the order that incurs the lowest cost in terms
   907  //	of arithmetic operations (`[CLRS]`_). Note that since this is a function to compute the product, :math:`N`
   908  //	needs to be greater than or equal to 2; if equal to 2 then a trivial matrix-matrix product is returned.
   909  //	If :math:`N` is 1, then this is a no-op - the original matrix is returned as is.
   910  //
   911  //	.. warning::
   912  //
   913  //	    :func:`torch.chain_matmul` is deprecated and will be removed in a future PyTorch release.
   914  //	    Use :func:`torch.linalg.multi_dot` instead, which accepts a list of two or more tensors
   915  //	    rather than multiple arguments.
   916  //
   917  //	Args:
   918  //	    matrices (Tensors...): a sequence of 2 or more 2-D tensors whose product is to be determined.
   919  //	    out (Tensor, optional): the output tensor. Ignored if :attr:`out` = ``None``.
   920  //
   921  //	Returns:
   922  //	    Tensor: if the :math:`i^{th}` tensor was of dimensions :math:`p_{i} \times p_{i + 1}`, then the product
   923  //	    would be of dimensions :math:`p_{1} \times p_{N + 1}`.
   924  //
   925  //	Example::
   926  //
   927  //	    >>> # xdoctest: +SKIP
   928  //	    >>> # xdoctest: +IGNORE_WANT("non-deterministic")
   929  //	    >>> a = torch.randn(3, 4)
   930  //	    >>> b = torch.randn(4, 5)
   931  //	    >>> c = torch.randn(5, 6)
   932  //	    >>> d = torch.randn(6, 7)
   933  //	    >>> # will raise a deprecation warning
   934  //	    >>> torch.chain_matmul(a, b, c, d)
   935  //	    tensor([[ -2.3375,  -3.9790,  -4.1119,  -6.6577,   9.5609, -11.5095,  -3.2614],
   936  //	            [ 21.4038,   3.3378,  -8.4982,  -5.2457, -10.2561,  -2.4684,   2.7163],
   937  //	            [ -0.9647,  -5.8917,  -2.3213,  -5.2284,  12.8615, -12.2816,  -2.5095]])
   938  //
   939  //	.. _`[CLRS]`: https://mitpress.mit.edu/books/introduction-algorithms-third-edition
   940  //
   941  //go:linkname ChainMatmul py.chain_matmul
   942  func ChainMatmul(__llgo_va_list ...interface{}) *py.Object
   943  
   944  // einsum(equation, *operands) -> Tensor
   945  //
   946  //	Sums the product of the elements of the input :attr:`operands` along dimensions specified using a notation
   947  //	based on the Einstein summation convention.
   948  //
   949  //	Einsum allows computing many common multi-dimensional linear algebraic array operations by representing them
   950  //	in a short-hand format based on the Einstein summation convention, given by :attr:`equation`. The details of
   951  //	this format are described below, but the general idea is to label every dimension of the input :attr:`operands`
   952  //	with some subscript and define which subscripts are part of the output. The output is then computed by summing
   953  //	the product of the elements of the :attr:`operands` along the dimensions whose subscripts are not part of the
   954  //	output. For example, matrix multiplication can be computed using einsum as `torch.einsum("ij,jk->ik", A, B)`.
   955  //	Here, j is the summation subscript and i and k the output subscripts (see section below for more details on why).
   956  //
   957  //	Equation:
   958  //
   959  //	    The :attr:`equation` string specifies the subscripts (letters in `[a-zA-Z]`) for each dimension of
   960  //	    the input :attr:`operands` in the same order as the dimensions, separating subscripts for each operand by a
   961  //	    comma (','), e.g. `'ij,jk'` specify subscripts for two 2D operands. The dimensions labeled with the same subscript
   962  //	    must be broadcastable, that is, their size must either match or be `1`. The exception is if a subscript is
   963  //	    repeated for the same input operand, in which case the dimensions labeled with this subscript for this operand
   964  //	    must match in size and the operand will be replaced by its diagonal along these dimensions. The subscripts that
   965  //	    appear exactly once in the :attr:`equation` will be part of the output, sorted in increasing alphabetical order.
   966  //	    The output is computed by multiplying the input :attr:`operands` element-wise, with their dimensions aligned based
   967  //	    on the subscripts, and then summing out the dimensions whose subscripts are not part of the output.
   968  //
   969  //	    Optionally, the output subscripts can be explicitly defined by adding an arrow ('->') at the end of the equation
   970  //	    followed by the subscripts for the output. For instance, the following equation computes the transpose of a
   971  //	    matrix multiplication: 'ij,jk->ki'. The output subscripts must appear at least once for some input operand and
   972  //	    at most once for the output.
   973  //
   974  //	    Ellipsis ('...') can be used in place of subscripts to broadcast the dimensions covered by the ellipsis.
   975  //	    Each input operand may contain at most one ellipsis which will cover the dimensions not covered by subscripts,
   976  //	    e.g. for an input operand with 5 dimensions, the ellipsis in the equation `'ab...c'` cover the third and fourth
   977  //	    dimensions. The ellipsis does not need to cover the same number of dimensions across the :attr:`operands` but the
   978  //	    'shape' of the ellipsis (the size of the dimensions covered by them) must broadcast together. If the output is not
   979  //	    explicitly defined with the arrow ('->') notation, the ellipsis will come first in the output (left-most dimensions),
   980  //	    before the subscript labels that appear exactly once for the input operands. e.g. the following equation implements
   981  //	    batch matrix multiplication `'...ij,...jk'`.
   982  //
   983  //	    A few final notes: the equation may contain whitespaces between the different elements (subscripts, ellipsis,
   984  //	    arrow and comma) but something like `'. . .'` is not valid. An empty string `''` is valid for scalar operands.
   985  //
   986  //	.. note::
   987  //
   988  //	    ``torch.einsum`` handles ellipsis ('...') differently from NumPy in that it allows dimensions
   989  //	    covered by the ellipsis to be summed over, that is, ellipsis are not required to be part of the output.
   990  //
   991  //	.. note::
   992  //
   993  //	    This function uses opt_einsum (https://optimized-einsum.readthedocs.io/en/stable/) to speed up computation or to
   994  //	    consume less memory by optimizing contraction order. This optimization occurs when there are at least three
   995  //	    inputs, since the order does not matter otherwise. Note that finding _the_ optimal path is an NP-hard problem,
   996  //	    thus, opt_einsum relies on different heuristics to achieve near-optimal results. If opt_einsum is not available,
   997  //	    the default order is to contract from left to right.
   998  //
   999  //	    To bypass this default behavior, add the following line to disable the usage of opt_einsum and skip path
  1000  //	    calculation: `torch.backends.opt_einsum.enabled = False`
  1001  //
  1002  //	    To specify which strategy you'd like for opt_einsum to compute the contraction path, add the following line:
  1003  //	    `torch.backends.opt_einsum.strategy = 'auto'`. The default strategy is 'auto', and we also support 'greedy' and
  1004  //	    'optimal'. Disclaimer that the runtime of 'optimal' is factorial in the number of inputs! See more details in
  1005  //	    the opt_einsum documentation (https://optimized-einsum.readthedocs.io/en/stable/path_finding.html).
  1006  //
  1007  //	.. note::
  1008  //
  1009  //	    As of PyTorch 1.10 :func:`torch.einsum` also supports the sublist format (see examples below). In this format,
  1010  //	    subscripts for each operand are specified by sublists, lists of integers in the range [0, 52). These sublists
  1011  //	    follow their operands, and an extra sublist can appear at the end of the input to specify the output's
  1012  //	    subscripts, e.g. `torch.einsum(op1, sublist1, op2, sublist2, ..., [sublist_out])`. Python's `Ellipsis` object
  1013  //	    may be provided in a sublist to enable broadcasting as described in the Equation section above.
  1014  //
  1015  //	Args:
  1016  //	    equation (str): The subscripts for the Einstein summation.
  1017  //	    operands (List[Tensor]): The tensors to compute the Einstein summation of.
  1018  //
  1019  //	Examples::
  1020  //
  1021  //	    >>> # xdoctest: +IGNORE_WANT("non-deterministic")
  1022  //	    >>> # trace
  1023  //	    >>> torch.einsum('ii', torch.randn(4, 4))
  1024  //	    tensor(-1.2104)
  1025  //
  1026  //	    >>> # xdoctest: +IGNORE_WANT("non-deterministic")
  1027  //	    >>> # diagonal
  1028  //	    >>> torch.einsum('ii->i', torch.randn(4, 4))
  1029  //	    tensor([-0.1034,  0.7952, -0.2433,  0.4545])
  1030  //
  1031  //	    >>> # xdoctest: +IGNORE_WANT("non-deterministic")
  1032  //	    >>> # outer product
  1033  //	    >>> x = torch.randn(5)
  1034  //	    >>> y = torch.randn(4)
  1035  //	    >>> torch.einsum('i,j->ij', x, y)
  1036  //	    tensor([[ 0.1156, -0.2897, -0.3918,  0.4963],
  1037  //	            [-0.3744,  0.9381,  1.2685, -1.6070],
  1038  //	            [ 0.7208, -1.8058, -2.4419,  3.0936],
  1039  //	            [ 0.1713, -0.4291, -0.5802,  0.7350],
  1040  //	            [ 0.5704, -1.4290, -1.9323,  2.4480]])
  1041  //
  1042  //	    >>> # xdoctest: +IGNORE_WANT("non-deterministic")
  1043  //	    >>> # batch matrix multiplication
  1044  //	    >>> As = torch.randn(3, 2, 5)
  1045  //	    >>> Bs = torch.randn(3, 5, 4)
  1046  //	    >>> torch.einsum('bij,bjk->bik', As, Bs)
  1047  //	    tensor([[[-1.0564, -1.5904,  3.2023,  3.1271],
  1048  //	            [-1.6706, -0.8097, -0.8025, -2.1183]],
  1049  //
  1050  //	            [[ 4.2239,  0.3107, -0.5756, -0.2354],
  1051  //	            [-1.4558, -0.3460,  1.5087, -0.8530]],
  1052  //
  1053  //	            [[ 2.8153,  1.8787, -4.3839, -1.2112],
  1054  //	            [ 0.3728, -2.1131,  0.0921,  0.8305]]])
  1055  //
  1056  //	    >>> # xdoctest: +IGNORE_WANT("non-deterministic")
  1057  //	    >>> # with sublist format and ellipsis
  1058  //	    >>> torch.einsum(As, [..., 0, 1], Bs, [..., 1, 2], [..., 0, 2])
  1059  //	    tensor([[[-1.0564, -1.5904,  3.2023,  3.1271],
  1060  //	            [-1.6706, -0.8097, -0.8025, -2.1183]],
  1061  //
  1062  //	            [[ 4.2239,  0.3107, -0.5756, -0.2354],
  1063  //	            [-1.4558, -0.3460,  1.5087, -0.8530]],
  1064  //
  1065  //	            [[ 2.8153,  1.8787, -4.3839, -1.2112],
  1066  //	            [ 0.3728, -2.1131,  0.0921,  0.8305]]])
  1067  //
  1068  //	    >>> # batch permute
  1069  //	    >>> A = torch.randn(2, 3, 4, 5)
  1070  //	    >>> torch.einsum('...ij->...ji', A).shape
  1071  //	    torch.Size([2, 3, 5, 4])
  1072  //
  1073  //	    >>> # equivalent to torch.nn.functional.bilinear
  1074  //	    >>> A = torch.randn(3, 5, 4)
  1075  //	    >>> l = torch.randn(2, 5)
  1076  //	    >>> r = torch.randn(2, 4)
  1077  //	    >>> torch.einsum('bn,anm,bm->ba', l, A, r)
  1078  //	    tensor([[-0.3430, -5.2405,  0.4494],
  1079  //	            [ 0.3311,  5.5201, -3.0356]])
  1080  //
  1081  //go:linkname Einsum py.einsum
  1082  func Einsum(__llgo_va_list ...interface{}) *py.Object
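//
// A hedged Go sketch (editorial): matrix multiplication spelled as an einsum.
// The equation string is passed first, then the operands, mirroring
// torch.einsum(equation, *operands). py.Str is an assumed helper in
// github.com/goplus/llgo/py; the tensors are assumed to come from other
// torch bindings.
//
//	var A, B *py.Object                    // assume: 2-D tensors built elsewhere
//	C := Einsum(py.Str("ij,jk->ik"), A, B) // equivalent to A @ B
//	_ = C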
  1083  
  1084  // Creates grids of coordinates specified by the 1D inputs in :attr:`tensors`.
  1085  //
  1086  //	This is helpful when you want to visualize data over some
  1087  //	range of inputs. See below for a plotting example.
  1088  //
  1089  //	Given :math:`N` 1D tensors :math:`T_0 \ldots T_{N-1}` as
  1090  //	inputs with corresponding sizes :math:`S_0 \ldots S_{N-1}`,
  1091  //	this creates :math:`N` N-dimensional tensors :math:`G_0 \ldots
  1092  //	G_{N-1}`, each with shape :math:`(S_0, ..., S_{N-1})` where
  1093  //	the output :math:`G_i` is constructed by expanding :math:`T_i`
  1094  //	to the result shape.
  1095  //
  1096  //	.. note::
  1097  //	    0D inputs are treated equivalently to 1D inputs of a
  1098  //	    single element.
  1099  //
  1100  //	.. warning::
  1101  //	    `torch.meshgrid(*tensors)` currently has the same behavior
  1102  //	    as calling `numpy.meshgrid(*arrays, indexing='ij')`.
  1103  //
  1104  //	    In the future `torch.meshgrid` will transition to
  1105  //	    `indexing='xy'` as the default.
  1106  //
  1107  //	    https://github.com/pytorch/pytorch/issues/50276 tracks
  1108  //	    this issue with the goal of migrating to NumPy's behavior.
  1109  //
  1110  //	.. seealso::
  1111  //
  1112  //	    :func:`torch.cartesian_prod` has the same effect but it
  1113  //	    collects the data in a tensor of vectors.
  1114  //
  1115  //	Args:
  1116  //	    tensors (list of Tensor): list of scalars or 1 dimensional tensors. Scalars will be
  1117  //	        treated as tensors of size :math:`(1,)` automatically
  1118  //
  1119  //	    indexing (str, optional): the indexing mode, either "xy" or "ij";
  1120  //	        defaults to "ij". See the warning above for future changes and the shape sketch at the end of this comment.
  1121  //
  1122  //	        If "xy" is selected, the first dimension corresponds
  1123  //	        to the cardinality of the second input and the second
  1124  //	        dimension corresponds to the cardinality of the first
  1125  //	        input.
  1126  //
  1127  //	        If "ij" is selected, the dimensions are in the same
  1128  //	        order as the cardinality of the inputs.
  1129  //
  1130  //	Returns:
  1131  //	    seq (sequence of Tensors): If the input has :math:`N`
  1132  //	    tensors of size :math:`S_0 \ldots S_{N-1}`, then the
  1133  //	    output will also have :math:`N` tensors, where each tensor
  1134  //	    is of shape :math:`(S_0, ..., S_{N-1})`.
  1135  //
  1136  //	Example::
  1137  //
  1138  //	    >>> x = torch.tensor([1, 2, 3])
  1139  //	    >>> y = torch.tensor([4, 5, 6])
  1140  //
  1141  //	    Observe the element-wise pairings across the grid, (1, 4),
  1142  //	    (1, 5), ..., (3, 6). This is the same thing as the
  1143  //	    cartesian product.
  1144  //	    >>> grid_x, grid_y = torch.meshgrid(x, y, indexing='ij')
  1145  //	    >>> grid_x
  1146  //	    tensor([[1, 1, 1],
  1147  //	            [2, 2, 2],
  1148  //	            [3, 3, 3]])
  1149  //	    >>> grid_y
  1150  //	    tensor([[4, 5, 6],
  1151  //	            [4, 5, 6],
  1152  //	            [4, 5, 6]])
  1153  //
  1154  //	    This correspondence can be seen when these grids are
  1155  //	    stacked properly.
  1156  //	    >>> torch.equal(torch.cat(tuple(torch.dstack([grid_x, grid_y]))),
  1157  //	    ...             torch.cartesian_prod(x, y))
  1158  //	    True
  1159  //
  1160  //	    `torch.meshgrid` is commonly used to produce a grid for
  1161  //	    plotting.
  1162  //	    >>> # xdoctest: +REQUIRES(module:matplotlib)
  1163  //	    >>> # xdoctest: +REQUIRES(env:DOCTEST_SHOW)
  1164  //	    >>> import matplotlib.pyplot as plt
  1165  //	    >>> xs = torch.linspace(-5, 5, steps=100)
  1166  //	    >>> ys = torch.linspace(-5, 5, steps=100)
  1167  //	    >>> x, y = torch.meshgrid(xs, ys, indexing='xy')
  1168  //	    >>> z = torch.sin(torch.sqrt(x * x + y * y))
  1169  //	    >>> ax = plt.axes(projection='3d')
  1170  //	    >>> ax.plot_surface(x.numpy(), y.numpy(), z.numpy())
  1171  //	    >>> plt.show()
  1172  //
  1173  //	.. image:: ../_static/img/meshgrid.png
  1174  //	    :width: 512
  1175  //
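        //	The following is an illustrative sketch (not part of the upstream
        //	docstring) of how :attr:`indexing` affects the output shapes::
        //
        //	    >>> x = torch.arange(3)
        //	    >>> y = torch.arange(5)
        //	    >>> torch.meshgrid(x, y, indexing='ij')[0].shape
        //	    torch.Size([3, 5])
        //	    >>> torch.meshgrid(x, y, indexing='xy')[0].shape
        //	    torch.Size([5, 3])
        //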
  1176  //go:linkname Meshgrid py.meshgrid
  1177  func Meshgrid(__llgo_va_list ...interface{}) *py.Object
  1178  
  1179  // Returns the matrix norm or vector norm of a given tensor.
  1180  //
  1181  //	.. warning::
  1182  //
  1183  //	    torch.norm is deprecated and may be removed in a future PyTorch release.
  1184  //	    Its documentation and behavior may be incorrect, and it is no longer
  1185  //	    actively maintained.
  1186  //
  1187  //	    Use :func:`torch.linalg.vector_norm` when computing vector norms and
  1188  //	    :func:`torch.linalg.matrix_norm` when computing matrix norms.
  1189  //	    For a function with a similar behavior as this one see :func:`torch.linalg.norm`.
  1190  //	    Note, however, the signature for these functions is slightly different than the
  1191  //	    signature for ``torch.norm``.
  1192  //
  1193  //	Args:
  1194  //	    input (Tensor): The input tensor. Its data type must be either a floating
  1195  //	        point or complex type. For complex inputs, the norm is calculated using the
  1196  //	        absolute value of each element. If the input is complex and neither
  1197  //	        :attr:`dtype` nor :attr:`out` is specified, the result's data type will
  1198  //	        be the corresponding floating point type (e.g. float if :attr:`input` is
  1199  //	        complexfloat).
  1200  //
  1201  //	    p (int, float, inf, -inf, 'fro', 'nuc', optional): the order of norm. Default: ``'fro'``
  1202  //	        The following norms can be calculated:
  1203  //
  1204  //	        ======  ==============  ==========================
  1205  //	        ord     matrix norm     vector norm
  1206  //	        ======  ==============  ==========================
  1207  //	        'fro'   Frobenius norm  --
  1208  //	        'nuc'   nuclear norm    --
  1209  //	        Number  --              sum(abs(x)**ord)**(1./ord)
  1210  //	        ======  ==============  ==========================
  1211  //
  1212  //	        The vector norm can be calculated across any number of dimensions.
  1213  //	        The corresponding dimensions of :attr:`input` are flattened into
  1214  //	        one dimension, and the norm is calculated on the flattened
  1215  //	        dimension.
  1216  //
  1217  //	        Frobenius norm produces the same result as ``p=2`` in all cases
  1218  //	        except when :attr:`dim` is a list of three or more dims, in which
  1219  //	        case Frobenius norm throws an error.
  1220  //
  1221  //	        Nuclear norm can only be calculated across exactly two dimensions.
  1222  //
  1223  //	    dim (int, tuple of ints, list of ints, optional):
  1224  //	        Specifies which dimension or dimensions of :attr:`input` to
  1225  //	        calculate the norm across. If :attr:`dim` is ``None``, the norm will
  1226  //	        be calculated across all dimensions of :attr:`input`. If the norm
  1227  //	        type indicated by :attr:`p` does not support the specified number of
  1228  //	        dimensions, an error will occur.
  1229  //	    keepdim (bool, optional): whether the output tensors have :attr:`dim`
  1230  //	        retained or not. Ignored if :attr:`dim` = ``None`` and
  1231  //	        :attr:`out` = ``None``. Default: ``False``
  1232  //	    out (Tensor, optional): the output tensor. Ignored if
  1233  //	        :attr:`dim` = ``None`` and :attr:`out` = ``None``.
  1234  //	    dtype (:class:`torch.dtype`, optional): the desired data type of
  1235  //	        returned tensor. If specified, the input tensor is cast to
  1236  //	        :attr:`dtype` while performing the operation. Default: None.
  1237  //
  1238  //	.. note::
  1239  //	    Even though ``p='fro'`` supports any number of dimensions, the true
  1240  //	    mathematical definition of Frobenius norm only applies to tensors with
  1241  //	    exactly two dimensions. :func:`torch.linalg.matrix_norm` with ``ord='fro'``
  1242  //	    aligns with the mathematical definition, since it can only be applied across
  1243  //	    exactly two dimensions.
  1244  //
  1245  //	Example::
  1246  //
  1247  //	    >>> import torch
  1248  //	    >>> a = torch.arange(9, dtype= torch.float) - 4
  1249  //	    >>> b = a.reshape((3, 3))
  1250  //	    >>> torch.norm(a)
  1251  //	    tensor(7.7460)
  1252  //	    >>> torch.norm(b)
  1253  //	    tensor(7.7460)
  1254  //	    >>> torch.norm(a, float('inf'))
  1255  //	    tensor(4.)
  1256  //	    >>> torch.norm(b, float('inf'))
  1257  //	    tensor(4.)
  1258  //	    >>> c = torch.tensor([[ 1, 2, 3], [-1, 1, 4]] , dtype=torch.float)
  1259  //	    >>> torch.norm(c, dim=0)
  1260  //	    tensor([1.4142, 2.2361, 5.0000])
  1261  //	    >>> torch.norm(c, dim=1)
  1262  //	    tensor([3.7417, 4.2426])
  1263  //	    >>> torch.norm(c, p=1, dim=1)
  1264  //	    tensor([6., 6.])
  1265  //	    >>> d = torch.arange(8, dtype=torch.float).reshape(2, 2, 2)
  1266  //	    >>> torch.norm(d, dim=(1, 2))
  1267  //	    tensor([ 3.7417, 11.2250])
  1268  //	    >>> torch.norm(d[0, :, :]), torch.norm(d[1, :, :])
  1269  //	    (tensor(3.7417), tensor(11.2250))
  1270  //
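        //	The warning above recommends :func:`torch.linalg.vector_norm` and
        //	:func:`torch.linalg.matrix_norm`; a hedged sketch of the equivalent calls
        //	for the ``a`` and ``b`` tensors above (note the slightly different
        //	signatures)::
        //
        //	    >>> torch.linalg.vector_norm(a, ord=2)
        //	    tensor(7.7460)
        //	    >>> torch.linalg.matrix_norm(b, ord='fro')
        //	    tensor(7.7460)
        //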
  1271  //go:linkname Norm py.norm
  1272  func Norm(input *py.Object, p *py.Object, dim *py.Object, keepdim *py.Object, out *py.Object, dtype *py.Object) *py.Object
  1273  
  1274  // Splits the tensor into chunks. Each chunk is a view of the original tensor.
  1275  //
  1276  //	If :attr:`split_size_or_sections` is an integer type, then :attr:`tensor` will
  1277  //	be split into equally sized chunks (if possible). Last chunk will be smaller if
  1278  //	the tensor size along the given dimension :attr:`dim` is not divisible by
  1279  //	:attr:`split_size`.
  1280  //
  1281  //	If :attr:`split_size_or_sections` is a list, then :attr:`tensor` will be split
  1282  //	into ``len(split_size_or_sections)`` chunks with sizes in :attr:`dim` according
  1283  //	to :attr:`split_size_or_sections`.
  1284  //
  1285  //	Args:
  1286  //	    tensor (Tensor): tensor to split.
  1287  //	    split_size_or_sections (int) or (list(int)): size of a single chunk or
  1288  //	        list of sizes for each chunk
  1289  //	    dim (int): dimension along which to split the tensor.
  1290  //
  1291  //	Example::
  1292  //
  1293  //	    >>> a = torch.arange(10).reshape(5, 2)
  1294  //	    >>> a
  1295  //	    tensor([[0, 1],
  1296  //	            [2, 3],
  1297  //	            [4, 5],
  1298  //	            [6, 7],
  1299  //	            [8, 9]])
  1300  //	    >>> torch.split(a, 2)
  1301  //	    (tensor([[0, 1],
  1302  //	             [2, 3]]),
  1303  //	     tensor([[4, 5],
  1304  //	             [6, 7]]),
  1305  //	     tensor([[8, 9]]))
  1306  //	    >>> torch.split(a, [1, 4])
  1307  //	    (tensor([[0, 1]]),
  1308  //	     tensor([[2, 3],
  1309  //	             [4, 5],
  1310  //	             [6, 7],
  1311  //	             [8, 9]]))
  1312  //
  1313  //go:linkname Split py.split
  1314  func Split(tensor *py.Object, splitSizeOrSections *py.Object, dim *py.Object) *py.Object
  1315  
  1316  // Short-time Fourier transform (STFT).
  1317  //
  1318  //	.. warning::
  1319  //	    From version 1.8.0, :attr:`return_complex` must always be given
  1320  //	    explicitly for real inputs and `return_complex=False` has been
  1321  //	    deprecated. Strongly prefer `return_complex=True` as in a future
  1322  //	    pytorch release, this function will only return complex tensors.
  1323  //
  1324  //	    Note that :func:`torch.view_as_real` can be used to recover a real
  1325  //	    tensor with an extra last dimension for real and imaginary components.
  1326  //
  1327  //	.. warning::
  1328  //	    From version 2.1, a warning will be provided if a :attr:`window` is
  1329  //	    not specified. In a future release, this attribute will be required.
  1330  //	    Not providing a window currently defaults to using a rectangular window,
  1331  //	    which may result in undesirable artifacts. Consider using tapered windows,
  1332  //	    such as :func:`torch.hann_window`.
  1333  //
  1334  //	The STFT computes the Fourier transform of short overlapping windows of the
  1335  //	input. This gives the frequency components of the signal as they change over
  1336  //	time. The interface of this function is modeled after (but *not* a drop-in
  1337  //	replacement for) the librosa_ stft function.
  1338  //
  1339  //	.. _librosa: https://librosa.org/doc/latest/generated/librosa.stft.html
  1340  //
  1341  //	Ignoring the optional batch dimension, this method computes the following
  1342  //	expression:
  1343  //
  1344  //	.. math::
  1345  //	    X[\omega, m] = \sum_{k = 0}^{\text{win\_length-1}}%
  1346  //	                        \text{window}[k]\ \text{input}[m \times \text{hop\_length} + k]\ %
  1347  //	                        \exp\left(- j \frac{2 \pi \cdot \omega k}{\text{n\_fft}}\right),
  1348  //
  1349  //	where :math:`m` is the index of the sliding window, and :math:`\omega` is
  1350  //	the frequency :math:`0 \leq \omega < \text{n\_fft}` for ``onesided=False``,
  1351  //	or :math:`0 \leq \omega < \lfloor \text{n\_fft} / 2 \rfloor + 1` for ``onesided=True``.
  1352  //
  1353  //	* :attr:`input` must be either a 1-D time sequence or a 2-D batch of time
  1354  //	  sequences.
  1355  //
  1356  //	* If :attr:`hop_length` is ``None`` (default), it is treated as equal to
  1357  //	  ``floor(n_fft / 4)``.
  1358  //
  1359  //	* If :attr:`win_length` is ``None`` (default), it is treated as equal to
  1360  //	  :attr:`n_fft`.
  1361  //
  1362  //	* :attr:`window` can be a 1-D tensor of size :attr:`win_length`, e.g., from
  1363  //	  :meth:`torch.hann_window`. If :attr:`window` is ``None`` (default), it is
  1364  //	  treated as if having :math:`1` everywhere in the window. If
  1365  //	  :math:`\text{win\_length} < \text{n\_fft}`, :attr:`window` will be padded on
  1366  //	  both sides to length :attr:`n_fft` before being applied.
  1367  //
  1368  //	* If :attr:`center` is ``True`` (default), :attr:`input` will be padded on
  1369  //	  both sides so that the :math:`t`-th frame is centered at time
  1370  //	  :math:`t \times \text{hop\_length}`. Otherwise, the :math:`t`-th frame
  1371  //	  begins at time  :math:`t \times \text{hop\_length}`.
  1372  //
  1373  //	* :attr:`pad_mode` determines the padding method used on :attr:`input` when
  1374  //	  :attr:`center` is ``True``. See :meth:`torch.nn.functional.pad` for
  1375  //	  all available options. Default is ``"reflect"``.
  1376  //
  1377  //	* If :attr:`onesided` is ``True`` (default for real input), only values for
  1378  //	  :math:`\omega` in :math:`\left[0, 1, 2, \dots, \left\lfloor
  1379  //	  \frac{\text{n\_fft}}{2} \right\rfloor + 1\right]` are returned because
  1380  //	  the real-to-complex Fourier transform satisfies the conjugate symmetry,
  1381  //	  i.e., :math:`X[m, \omega] = X[m, \text{n\_fft} - \omega]^*`.
  1382  //	  Note if the input or window tensors are complex, then :attr:`onesided`
  1383  //	  output is not possible.
  1384  //
  1385  //	* If :attr:`normalized` is ``True`` (default is ``False``), the function
  1386  //	  returns the normalized STFT results, i.e., multiplied by :math:`(\text{frame\_length})^{-0.5}`.
  1387  //
  1388  //	* If :attr:`return_complex` is ``True`` (default if input is complex), the
  1389  //	  return is a ``input.dim() + 1`` dimensional complex tensor. If ``False``,
  1390  //	  the output is a ``input.dim() + 2`` dimensional real tensor where the last
  1391  //	  dimension represents the real and imaginary components.
  1392  //
  1393  //	Returns either a complex tensor of size :math:`(* \times N \times T)` if
  1394  //	:attr:`return_complex` is true, or a real tensor of size :math:`(* \times N
  1395  //	\times T \times 2)`, where :math:`*` is the optional batch size of
  1396  //	:attr:`input`, :math:`N` is the number of frequencies where STFT is applied
  1397  //	and :math:`T` is the total number of frames used.
  1398  //
  1399  //	.. warning::
  1400  //	  This function changed signature at version 0.4.1. Calling with the
  1401  //	  previous signature may cause an error or return an incorrect result.
  1402  //
  1403  //	Args:
  1404  //	    input (Tensor): the input tensor of shape `(B?, L)` where `B?` is an optional
  1405  //	        batch dimension
  1406  //	    n_fft (int): size of Fourier transform
  1407  //	    hop_length (int, optional): the distance between neighboring sliding window
  1408  //	        frames. Default: ``None`` (treated as equal to ``floor(n_fft / 4)``)
  1409  //	    win_length (int, optional): the size of window frame and STFT filter.
  1410  //	        Default: ``None``  (treated as equal to :attr:`n_fft`)
  1411  //	    window (Tensor, optional): the optional window function.
  1412  //	        Shape must be 1d and `<= n_fft`
  1413  //	        Default: ``None`` (treated as window of all :math:`1` s)
  1414  //	    center (bool, optional): whether to pad :attr:`input` on both sides so
  1415  //	        that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`.
  1416  //	        Default: ``True``
  1417  //	    pad_mode (str, optional): controls the padding method used when
  1418  //	        :attr:`center` is ``True``. Default: ``"reflect"``
  1419  //	    normalized (bool, optional): controls whether to return the normalized STFT results
  1420  //	         Default: ``False``
  1421  //	    onesided (bool, optional): controls whether to return half of results to
  1422  //	        avoid redundancy for real inputs.
  1423  //	        Default: ``True`` for real :attr:`input` and :attr:`window`, ``False`` otherwise.
  1424  //	    return_complex (bool, optional): whether to return a complex tensor, or
  1425  //	        a real tensor with an extra last dimension for the real and
  1426  //	        imaginary components.
  1427  //
  1428  //	        .. versionchanged:: 2.0
  1429  //	           ``return_complex`` is now a required argument for real inputs,
  1430  //	           as the default is being transitioned to ``True``.
  1431  //
  1432  //	        .. deprecated:: 2.0
  1433  //	           ``return_complex=False`` is deprecated, instead use ``return_complex=True``
  1434  //	           Note that calling :func:`torch.view_as_real` on the output will
  1435  //	           recover the deprecated output format.
  1436  //
  1437  //	Returns:
  1438  //	    Tensor: A tensor containing the STFT result with shape `(B?, N, T, C?)` where
  1439  //	       - `B?` is an optional batch dimension from the input
  1440  //	       - `N` is the number of frequency samples, `(n_fft // 2) + 1` for
  1441  //	         `onesided=True`, or otherwise `n_fft`.
  1442  //	       - `T` is the number of frames, `1 + L // hop_length`
  1443  //	         for `center=True`, or `1 + (L - n_fft) // hop_length` otherwise.
  1444  //	       - `C?` is an optional length-2 dimension of real and imaginary
  1445  //	         components, present when `return_complex=False`.
  1446  //
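        //	Example (an illustrative sketch, not from the upstream docstring; the
        //	shapes follow the formulas above)::
        //
        //	    >>> x = torch.randn(800)
        //	    >>> window = torch.hann_window(400)
        //	    >>> S = torch.stft(x, n_fft=400, hop_length=160, window=window,
        //	    ...                return_complex=True)
        //	    >>> S.shape  # (n_fft // 2 + 1, 1 + L // hop_length)
        //	    torch.Size([201, 6])
        //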
  1447  //go:linkname Stft py.stft
  1448  func Stft(input *py.Object, nFft *py.Object, hopLength *py.Object, winLength *py.Object, window *py.Object, center *py.Object, padMode *py.Object, normalized *py.Object, onesided *py.Object, returnComplex *py.Object) *py.Object
  1449  
  1450  // Returns a contraction of a and b over multiple dimensions.
  1451  //
  1452  //	:attr:`tensordot` implements a generalized matrix product.
  1453  //
  1454  //	Args:
  1455  //	  a (Tensor): Left tensor to contract
  1456  //	  b (Tensor): Right tensor to contract
  1457  //	  dims (int or Tuple[List[int], List[int]] or List[List[int]] containing two lists or Tensor): number of dimensions to
  1458  //	     contract or explicit lists of dimensions for :attr:`a` and
  1459  //	     :attr:`b` respectively
  1460  //
  1461  //	When called with a non-negative integer argument :attr:`dims` = :math:`d`, and
  1462  //	the number of dimensions of :attr:`a` and :attr:`b` is :math:`m` and :math:`n`,
  1463  //	respectively, :func:`~torch.tensordot` computes
  1464  //
  1465  //	.. math::
  1466  //	    r_{i_0,...,i_{m-d}, i_d,...,i_n}
  1467  //	      = \sum_{k_0,...,k_{d-1}} a_{i_0,...,i_{m-d},k_0,...,k_{d-1}} \times b_{k_0,...,k_{d-1}, i_d,...,i_n}.
  1468  //
  1469  //	When called with :attr:`dims` of the list form, the given dimensions will be contracted
  1470  //	in place of the last :math:`d` of :attr:`a` and the first :math:`d` of :attr:`b`. The sizes
  1471  //	in these dimensions must match, but :func:`~torch.tensordot` will deal with broadcasted
  1472  //	dimensions.
  1473  //
  1474  //	Examples::
  1475  //
  1476  //	    >>> a = torch.arange(60.).reshape(3, 4, 5)
  1477  //	    >>> b = torch.arange(24.).reshape(4, 3, 2)
  1478  //	    >>> torch.tensordot(a, b, dims=([1, 0], [0, 1]))
  1479  //	    tensor([[4400., 4730.],
  1480  //	            [4532., 4874.],
  1481  //	            [4664., 5018.],
  1482  //	            [4796., 5162.],
  1483  //	            [4928., 5306.]])
  1484  //
  1485  //	    >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
  1486  //	    >>> a = torch.randn(3, 4, 5, device='cuda')
  1487  //	    >>> b = torch.randn(4, 5, 6, device='cuda')
  1488  //	    >>> torch.tensordot(a, b, dims=2).cpu()
  1489  //	    tensor([[ 8.3504, -2.5436,  6.2922,  2.7556, -1.0732,  3.2741],
  1490  //	            [ 3.3161,  0.0704,  5.0187, -0.4079, -4.3126,  4.8744],
  1491  //	            [ 0.8223,  3.9445,  3.2168, -0.2400,  3.4117,  1.7780]])
  1492  //
  1493  //	    >>> a = torch.randn(3, 5, 4, 6)
  1494  //	    >>> b = torch.randn(6, 4, 5, 3)
  1495  //	    >>> torch.tensordot(a, b, dims=([2, 1, 3], [1, 2, 0]))
  1496  //	    tensor([[  7.7193,  -2.4867, -10.3204],
  1497  //	            [  1.5513, -14.4737,  -6.5113],
  1498  //	            [ -0.2850,   4.2573,  -3.5997]])
  1499  //
  1500  //go:linkname Tensordot py.tensordot
  1501  func Tensordot(a *py.Object, b *py.Object, dims *py.Object, out *py.Object) *py.Object
  1502  
  1503  // Eliminates all but the first element from every consecutive group of equivalent elements.
  1504  //
  1505  //	.. note:: This function is different from :func:`torch.unique` in the sense that this function
  1506  //	    only eliminates consecutive duplicate values. Its semantics are similar to `std::unique`
  1507  //	    in C++.
  1508  //
  1509  //	Args:
  1510  //	    input (Tensor): the input tensor
  1511  //	    return_inverse (bool): Whether to also return the indices for where
  1512  //	        elements in the original input ended up in the returned unique list.
  1513  //	    return_counts (bool): Whether to also return the counts for each unique
  1514  //	        element.
  1515  //	    dim (int): the dimension to apply unique. If ``None``, the unique of the
  1516  //	        flattened input is returned. default: ``None``
  1517  //
  1518  //	Returns:
  1519  //	    (Tensor, Tensor (optional), Tensor (optional)): A tensor or a tuple of tensors containing
  1520  //
  1521  //	        - **output** (*Tensor*): the output list of unique scalar elements.
  1522  //	        - **inverse_indices** (*Tensor*): (optional) if
  1523  //	          :attr:`return_inverse` is True, there will be an additional
  1524  //	          returned tensor (same shape as input) representing the indices
  1525  //	          for where elements in the original input map to in the output;
  1526  //	          otherwise, this function will only return a single tensor.
  1527  //	        - **counts** (*Tensor*): (optional) if
  1528  //	          :attr:`return_counts` is True, there will be an additional
  1529  //	          returned tensor (same shape as output or output.size(dim),
  1530  //	          if dim was specified) representing the number of occurrences
  1531  //	          for each unique value or tensor.
  1532  //
  1533  //	Example::
  1534  //
  1535  //	    >>> x = torch.tensor([1, 1, 2, 2, 3, 1, 1, 2])
  1536  //	    >>> output = torch.unique_consecutive(x)
  1537  //	    >>> output
  1538  //	    tensor([1, 2, 3, 1, 2])
  1539  //
  1540  //	    >>> output, inverse_indices = torch.unique_consecutive(x, return_inverse=True)
  1541  //	    >>> output
  1542  //	    tensor([1, 2, 3, 1, 2])
  1543  //	    >>> inverse_indices
  1544  //	    tensor([0, 0, 1, 1, 2, 3, 3, 4])
  1545  //
  1546  //	    >>> output, counts = torch.unique_consecutive(x, return_counts=True)
  1547  //	    >>> output
  1548  //	    tensor([1, 2, 3, 1, 2])
  1549  //	    >>> counts
  1550  //	    tensor([2, 2, 1, 2, 1])
  1551  //
  1552  //go:linkname UniqueConsecutive py.unique_consecutive
  1553  func UniqueConsecutive(__llgo_va_list ...interface{}) *py.Object
  1554  
  1555  // broadcast_shapes(*shapes) -> Size
  1556  //
  1557  //	Similar to :func:`broadcast_tensors` but for shapes.
  1558  //
  1559  //	This is equivalent to
  1560  //	``torch.broadcast_tensors(*map(torch.empty, shapes))[0].shape``
  1561  //	but avoids the need to create intermediate tensors. This is useful for
  1562  //	broadcasting tensors of common batch shape but different rightmost shape,
  1563  //	e.g. to broadcast mean vectors with covariance matrices.
  1564  //
  1565  //	Example::
  1566  //
  1567  //	    >>> torch.broadcast_shapes((2,), (3, 1), (1, 1, 1))
  1568  //	    torch.Size([1, 3, 2])
  1569  //
  1570  //	Args:
  1571  //	    \*shapes (torch.Size): Shapes of tensors.
  1572  //
  1573  //	Returns:
  1574  //	    shape (torch.Size): A shape compatible with all input shapes.
  1575  //
  1576  //	Raises:
  1577  //	    RuntimeError: If shapes are incompatible.
  1578  //
  1579  //go:linkname BroadcastShapes py.broadcast_shapes
  1580  func BroadcastShapes(__llgo_va_list ...interface{}) *py.Object
  1581  
  1582  // Computes the LU factorization of a matrix or batches of matrices
  1583  //
  1584  //	:attr:`A`. Returns a tuple containing the LU factorization and
  1585  //	pivots of :attr:`A`.  Pivoting is done if :attr:`pivot` is set to
  1586  //	``True``.
  1587  //
  1588  //	.. warning::
  1589  //
  1590  //	    :func:`torch.lu` is deprecated in favor of :func:`torch.linalg.lu_factor`
  1591  //	    and :func:`torch.linalg.lu_factor_ex`. :func:`torch.lu` will be removed in a
  1592  //	    future PyTorch release.
  1593  //	    ``LU, pivots, info = torch.lu(A, compute_pivots)`` should be replaced with
  1594  //
  1595  //	    .. code:: python
  1596  //
  1597  //	        LU, pivots = torch.linalg.lu_factor(A, compute_pivots)
  1598  //
  1599  //	    ``LU, pivots, info = torch.lu(A, compute_pivots, get_infos=True)`` should be replaced with
  1600  //
  1601  //	    .. code:: python
  1602  //
  1603  //	        LU, pivots, info = torch.linalg.lu_factor_ex(A, compute_pivots)
  1604  //
  1605  //	.. note::
  1606  //	    * The returned permutation matrix for every matrix in the batch is
  1607  //	      represented by a 1-indexed vector of size ``min(A.shape[-2], A.shape[-1])``.
  1608  //	      ``pivots[i] == j`` represents that in the ``i``-th step of the algorithm,
  1609  //	      the ``i``-th row was permuted with the ``j-1``-th row.
  1610  //	    * LU factorization with :attr:`pivot` = ``False`` is not available
  1611  //	      for CPU, and attempting to do so will throw an error. However,
  1612  //	      LU factorization with :attr:`pivot` = ``False`` is available for
  1613  //	      CUDA.
  1614  //	    * This function does not check if the factorization was successful
  1615  //	      or not if :attr:`get_infos` is ``True`` since the status of the
  1616  //	      factorization is present in the third element of the return tuple.
  1617  //	    * In the case of batches of square matrices with size less than or equal
  1618  //	      to 32 on a CUDA device, the LU factorization is repeated for
  1619  //	      singular matrices due to the bug in the MAGMA library
  1620  //	      (see magma issue 13).
  1621  //	    * ``L``, ``U``, and ``P`` can be derived using :func:`torch.lu_unpack`.
  1622  //
  1623  //	.. warning::
  1624  //	    The gradients of this function will only be finite when :attr:`A` is full rank.
  1625  //	    This is because the LU decomposition is only differentiable at full-rank matrices.
  1626  //	    Furthermore, if :attr:`A` is close to not being full rank,
  1627  //	    the gradient will be numerically unstable as it depends on the computation of :math:`L^{-1}` and :math:`U^{-1}`.
  1628  //
  1629  //	Args:
  1630  //	    A (Tensor): the tensor to factor of size :math:`(*, m, n)`
  1631  //	    pivot (bool, optional): controls whether pivoting is done. Default: ``True``
  1632  //	    get_infos (bool, optional): if set to ``True``, returns an info IntTensor.
  1633  //	                                Default: ``False``
  1634  //	    out (tuple, optional): optional output tuple. If :attr:`get_infos` is ``True``,
  1635  //	                           then the elements in the tuple are Tensor, IntTensor,
  1636  //	                           and IntTensor. If :attr:`get_infos` is ``False``, then the
  1637  //	                           elements in the tuple are Tensor, IntTensor. Default: ``None``
  1638  //
  1639  //	Returns:
  1640  //	    (Tensor, IntTensor, IntTensor (optional)): A tuple of tensors containing
  1641  //
  1642  //	        - **factorization** (*Tensor*): the factorization of size :math:`(*, m, n)`
  1643  //
  1644  //	        - **pivots** (*IntTensor*): the pivots of size :math:`(*, \text{min}(m, n))`.
  1645  //	          ``pivots`` stores all the intermediate transpositions of rows.
  1646  //	          The final permutation ``perm`` could be reconstructed by
  1647  //	          applying ``swap(perm[i], perm[pivots[i] - 1])`` for ``i = 0, ..., pivots.size(-1) - 1``,
  1648  //	          where ``perm`` is initially the identity permutation of :math:`m` elements
  1649  //	          (essentially this is what :func:`torch.lu_unpack` is doing).
  1650  //
  1651  //	        - **infos** (*IntTensor*, *optional*): if :attr:`get_infos` is ``True``, this is a tensor of
  1652  //	          size :math:`(*)` where non-zero values indicate whether factorization for the matrix or
  1653  //	          each minibatch has succeeded or failed
  1654  //
  1655  //	Example::
  1656  //
  1657  //	    >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_LAPACK)
  1658  //	    >>> # xdoctest: +IGNORE_WANT("non-deterministic")
  1659  //	    >>> A = torch.randn(2, 3, 3)
  1660  //	    >>> A_LU, pivots = torch.lu(A)
  1661  //	    >>> A_LU
  1662  //	    tensor([[[ 1.3506,  2.5558, -0.0816],
  1663  //	             [ 0.1684,  1.1551,  0.1940],
  1664  //	             [ 0.1193,  0.6189, -0.5497]],
  1665  //
  1666  //	            [[ 0.4526,  1.2526, -0.3285],
  1667  //	             [-0.7988,  0.7175, -0.9701],
  1668  //	             [ 0.2634, -0.9255, -0.3459]]])
  1669  //	    >>> pivots
  1670  //	    tensor([[ 3,  3,  3],
  1671  //	            [ 3,  3,  3]], dtype=torch.int32)
  1672  //	    >>> A_LU, pivots, info = torch.lu(A, get_infos=True)
  1673  //	    >>> if info.nonzero().size(0) == 0:
  1674  //	    ...     print('LU factorization succeeded for all samples!')
  1675  //	    LU factorization succeeded for all samples!
  1676  //
  1677  //go:linkname Lu py.lu
  1678  func Lu(__llgo_va_list ...interface{}) *py.Object
  1679  
  1680  // Performs linear Principal Component Analysis (PCA) on a low-rank
  1681  //
  1682  //	matrix, batches of such matrices, or a sparse matrix.
  1683  //
  1684  //	This function returns a namedtuple ``(U, S, V)`` which is the
  1685  //	nearly optimal approximation of a singular value decomposition of
  1686  //	a centered matrix :math:`A` such that :math:`A = U diag(S) V^T`.
  1687  //
  1688  //	.. note:: The relation of ``(U, S, V)`` to PCA is as follows:
  1689  //
  1690  //	            - :math:`A` is a data matrix with ``m`` samples and
  1691  //	              ``n`` features
  1692  //
  1693  //	            - the :math:`V` columns represent the principal directions
  1694  //
  1695  //	            - :math:`S ** 2 / (m - 1)` contains the eigenvalues of
  1696  //	              :math:`A^T A / (m - 1)` which is the covariance of
  1697  //	              ``A`` when ``center=True`` is provided.
  1698  //
  1699  //	            - ``matmul(A, V[:, :k])`` projects data to the first k
  1700  //	              principal components
  1701  //
  1702  //	.. note:: Unlike the standard SVD, the sizes of the returned
  1703  //	          matrices depend on the specified rank and q
  1704  //	          values as follows:
  1705  //
  1706  //	            - :math:`U` is m x q matrix
  1707  //
  1708  //	            - :math:`S` is q-vector
  1709  //
  1710  //	            - :math:`V` is n x q matrix
  1711  //
  1712  //	.. note:: To obtain repeatable results, reset the seed for the
  1713  //	          pseudorandom number generator
  1714  //
  1715  //	Args:
  1716  //
  1717  //	    A (Tensor): the input tensor of size :math:`(*, m, n)`
  1718  //
  1719  //	    q (int, optional): a slightly overestimated rank of
  1720  //	                       :math:`A`. By default, ``q = min(6, m,
  1721  //	                       n)``.
  1722  //
  1723  //	    center (bool, optional): if True, center the input tensor,
  1724  //	                             otherwise, assume that the input is
  1725  //	                             centered.
  1726  //
  1727  //	    niter (int, optional): the number of subspace iterations to
  1728  //	                           conduct; niter must be a nonnegative
  1729  //	                           integer, and defaults to 2.
  1730  //
  1731  //	References::
  1732  //
  1733  //	    - Nathan Halko, Per-Gunnar Martinsson, and Joel Tropp, Finding
  1734  //	      structure with randomness: probabilistic algorithms for
  1735  //	      constructing approximate matrix decompositions,
  1736  //	      arXiv:0909.4061 [math.NA; math.PR], 2009 (available at
  1737  //	      `arXiv <http://arxiv.org/abs/0909.4061>`_).
  1738  //
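        //	Example (an illustrative sketch, not part of the upstream docstring)::
        //
        //	    >>> A = torch.randn(100, 5)   # 100 samples, 5 features
        //	    >>> U, S, V = torch.pca_lowrank(A, q=3)
        //	    >>> U.shape, S.shape, V.shape
        //	    (torch.Size([100, 3]), torch.Size([3]), torch.Size([5, 3]))
        //	    >>> proj = A @ V[:, :2]       # project onto the first two principal directions
        //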
  1739  //go:linkname PcaLowrank py.pca_lowrank
  1740  func PcaLowrank(A *py.Object, q *py.Object, center *py.Object, niter *py.Object) *py.Object
  1741  
  1742  // Return the singular value decomposition ``(U, S, V)`` of a matrix,
  1743  //
  1744  //	batches of matrices, or a sparse matrix :math:`A` such that
  1745  //	:math:`A \approx U diag(S) V^T`. In case :math:`M` is given, then
  1746  //	SVD is computed for the matrix :math:`A - M`.
  1747  //
  1748  //	.. note:: The implementation is based on the Algorithm 5.1 from
  1749  //	          Halko et al, 2009.
  1750  //
  1751  //	.. note:: To obtain repeatable results, reset the seed for the
  1752  //	          pseudorandom number generator
  1753  //
  1754  //	.. note:: The input is assumed to be a low-rank matrix.
  1755  //
  1756  //	.. note:: In general, use the full-rank SVD implementation
  1757  //	          :func:`torch.linalg.svd` for dense matrices due to its 10-fold
  1758  //	          higher performance characteristics. The low-rank SVD
  1759  //	          will be useful for huge sparse matrices that
  1760  //	          :func:`torch.linalg.svd` cannot handle.
  1761  //
  1762  //	Args:
  1763  //	    A (Tensor): the input tensor of size :math:`(*, m, n)`
  1764  //
  1765  //	    q (int, optional): a slightly overestimated rank of A.
  1766  //
  1767  //	    niter (int, optional): the number of subspace iterations to
  1768  //	                           conduct; niter must be a nonnegative
  1769  //	                           integer, and defaults to 2
  1770  //
  1771  //	    M (Tensor, optional): the input tensor's mean of size
  1772  //	                          :math:`(*, 1, n)`.
  1773  //
  1774  //	References::
  1775  //	    - Nathan Halko, Per-Gunnar Martinsson, and Joel Tropp, Finding
  1776  //	      structure with randomness: probabilistic algorithms for
  1777  //	      constructing approximate matrix decompositions,
  1778  //	      arXiv:0909.4061 [math.NA; math.PR], 2009 (available at
  1779  //	      `arXiv <https://arxiv.org/abs/0909.4061>`_).
  1780  //
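        //	Example (an illustrative sketch, not part of the upstream docstring)::
        //
        //	    >>> A = torch.randn(50, 10)
        //	    >>> U, S, V = torch.svd_lowrank(A, q=4)
        //	    >>> U.shape, S.shape, V.shape
        //	    (torch.Size([50, 4]), torch.Size([4]), torch.Size([10, 4]))
        //	    >>> A_approx = U @ torch.diag(S) @ V.mT   # rank-4 approximation of A
        //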
  1781  //go:linkname SvdLowrank py.svd_lowrank
  1782  func SvdLowrank(A *py.Object, q *py.Object, niter *py.Object, M *py.Object) *py.Object
  1783  
  1784  // unique(input, sorted=True, return_inverse=False, return_counts=False, dim=None) -> Tuple[Tensor, Tensor, Tensor]
  1785  //
  1786  //	Returns the unique elements of the input tensor.
  1787  //
  1788  //	.. note:: This function is different from :func:`torch.unique_consecutive` in the sense that
  1789  //	    this function also eliminates non-consecutive duplicate values.
  1790  //
  1791  //	.. note:: Currently in the CUDA implementation and the CPU implementation,
  1792  //	    `torch.unique` always sorts the tensor at the beginning regardless of the `sorted` argument.
  1793  //	    Sorting could be slow, so if your input tensor is already sorted, it is recommended to use
  1794  //	    :func:`torch.unique_consecutive` which avoids the sorting.
  1795  //
  1796  //	Args:
  1797  //	    input (Tensor): the input tensor
  1798  //	    sorted (bool): Whether to sort the unique elements in ascending order
  1799  //	        before returning as output.
  1800  //	    return_inverse (bool): Whether to also return the indices for where
  1801  //	        elements in the original input ended up in the returned unique list.
  1802  //	    return_counts (bool): Whether to also return the counts for each unique
  1803  //	        element.
  1804  //	    dim (int, optional): the dimension to operate upon. If ``None``, the
  1805  //	        unique of the flattened input is returned. Otherwise, each of the
  1806  //	        tensors indexed by the given dimension is treated as one of the
  1807  //	        elements to apply the unique operation upon. See examples for more
  1808  //	        details. Default: ``None``
  1809  //
  1810  //	Returns:
  1811  //	    (Tensor, Tensor (optional), Tensor (optional)): A tensor or a tuple of tensors containing
  1812  //
  1813  //	        - **output** (*Tensor*): the output list of unique scalar elements.
  1814  //	        - **inverse_indices** (*Tensor*): (optional) if
  1815  //	          :attr:`return_inverse` is True, there will be an additional
  1816  //	          returned tensor (same shape as input) representing the indices
  1817  //	          for where elements in the original input map to in the output;
  1818  //	          otherwise, this function will only return a single tensor.
  1819  //	        - **counts** (*Tensor*): (optional) if
  1820  //	          :attr:`return_counts` is True, there will be an additional
  1821  //	          returned tensor (same shape as output or output.size(dim),
  1822  //	          if dim was specified) representing the number of occurrences
  1823  //	          for each unique value or tensor.
  1824  //
  1825  //	Example::
  1826  //
  1827  //	    >>> output = torch.unique(torch.tensor([1, 3, 2, 3], dtype=torch.long))
  1828  //	    >>> output
  1829  //	    tensor([1, 2, 3])
  1830  //
  1831  //	    >>> output, inverse_indices = torch.unique(
  1832  //	    ...     torch.tensor([1, 3, 2, 3], dtype=torch.long), sorted=True, return_inverse=True)
  1833  //	    >>> output
  1834  //	    tensor([1, 2, 3])
  1835  //	    >>> inverse_indices
  1836  //	    tensor([0, 2, 1, 2])
  1837  //
  1838  //	    >>> output, inverse_indices = torch.unique(
  1839  //	    ...     torch.tensor([[1, 3], [2, 3]], dtype=torch.long), sorted=True, return_inverse=True)
  1840  //	    >>> output
  1841  //	    tensor([1, 2, 3])
  1842  //	    >>> inverse_indices
  1843  //	    tensor([[0, 2],
  1844  //	            [1, 2]])
  1845  //
  1846  //	    >>> a = torch.tensor([
  1847  //	    ...     [
  1848  //	    ...         [1, 1, 0, 0],
  1849  //	    ...         [1, 1, 0, 0],
  1850  //	    ...         [0, 0, 1, 1],
  1851  //	    ...     ],
  1852  //	    ...     [
  1853  //	    ...         [0, 0, 1, 1],
  1854  //	    ...         [0, 0, 1, 1],
  1855  //	    ...         [1, 1, 1, 1],
  1856  //	    ...     ],
  1857  //	    ...     [
  1858  //	    ...         [1, 1, 0, 0],
  1859  //	    ...         [1, 1, 0, 0],
  1860  //	    ...         [0, 0, 1, 1],
  1861  //	    ...     ],
  1862  //	    ... ])
  1863  //
  1864  //	    >>> # If we call `torch.unique(a, dim=0)`, each of the tensors `a[idx, :, :]`
  1865  //	    >>> # will be compared. We can see that `a[0, :, :]` and `a[2, :, :]` match
  1866  //	    >>> # each other, so one of them will be removed.
  1867  //	    >>> (a[0, :, :] == a[2, :, :]).all()
  1868  //	    tensor(True)
  1869  //	    >>> a_unique_dim0 = torch.unique(a, dim=0)
  1870  //	    >>> a_unique_dim0
  1871  //	    tensor([[[0, 0, 1, 1],
  1872  //	             [0, 0, 1, 1],
  1873  //	             [1, 1, 1, 1]],
  1874  //	            [[1, 1, 0, 0],
  1875  //	             [1, 1, 0, 0],
  1876  //	             [0, 0, 1, 1]]])
  1877  //
  1878  //	    >>> # Notice which sub-tensors from `a` match with the sub-tensors from
  1879  //	    >>> # `a_unique_dim0`:
  1880  //	    >>> (a_unique_dim0[0, :, :] == a[1, :, :]).all()
  1881  //	    tensor(True)
  1882  //	    >>> (a_unique_dim0[1, :, :] == a[0, :, :]).all()
  1883  //	    tensor(True)
  1884  //
  1885  //	    >>> # For `torch.unique(a, dim=1)`, each of the tensors `a[:, idx, :]` are
  1886  //	    >>> # compared. `a[:, 0, :]` and `a[:, 1, :]` match each other, so one of
  1887  //	    >>> # them will be removed.
  1888  //	    >>> (a[:, 0, :] == a[:, 1, :]).all()
  1889  //	    tensor(True)
  1890  //	    >>> torch.unique(a, dim=1)
  1891  //	    tensor([[[0, 0, 1, 1],
  1892  //	             [1, 1, 0, 0]],
  1893  //	            [[1, 1, 1, 1],
  1894  //	             [0, 0, 1, 1]],
  1895  //	            [[0, 0, 1, 1],
  1896  //	             [1, 1, 0, 0]]])
  1897  //
  1898  //	    >>> # For `torch.unique(a, dim=2)`, the tensors `a[:, :, idx]` are compared.
  1899  //	    >>> # `a[:, :, 0]` and `a[:, :, 1]` match each other. Also, `a[:, :, 2]` and
  1900  //	    >>> # `a[:, :, 3]` match each other as well. So in this case, two of the
  1901  //	    >>> # sub-tensors will be removed.
  1902  //	    >>> (a[:, :, 0] == a[:, :, 1]).all()
  1903  //	    tensor(True)
  1904  //	    >>> (a[:, :, 2] == a[:, :, 3]).all()
  1905  //	    tensor(True)
  1906  //	    >>> torch.unique(a, dim=2)
  1907  //	    tensor([[[0, 1],
  1908  //	             [0, 1],
  1909  //	             [1, 0]],
  1910  //	            [[1, 0],
  1911  //	             [1, 0],
  1912  //	             [1, 1]],
  1913  //	            [[0, 1],
  1914  //	             [0, 1],
  1915  //	             [1, 0]]])
  1916  //
  1917  //go:linkname Unique py.unique
  1918  func Unique(__llgo_va_list ...interface{}) *py.Object
  1919  
  1920  // Converts a tensor of flat indices into a tuple of coordinate tensors that
  1921  //
  1922  //	index into an arbitrary tensor of the specified shape.
  1923  //
  1924  //	Args:
  1925  //	    indices (Tensor): An integer tensor containing indices into the
  1926  //	        flattened version of an arbitrary tensor of shape :attr:`shape`.
  1927  //	        All elements must be in the range ``[0, prod(shape) - 1]``.
  1928  //
  1929  //	    shape (int, sequence of ints, or torch.Size): The shape of the arbitrary
  1930  //	        tensor. All elements must be non-negative.
  1931  //
  1932  //	Returns:
  1933  //	    tuple of Tensors: Each ``i``-th tensor in the output corresponds with
  1934  //	    dimension ``i`` of :attr:`shape`. Each tensor has the same shape as
  1935  //	    ``indices`` and contains one index into dimension ``i`` for each of the
  1936  //	    flat indices given by ``indices``.
  1937  //
  1938  //	Example::
  1939  //
  1940  //	    >>> import torch
  1941  //	    >>> torch.unravel_index(torch.tensor(4), (3, 2))
  1942  //	    (tensor(2),
  1943  //	     tensor(0))
  1944  //
  1945  //	    >>> torch.unravel_index(torch.tensor([4, 1]), (3, 2))
  1946  //	    (tensor([2, 0]),
  1947  //	     tensor([0, 1]))
  1948  //
  1949  //	    >>> torch.unravel_index(torch.tensor([0, 1, 2, 3, 4, 5]), (3, 2))
  1950  //	    (tensor([0, 0, 1, 1, 2, 2]),
  1951  //	     tensor([0, 1, 0, 1, 0, 1]))
  1952  //
  1953  //	    >>> torch.unravel_index(torch.tensor([1234, 5678]), (10, 10, 10, 10))
  1954  //	    (tensor([1, 5]),
  1955  //	     tensor([2, 6]),
  1956  //	     tensor([3, 7]),
  1957  //	     tensor([4, 8]))
  1958  //
  1959  //	    >>> torch.unravel_index(torch.tensor([[1234], [5678]]), (10, 10, 10, 10))
  1960  //	    (tensor([[1], [5]]),
  1961  //	     tensor([[2], [6]]),
  1962  //	     tensor([[3], [7]]),
  1963  //	     tensor([[4], [8]]))
  1964  //
  1965  //	    >>> torch.unravel_index(torch.tensor([[1234], [5678]]), (100, 100))
  1966  //	    (tensor([[12], [56]]),
  1967  //	     tensor([[34], [78]]))
  1968  //
  1969  //go:linkname UnravelIndex py.unravel_index
  1970  func UnravelIndex(indices *py.Object, shape *py.Object) *py.Object
  1971  
  1972  // Returns whether PyTorch was built with _GLIBCXX_USE_CXX11_ABI=1
  1973  //
  1974  //go:linkname CompiledWithCxx11Abi py.compiled_with_cxx11_abi
  1975  func CompiledWithCxx11Abi() *py.Object
  1976  
  1977  // Find the k largest (or smallest) eigenvalues and the corresponding
  1978  //
  1979  //	eigenvectors of a symmetric positive definite generalized
  1980  //	eigenvalue problem using matrix-free LOBPCG methods.
  1981  //
  1982  //	This function is a front-end to the following LOBPCG algorithms
  1983  //	selectable via the `method` argument:
  1984  //
  1985  //	  `method="basic"` - the LOBPCG method introduced by Andrew
  1986  //	  Knyazev, see [Knyazev2001]. A less robust method that may fail when
  1987  //	  Cholesky is applied to singular input.
  1988  //
  1989  //	  `method="ortho"` - the LOBPCG method with orthogonal basis
  1990  //	  selection [StathopoulosEtal2002]. A robust method.
  1991  //
  1992  //	Supported inputs are dense, sparse, and batches of dense matrices.
  1993  //
  1994  //	.. note:: In general, the basic method spends the least time per
  1995  //	  iteration. However, the robust methods converge much faster and
  1996  //	  are more stable. So, the usage of the basic method is generally
  1997  //	  not recommended but there exist cases where the usage of the
  1998  //	  basic method may be preferred.
  1999  //
  2000  //	.. warning:: The backward method does not support sparse and complex inputs.
  2001  //	  It works only when `B` is not provided (i.e. `B == None`).
  2002  //	  We are actively working on extensions, and the details of
  2003  //	  the algorithms are going to be published promptly.
  2004  //
  2005  //	.. warning:: While it is assumed that `A` is symmetric, `A.grad` is not.
  2006  //	  To make sure that `A.grad` is symmetric, so that `A - t * A.grad` is symmetric
  2007  //	  in first-order optimization routines, prior to running `lobpcg`
  2008  //	  we do the following symmetrization map: `A -> (A + A.t()) / 2`.
  2009  //	  The map is performed only when the `A` requires gradients.
  2010  //
  2011  //	Args:
  2012  //
  2013  //	  A (Tensor): the input tensor of size :math:`(*, m, m)`
  2014  //
  2015  //	  B (Tensor, optional): the input tensor of size :math:`(*, m,
  2016  //	              m)`. When not specified, `B` is interpreted as
  2017  //	              identity matrix.
  2018  //
  2019  //	  X (tensor, optional): the input tensor of size :math:`(*, m, n)`
  2020  //	              where `k <= n <= m`. When specified, it is used as
  2021  //	              initial approximation of eigenvectors. X must be a
  2022  //	              dense tensor.
  2023  //
  2024  //	  iK (tensor, optional): the input tensor of size :math:`(*, m,
  2025  //	              m)`. When specified, it will be used as preconditioner.
  2026  //
  2027  //	  k (integer, optional): the number of requested
  2028  //	              eigenpairs. Default is the number of :math:`X`
  2029  //	              columns (when specified) or `1`.
  2030  //
  2031  //	  n (integer, optional): if :math:`X` is not specified then `n`
  2032  //	              specifies the size of the generated random
  2033  //	              approximation of eigenvectors. Default value for `n`
  2034  //	              is `k`. If :math:`X` is specified, the value of `n`
  2035  //	              (when specified) must be the number of :math:`X`
  2036  //	              columns.
  2037  //
  2038  //	  tol (float, optional): residual tolerance for stopping
  2039  //	             criterion. Default is `feps ** 0.5` where `feps` is
  2040  //	             smallest non-zero floating-point number of the given
  2041  //	             input tensor `A` data type.
  2042  //
  2043  //	  largest (bool, optional): when True, solve the eigenproblem for
  2044  //	             the largest eigenvalues. Otherwise, solve the
  2045  //	             eigenproblem for smallest eigenvalues. Default is
  2046  //	             `True`.
  2047  //
  2048  //	  method (str, optional): select LOBPCG method. See the
  2049  //	             description of the function above. Default is
  2050  //	             "ortho".
  2051  //
  2052  //	  niter (int, optional): maximum number of iterations. When
  2053  //	             reached, the iteration process is hard-stopped and
  2054  //	             the current approximation of eigenpairs is returned.
  2055  //	             To iterate without a limit until the convergence
  2056  //	             criteria are met, use `-1`.
  2057  //
  2058  //	  tracker (callable, optional) : a function for tracing the
  2059  //	             iteration process. When specified, it is called at
  2060  //	             each iteration step with LOBPCG instance as an
  2061  //	             argument. The LOBPCG instance holds the full state of
  2062  //	             the iteration process in the following attributes:
  2063  //
  2064  //	               `iparams`, `fparams`, `bparams` - dictionaries of
  2065  //	               integer, float, and boolean valued input
  2066  //	               parameters, respectively
  2067  //
  2068  //	               `ivars`, `fvars`, `bvars`, `tvars` - dictionaries
  2069  //	               of integer, float, boolean, and Tensor valued
  2070  //	               iteration variables, respectively.
  2071  //
  2072  //	               `A`, `B`, `iK` - input Tensor arguments.
  2073  //
  2074  //	               `E`, `X`, `S`, `R` - iteration Tensor variables.
  2075  //
  2076  //	             For instance:
  2077  //
  2078  //	               `ivars["istep"]` - the current iteration step
  2079  //	               `X` - the current approximation of eigenvectors
  2080  //	               `E` - the current approximation of eigenvalues
  2081  //	               `R` - the current residual
  2082  //	               `ivars["converged_count"]` - the current number of converged eigenpairs
  2083  //	               `tvars["rerr"]` - the current state of convergence criteria
  2084  //
  2085  //	             Note that when `tracker` stores Tensor objects from
  2086  //	             the LOBPCG instance, it must make copies of these.
  2087  //
  2088  //	             If `tracker` sets `bvars["force_stop"] = True`, the
  2089  //	             iteration process will be hard-stopped.
  2090  //
  2091  //	  ortho_iparams, ortho_fparams, ortho_bparams (dict, optional):
  2092  //	             various parameters to LOBPCG algorithm when using
  2093  //	             `method="ortho"`.
  2094  //
  2095  //	Returns:
  2096  //
  2097  //	  E (Tensor): tensor of eigenvalues of size :math:`(*, k)`
  2098  //
  2099  //	  X (Tensor): tensor of eigenvectors of size :math:`(*, m, k)`
  2100  //
  2101  //	References:
  2102  //
  2103  //	  [Knyazev2001] Andrew V. Knyazev. (2001) Toward the Optimal
  2104  //	  Preconditioned Eigensolver: Locally Optimal Block Preconditioned
  2105  //	  Conjugate Gradient Method. SIAM J. Sci. Comput., 23(2),
  2106  //	  517-541. (25 pages)
  2107  //	  https://epubs.siam.org/doi/abs/10.1137/S1064827500366124
  2108  //
  2109  //	  [StathopoulosEtal2002] Andreas Stathopoulos and Kesheng
  2110  //	  Wu. (2002) A Block Orthogonalization Procedure with Constant
  2111  //	  Synchronization Requirements. SIAM J. Sci. Comput., 23(6),
  2112  //	  2165-2182. (18 pages)
  2113  //	  https://epubs.siam.org/doi/10.1137/S1064827500370883
  2114  //
  2115  //	  [DuerschEtal2018] Jed A. Duersch, Meiyue Shao, Chao Yang, Ming
  2116  //	  Gu. (2018) A Robust and Efficient Implementation of LOBPCG.
  2117  //	  SIAM J. Sci. Comput., 40(5), C655-C676. (22 pages)
  2118  //	  https://epubs.siam.org/doi/abs/10.1137/17M1129830
  2119  //
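        //	Example (an illustrative sketch, not part of the upstream docstring)::
        //
        //	    >>> A = torch.randn(10, 10)
        //	    >>> A = A @ A.mT + 10 * torch.eye(10)   # make A symmetric positive definite
        //	    >>> E, X = torch.lobpcg(A, k=2, largest=True)
        //	    >>> E.shape, X.shape
        //	    (torch.Size([2]), torch.Size([10, 2]))
        //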
  2120  //go:linkname Lobpcg py.lobpcg
  2121  func Lobpcg(A *py.Object, k *py.Object, B *py.Object, X *py.Object, n *py.Object, iK *py.Object, niter *py.Object, tol *py.Object, largest *py.Object, method *py.Object, tracker *py.Object, orthoIparams *py.Object, orthoFparams *py.Object, orthoBparams *py.Object) *py.Object
  2122  
  2123  // from_dlpack(ext_tensor) -> Tensor
  2124  //
  2125  //	Converts a tensor from an external library into a ``torch.Tensor``.
  2126  //
  2127  //	The returned PyTorch tensor will share the memory with the input tensor
  2128  //	(which may have come from another library). Note that in-place operations
  2129  //	will therefore also affect the data of the input tensor. This may lead to
  2130  //	unexpected issues (e.g., other libraries may have read-only flags or
  2131  //	immutable data structures), so the user should only do this if they know
  2132  //	for sure that this is fine.
  2133  //
  2134  //	Args:
  2135  //	    ext_tensor (object with ``__dlpack__`` attribute, or a DLPack capsule):
  2136  //	        The tensor or DLPack capsule to convert.
  2137  //
  2138  //	        If ``ext_tensor`` is a tensor (or ndarray) object, it must support
  2139  //	        the ``__dlpack__`` protocol (i.e., have a ``ext_tensor.__dlpack__``
  2140  //	        method). Otherwise ``ext_tensor`` may be a DLPack capsule, which is
  2141  //	        an opaque ``PyCapsule`` instance, typically produced by a
  2142  //	        ``to_dlpack`` function or method.
  2143  //
  2144  //	Examples::
  2145  //
  2146  //	    >>> import torch.utils.dlpack
  2147  //	    >>> t = torch.arange(4)
  2148  //
  2149  //	    # Convert a tensor directly (supported in PyTorch >= 1.10)
  2150  //	    >>> t2 = torch.from_dlpack(t)
  2151  //	    >>> t2[:2] = -1  # show that memory is shared
  2152  //	    >>> t2
  2153  //	    tensor([-1, -1,  2,  3])
  2154  //	    >>> t
  2155  //	    tensor([-1, -1,  2,  3])
  2156  //
  2157  //	    # The old-style DLPack usage, with an intermediate capsule object
  2158  //	    >>> capsule = torch.utils.dlpack.to_dlpack(t)
  2159  //	    >>> capsule
  2160  //	    <capsule object "dltensor" at ...>
  2161  //	    >>> t3 = torch.from_dlpack(capsule)
  2162  //	    >>> t3
  2163  //	    tensor([-1, -1,  2,  3])
  2164  //	    >>> t3[0] = -9  # now we're sharing memory between 3 tensors
  2165  //	    >>> t3
  2166  //	    tensor([-9, -1,  2,  3])
  2167  //	    >>> t2
  2168  //	    tensor([-9, -1,  2,  3])
  2169  //	    >>> t
  2170  //	    tensor([-9, -1,  2,  3])
  2171  //
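        //	    # Interop with another library (a hedged sketch; assumes a NumPy
        //	    # version new enough to implement the ``__dlpack__`` protocol)
        //	    >>> import numpy as np
        //	    >>> arr = np.arange(4)
        //	    >>> t4 = torch.from_dlpack(arr)
        //	    >>> t4[0] = 7  # memory is shared with the NumPy array
        //	    >>> arr
        //	    array([7, 1, 2, 3])
        //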
  2172  //go:linkname FromDlpack py.from_dlpack
  2173  func FromDlpack(extTensor *py.Object) *py.Object
  2174  
  2175  // Optimizes given model/function using TorchDynamo and specified backend.
  2176  //
  2177  // Concretely, for every frame executed within the compiled region, we will attempt
  2178  // to compile it and cache the compiled result on the code object for future
  2179  // use.  A single frame may be compiled multiple times if previous compiled
  2180  // results are not applicable for subsequent calls (this is called a "guard
  2181  // failure"), you can use TORCH_LOGS=guards to debug these situations.
  2182  // Multiple compiled results can be associated with a frame up to
  2183  // ``torch._dynamo.config.cache_size_limit``, which defaults to 64; at which
  2184  // point we will fall back to eager.  Note that compile caches are per
  2185  // *code object*, not frame; if you dynamically create multiple copies of a
  2186  // function, they will all share the same code cache.
  2187  //
  2188  // Args:
  2189  //
  2190  //	model (Callable): Module/function to optimize
  2191  //	fullgraph (bool): If False (default), torch.compile attempts to discover compileable regions
  2192  //	 in the function that it will optimize. If True, then we require that the entire function be
  2193  //	 capturable into a single graph. If this is not possible (that is, if there are graph breaks),
  2194  //	 then this will raise an error.
  2195  //	dynamic (bool or None): Use dynamic shape tracing.  When this is True, we will up-front attempt
  2196  //	 to generate a kernel that is as dynamic as possible to avoid recompilations when
  2197  //	 sizes change.  This may not always work as some operations/optimizations will
  2198  //	 force specialization; use TORCH_LOGS=dynamic to debug overspecialization.
  2199  //	 When this is False, we will NEVER generate dynamic kernels; we will always specialize.
  2200  //	 By default (None), we automatically detect if dynamism has occurred and compile a more
  2201  //	 dynamic kernel upon recompile.
  2202  //	backend (str or Callable): backend to be used
  2203  //
  2204  //	 - "inductor" is the default backend, which is a good balance between performance and overhead
  2205  //
  2206  //	 - Non experimental in-tree backends can be seen with `torch._dynamo.list_backends()`
  2207  //
  2208  //	 - Experimental or debug in-tree backends can be seen with `torch._dynamo.list_backends(None)`
  2209  //
  2210  //	 - To register an out-of-tree custom backend: https://pytorch.org/docs/main/compile/custom-backends.html
  2211  //	mode (str): Can be either "default", "reduce-overhead", "max-autotune" or "max-autotune-no-cudagraphs"
  2212  //
  2213  //	 - "default" is the default mode, which is a good balance between performance and overhead
  2214  //
  2215  //	 - "reduce-overhead" is a mode that reduces the overhead of python with CUDA graphs,
  2216  //	   useful for small batches.  Reduction of overhead can come at the cost of more memory
  2217  //	   usage, as we will cache the workspace memory required for the invocation so that we
  2218  //	   do not have to reallocate it on subsequent runs.  Reduction of overhead is not guaranteed
  2219  //	   to work; today, we only reduce overhead for CUDA-only graphs which do not mutate inputs.
  2220  //	   There are other circumstances where CUDA graphs are not applicable; use TORCH_LOGS=perf_hints
  2221  //	   to debug.
  2222  //
  2223  //	 - "max-autotune" is a mode that leverages Triton-based matrix multiplications and convolutions.
  2224  //	   It enables CUDA graphs by default.
  2225  //
  2226  //	 - "max-autotune-no-cudagraphs" is a mode similar to "max-autotune" but without CUDA graphs
  2227  //
  2228  //	 - To see the exact configs that each mode sets you can call `torch._inductor.list_mode_options()`
  2229  //
  2230  //	options (dict): A dictionary of options to pass to the backend. Some notable ones to try out are
  2231  //
  2232  //	 - `epilogue_fusion` which fuses pointwise ops into templates. Requires `max_autotune` to also be set
  2233  //
  2234  //	 - `max_autotune` which will profile to pick the best matmul configuration
  2235  //
  2236  //	 - `fallback_random` which is useful when debugging accuracy issues
  2237  //
  2238  //	 - `shape_padding` which pads matrix shapes to better align loads on GPUs especially for tensor cores
  2239  //
  2240  //	 - `triton.cudagraphs` which will reduce the overhead of python with CUDA graphs
  2241  //
  2242  //	 - `trace.enabled` which is the most useful debugging flag to turn on
  2243  //
  2244  //	 - `trace.graph_diagram` which will show you a picture of your graph after fusion
  2245  //
  2246  //	 - For inductor you can see the full list of configs that it supports by calling `torch._inductor.list_options()`
  2247  //	disable (bool): Turn torch.compile() into a no-op for testing
  2248  //
  2249  // Example::
  2250  //
  2251  //	@torch.compile(options={"triton.cudagraphs": True}, fullgraph=True)
  2252  //	def foo(x):
  2253  //	    return torch.sin(x) + torch.cos(x)
  2254  //
  2255  //go:linkname Compile py.compile
  2256  func Compile(model *py.Object) *py.Object
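
        // A minimal Go usage sketch for Compile (illustrative only). The generated
        // binding exposes just the positional `model` argument; the keyword options
        // documented above are not covered here. `model` is assumed to be a Python
        // callable (e.g. a function or nn.Module) already held as a *py.Object.
        //
        //	var model *py.Object // assumed: obtained from the Python side elsewhere
        //	compiled := torch.Compile(model)
        //	_ = compiled // call it in place of the original model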
  2257  
  2258  // Conditionally applies `true_fn` or `false_fn`.
  2259  //
  2260  // .. warning::
  2261  //
  2262  //	`torch.cond` is a prototype feature in PyTorch. It has limited support for input and output types and
  2263  //	doesn't support training currently. Please look forward to a more stable implementation in a future version of PyTorch.
  2264  //	Read more about feature classification at: https://pytorch.org/blog/pytorch-feature-classification-changes/#prototype
  2265  //
  2266  // `cond` is a structured control flow operator. That is, it is like a Python if-statement,
  2267  // but has restrictions on `true_fn`, `false_fn`, and `operands` that enable it to be
  2268  // capturable using torch.compile and torch.export.
  2269  //
  2270  // Assuming the constraints on `cond`'s arguments are met, `cond` is equivalent to the following::
  2271  //
  2272  //	def cond(pred, true_branch, false_branch, operands):
  2273  //	    if pred:
  2274  //	        return true_branch(*operands)
  2275  //	    else:
  2276  //	        return false_branch(*operands)
  2277  //
  2278  // Args:
  2279  //
  2280  //	pred (Union[bool, torch.Tensor]): A boolean expression or a tensor with one element,
  2281  //	  indicating which branch function to apply.
  2282  //
  2283  //	true_fn (Callable): A callable function (a -> b) that is within the
  2284  //	  scope that is being traced.
  2285  //
  2286  //	false_fn (Callable): A callable function (a -> b) that is within the
  2287  //	  scope that is being traced. The true branch and false branch must
  2288  //	  have consistent inputs and outputs, meaning the inputs have to be
  2289  //	  the same, and the outputs have to be the same type and shape.
  2290  //
  2291  //	operands (Tuple of possibly nested dict/list/tuple of torch.Tensor): A tuple of inputs to the true/false functions.
  2292  //
  2293  // Example::
  2294  //
  2295  //	def true_fn(x: torch.Tensor):
  2296  //	    return x.cos()
  2297  //	def false_fn(x: torch.Tensor):
  2298  //	    return x.sin()
  2299  //	return cond(x.shape[0] > 4, true_fn, false_fn, (x,))
  2300  //
  2301  // Restrictions:
  2302  //
  2303  //   - The conditional statement (aka `pred`) must meet one of the following constraints:
  2304  //
  2305  //   - It's a `torch.Tensor` with only one element, and torch.bool dtype
  2306  //
  2307  //   - It's a boolean expression, e.g. `x.shape[0] > 10` or `x.dim() > 1 and x.shape[1] > 10`
  2308  //
  2309  //   - The branch function (aka `true_fn`/`false_fn`) must meet all of the following constraints:
  2310  //
  2311  //   - The function signature must match the operands.
  2312  //
  2313  //   - The function must return a tensor with the same metadata, e.g. shape,
  2314  //     dtype, etc.
  2315  //
  2316  //   - The function cannot have in-place mutations on inputs or global variables.
  2317  //     (Note: in-place tensor operations such as `add_` for intermediate results
  2318  //     are allowed in a branch)
  2319  //
  2320  // .. warning::
  2321  //
  2322  //	Temporal Limitations:
  2323  //
  2324  //	- `cond` only supports **inference** right now. Autograd will be supported in the future.
  2325  //
  2326  //	- The **output** of branches must be a **single Tensor**. Pytree of tensors will be supported in the future.
  2327  //
  2328  //go:linkname Cond py.cond
  2329  func Cond(pred *py.Object, trueFn *py.Object, falseFn *py.Object, operands *py.Object) *py.Object
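
        // A minimal Go usage sketch for Cond (illustrative only). All four arguments
        // are assumed to be *py.Object values prepared on the Python side: `pred` a
        // one-element bool tensor or Python bool, `trueFn`/`falseFn` Python callables
        // obeying the restrictions above, and `operands` a tuple of tensors.
        //
        //	var pred, trueFn, falseFn, operands *py.Object // assumed: built elsewhere
        //	out := torch.Cond(pred, trueFn, falseFn, operands)
        //	_ = out // the tensor returned by whichever branch ran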
  2330  
  2331  // See https://pytorch.org/docs/stable/generated/torch.get_num_threads.html
  2332  //
  2333  //go:linkname GetNumThreads py.get_num_threads
  2334  func GetNumThreads() *py.Object
  2335  
  2336  // See https://pytorch.org/docs/stable/generated/torch.set_num_threads.html
  2337  //
  2338  //go:linkname SetNumThreads py.set_num_threads
  2339  func SetNumThreads(int *py.Object) *py.Object
  2340  
  2341  // See https://pytorch.org/docs/stable/generated/torch.get_num_interop_threads.html
  2342  //
  2343  //go:linkname GetNumInteropThreads py.get_num_interop_threads
  2344  func GetNumInteropThreads() *py.Object
  2345  
  2346  // See https://pytorch.org/docs/stable/generated/torch.set_num_interop_threads.html
  2347  //
  2348  //go:linkname SetNumInteropThreads py.set_num_interop_threads
  2349  func SetNumInteropThreads(int *py.Object) *py.Object
  2350  
  2351  // See https://pytorch.org/docs/stable/generated/torch.set_flush_denormal.html
  2352  //
  2353  //go:linkname SetFlushDenormal py.set_flush_denormal
  2354  func SetFlushDenormal(mode *py.Object) *py.Object
  2355  
  2356  // See https://pytorch.org/docs/stable/generated/torch.get_default_dtype.html
  2357  //
  2358  //go:linkname GetDefaultDtype py.get_default_dtype
  2359  func GetDefaultDtype() *py.Object
  2360  
  2361  // See https://pytorch.org/docs/stable/generated/torch.is_grad_enabled.html
  2362  //
  2363  //go:linkname IsGradEnabled py.is_grad_enabled
  2364  func IsGradEnabled() *py.Object
  2365  
  2366  // See https://pytorch.org/docs/stable/generated/torch.is_inference_mode_enabled.html
  2367  //
  2368  //go:linkname IsInferenceModeEnabled py.is_inference_mode_enabled
  2369  func IsInferenceModeEnabled() *py.Object
  2370  
  2371  // See https://pytorch.org/docs/stable/generated/torch.sym_ite.html
  2372  //
  2373  //go:linkname SymIte py.sym_ite
  2374  func SymIte(b *py.Object, t *py.Object, f *py.Object) *py.Object
  2375  
  2376  // See https://pytorch.org/docs/stable/generated/torch.abs.html
  2377  //
  2378  //go:linkname Abs py.abs
  2379  func Abs(input *py.Object) *py.Object
  2380  
  2381  // See https://pytorch.org/docs/stable/generated/torch.absolute.html
  2382  //
  2383  //go:linkname Absolute py.absolute
  2384  func Absolute(input *py.Object) *py.Object
  2385  
  2386  // See https://pytorch.org/docs/stable/generated/torch.acos.html
  2387  //
  2388  //go:linkname Acos py.acos
  2389  func Acos(input *py.Object) *py.Object
  2390  
  2391  // See https://pytorch.org/docs/stable/generated/torch.acosh.html
  2392  //
  2393  //go:linkname Acosh py.acosh
  2394  func Acosh(input *py.Object) *py.Object
  2395  
  2396  // See https://pytorch.org/docs/stable/generated/torch.add.html
  2397  //
  2398  //go:linkname Add py.add
  2399  func Add(input *py.Object, other *py.Object) *py.Object
  2400  
  2401  // See https://pytorch.org/docs/stable/generated/torch.addbmm.html
  2402  //
  2403  //go:linkname Addbmm py.addbmm
  2404  func Addbmm(input *py.Object, batch1 *py.Object, batch2 *py.Object) *py.Object
  2405  
  2406  // See https://pytorch.org/docs/stable/generated/torch.addcdiv.html
  2407  //
  2408  //go:linkname Addcdiv py.addcdiv
  2409  func Addcdiv(input *py.Object, tensor1 *py.Object, tensor2 *py.Object) *py.Object
  2410  
  2411  // See https://pytorch.org/docs/stable/generated/torch.addcmul.html
  2412  //
  2413  //go:linkname Addcmul py.addcmul
  2414  func Addcmul(input *py.Object, tensor1 *py.Object, tensor2 *py.Object) *py.Object
  2415  
  2416  // See https://pytorch.org/docs/stable/generated/torch.addmm.html
  2417  //
  2418  //go:linkname Addmm py.addmm
  2419  func Addmm(input *py.Object, mat1 *py.Object, mat2 *py.Object) *py.Object
  2420  
  2421  // See https://pytorch.org/docs/stable/generated/torch.addmv.html
  2422  //
  2423  //go:linkname Addmv py.addmv
  2424  func Addmv(input *py.Object, mat *py.Object, vec *py.Object) *py.Object
  2425  
  2426  // See https://pytorch.org/docs/stable/generated/torch.addr.html
  2427  //
  2428  //go:linkname Addr py.addr
  2429  func Addr(input *py.Object, vec1 *py.Object, vec2 *py.Object) *py.Object
  2430  
  2431  // See https://pytorch.org/docs/stable/generated/torch.adjoint.html
  2432  //
  2433  //go:linkname Adjoint py.adjoint
  2434  func Adjoint(Tensor *py.Object) *py.Object
  2435  
  2436  // See https://pytorch.org/docs/stable/generated/torch.all.html
  2437  //
  2438  //go:linkname All py.all
  2439  func All(input *py.Object) *py.Object
  2440  
  2441  // See https://pytorch.org/docs/stable/generated/torch.allclose.html
  2442  //
  2443  //go:linkname Allclose py.allclose
  2444  func Allclose(input *py.Object, other *py.Object, rtol *py.Object, atol *py.Object, equalNan *py.Object) *py.Object
  2445  
  2446  // See https://pytorch.org/docs/stable/generated/torch.amax.html
  2447  //
  2448  //go:linkname Amax py.amax
  2449  func Amax(input *py.Object, dim *py.Object, keepdim *py.Object) *py.Object
  2450  
  2451  // See https://pytorch.org/docs/stable/generated/torch.amin.html
  2452  //
  2453  //go:linkname Amin py.amin
  2454  func Amin(input *py.Object, dim *py.Object, keepdim *py.Object) *py.Object
  2455  
  2456  // See https://pytorch.org/docs/stable/generated/torch.aminmax.html
  2457  //
  2458  //go:linkname Aminmax py.aminmax
  2459  func Aminmax(input *py.Object) *py.Object
  2460  
  2461  // See https://pytorch.org/docs/stable/generated/torch.angle.html
  2462  //
  2463  //go:linkname Angle py.angle
  2464  func Angle(input *py.Object) *py.Object
  2465  
  2466  // See https://pytorch.org/docs/stable/generated/torch.any.html
  2467  //
  2468  //go:linkname Any py.any
  2469  func Any(input *py.Object) *py.Object
  2470  
  2471  // See https://pytorch.org/docs/stable/generated/torch.arange.html
  2472  //
  2473  //go:linkname Arange py.arange
  2474  func Arange(start *py.Object, end *py.Object, step *py.Object) *py.Object
  2475  
  2476  // See https://pytorch.org/docs/stable/generated/torch.arccos.html
  2477  //
  2478  //go:linkname Arccos py.arccos
  2479  func Arccos(input *py.Object) *py.Object
  2480  
  2481  // See https://pytorch.org/docs/stable/generated/torch.arccosh.html
  2482  //
  2483  //go:linkname Arccosh py.arccosh
  2484  func Arccosh(input *py.Object) *py.Object
  2485  
  2486  // See https://pytorch.org/docs/stable/generated/torch.arcsin.html
  2487  //
  2488  //go:linkname Arcsin py.arcsin
  2489  func Arcsin(input *py.Object) *py.Object
  2490  
  2491  // See https://pytorch.org/docs/stable/generated/torch.arcsinh.html
  2492  //
  2493  //go:linkname Arcsinh py.arcsinh
  2494  func Arcsinh(input *py.Object) *py.Object
  2495  
  2496  // See https://pytorch.org/docs/stable/generated/torch.arctan.html
  2497  //
  2498  //go:linkname Arctan py.arctan
  2499  func Arctan(input *py.Object) *py.Object
  2500  
  2501  // See https://pytorch.org/docs/stable/generated/torch.arctan2.html
  2502  //
  2503  //go:linkname Arctan2 py.arctan2
  2504  func Arctan2(input *py.Object, other *py.Object) *py.Object
  2505  
  2506  // See https://pytorch.org/docs/stable/generated/torch.arctanh.html
  2507  //
  2508  //go:linkname Arctanh py.arctanh
  2509  func Arctanh(input *py.Object) *py.Object
  2510  
  2511  // See https://pytorch.org/docs/stable/generated/torch.argmax.html
  2512  //
  2513  //go:linkname Argmax py.argmax
  2514  func Argmax(input *py.Object) *py.Object
  2515  
  2516  // See https://pytorch.org/docs/stable/generated/torch.argmin.html
  2517  //
  2518  //go:linkname Argmin py.argmin
  2519  func Argmin(input *py.Object, dim *py.Object, keepdim *py.Object) *py.Object
  2520  
  2521  // See https://pytorch.org/docs/stable/generated/torch.argsort.html
  2522  //
  2523  //go:linkname Argsort py.argsort
  2524  func Argsort(input *py.Object, dim *py.Object, descending *py.Object, stable *py.Object) *py.Object
  2525  
  2526  // See https://pytorch.org/docs/stable/generated/torch.argwhere.html
  2527  //
  2528  //go:linkname Argwhere py.argwhere
  2529  func Argwhere(input *py.Object) *py.Object
  2530  
  2531  // See https://pytorch.org/docs/stable/generated/torch.as_strided.html
  2532  //
  2533  //go:linkname AsStrided py.as_strided
  2534  func AsStrided(input *py.Object, size *py.Object, stride *py.Object, storageOffset *py.Object) *py.Object
  2535  
  2536  // See https://pytorch.org/docs/stable/generated/torch.as_tensor.html
  2537  //
  2538  //go:linkname AsTensor py.as_tensor
  2539  func AsTensor(data *py.Object, dtype *py.Object, device *py.Object) *py.Object
  2540  
  2541  // See https://pytorch.org/docs/stable/generated/torch.asarray.html
  2542  //
  2543  //go:linkname Asarray py.asarray
  2544  func Asarray(obj *py.Object) *py.Object
  2545  
  2546  // See https://pytorch.org/docs/stable/generated/torch.asin.html
  2547  //
  2548  //go:linkname Asin py.asin
  2549  func Asin(input *py.Object) *py.Object
  2550  
  2551  // See https://pytorch.org/docs/stable/generated/torch.asinh.html
  2552  //
  2553  //go:linkname Asinh py.asinh
  2554  func Asinh(input *py.Object) *py.Object
  2555  
  2556  // See https://pytorch.org/docs/stable/generated/torch.atan.html
  2557  //
  2558  //go:linkname Atan py.atan
  2559  func Atan(input *py.Object) *py.Object
  2560  
  2561  // See https://pytorch.org/docs/stable/generated/torch.atan2.html
  2562  //
  2563  //go:linkname Atan2 py.atan2
  2564  func Atan2(input *py.Object, other *py.Object) *py.Object
  2565  
  2566  // See https://pytorch.org/docs/stable/generated/torch.atanh.html
  2567  //
  2568  //go:linkname Atanh py.atanh
  2569  func Atanh(input *py.Object) *py.Object
  2570  
  2571  // See https://pytorch.org/docs/stable/generated/torch.baddbmm.html
  2572  //
  2573  //go:linkname Baddbmm py.baddbmm
  2574  func Baddbmm(input *py.Object, batch1 *py.Object, batch2 *py.Object) *py.Object
  2575  
  2576  // See https://pytorch.org/docs/stable/generated/torch.bartlett_window.html
  2577  //
  2578  //go:linkname BartlettWindow py.bartlett_window
  2579  func BartlettWindow(windowLength *py.Object, periodic *py.Object) *py.Object
  2580  
  2581  // See https://pytorch.org/docs/stable/generated/torch.bernoulli.html
  2582  //
  2583  //go:linkname Bernoulli py.bernoulli
  2584  func Bernoulli(input *py.Object) *py.Object
  2585  
  2586  // See https://pytorch.org/docs/stable/generated/torch.bincount.html
  2587  //
  2588  //go:linkname Bincount py.bincount
  2589  func Bincount(input *py.Object, weights *py.Object, minlength *py.Object) *py.Object
  2590  
  2591  // See https://pytorch.org/docs/stable/generated/torch.bitwise_and.html
  2592  //
  2593  //go:linkname BitwiseAnd py.bitwise_and
  2594  func BitwiseAnd(input *py.Object, other *py.Object) *py.Object
  2595  
  2596  // See https://pytorch.org/docs/stable/generated/torch.bitwise_left_shift.html
  2597  //
  2598  //go:linkname BitwiseLeftShift py.bitwise_left_shift
  2599  func BitwiseLeftShift(input *py.Object, other *py.Object) *py.Object
  2600  
  2601  // See https://pytorch.org/docs/stable/generated/torch.bitwise_not.html
  2602  //
  2603  //go:linkname BitwiseNot py.bitwise_not
  2604  func BitwiseNot(input *py.Object) *py.Object
  2605  
  2606  // See https://pytorch.org/docs/stable/generated/torch.bitwise_or.html
  2607  //
  2608  //go:linkname BitwiseOr py.bitwise_or
  2609  func BitwiseOr(input *py.Object, other *py.Object) *py.Object
  2610  
  2611  // See https://pytorch.org/docs/stable/generated/torch.bitwise_right_shift.html
  2612  //
  2613  //go:linkname BitwiseRightShift py.bitwise_right_shift
  2614  func BitwiseRightShift(input *py.Object, other *py.Object) *py.Object
  2615  
  2616  // See https://pytorch.org/docs/stable/generated/torch.bitwise_xor.html
  2617  //
  2618  //go:linkname BitwiseXor py.bitwise_xor
  2619  func BitwiseXor(input *py.Object, other *py.Object) *py.Object
  2620  
  2621  // See https://pytorch.org/docs/stable/generated/torch.blackman_window.html
  2622  //
  2623  //go:linkname BlackmanWindow py.blackman_window
  2624  func BlackmanWindow(windowLength *py.Object, periodic *py.Object) *py.Object
  2625  
  2626  // See https://pytorch.org/docs/stable/generated/torch.bmm.html
  2627  //
  2628  //go:linkname Bmm py.bmm
  2629  func Bmm(input *py.Object, mat2 *py.Object) *py.Object
  2630  
  2631  // See https://pytorch.org/docs/stable/generated/torch.broadcast_to.html
  2632  //
  2633  //go:linkname BroadcastTo py.broadcast_to
  2634  func BroadcastTo(input *py.Object, shape *py.Object) *py.Object
  2635  
  2636  // See https://pytorch.org/docs/stable/generated/torch.bucketize.html
  2637  //
  2638  //go:linkname Bucketize py.bucketize
  2639  func Bucketize(input *py.Object, boundaries *py.Object) *py.Object
  2640  
  2641  // See https://pytorch.org/docs/stable/generated/torch.can_cast.html
  2642  //
  2643  //go:linkname CanCast py.can_cast
  2644  func CanCast(from *py.Object, to *py.Object) *py.Object
  2645  
  2646  // See https://pytorch.org/docs/stable/generated/torch.cat.html
  2647  //
  2648  //go:linkname Cat py.cat
  2649  func Cat(tensors *py.Object, dim *py.Object) *py.Object
  2650  
  2651  // See https://pytorch.org/docs/stable/generated/torch.ceil.html
  2652  //
  2653  //go:linkname Ceil py.ceil
  2654  func Ceil(input *py.Object) *py.Object
  2655  
  2656  // See https://pytorch.org/docs/stable/generated/torch.cholesky.html
  2657  //
  2658  //go:linkname Cholesky py.cholesky
  2659  func Cholesky(input *py.Object, upper *py.Object) *py.Object
  2660  
  2661  // See https://pytorch.org/docs/stable/generated/torch.cholesky_inverse.html
  2662  //
  2663  //go:linkname CholeskyInverse py.cholesky_inverse
  2664  func CholeskyInverse(L *py.Object, upper *py.Object) *py.Object
  2665  
  2666  // See https://pytorch.org/docs/stable/generated/torch.cholesky_solve.html
  2667  //
  2668  //go:linkname CholeskySolve py.cholesky_solve
  2669  func CholeskySolve(B *py.Object, L *py.Object, upper *py.Object) *py.Object
  2670  
  2671  // See https://pytorch.org/docs/stable/generated/torch.chunk.html
  2672  //
  2673  //go:linkname Chunk py.chunk
  2674  func Chunk(input *py.Object, chunks *py.Object, dim *py.Object) *py.Object
  2675  
  2676  // See https://pytorch.org/docs/stable/generated/torch.clamp.html
  2677  //
  2678  //go:linkname Clamp py.clamp
  2679  func Clamp(input *py.Object, min *py.Object, max *py.Object) *py.Object
  2680  
  2681  // See https://pytorch.org/docs/stable/generated/torch.clip.html
  2682  //
  2683  //go:linkname Clip py.clip
  2684  func Clip(input *py.Object, min *py.Object, max *py.Object) *py.Object
  2685  
  2686  // See https://pytorch.org/docs/stable/generated/torch.clone.html
  2687  //
  2688  //go:linkname Clone py.clone
  2689  func Clone(input *py.Object) *py.Object
  2690  
  2691  // See https://pytorch.org/docs/stable/generated/torch.column_stack.html
  2692  //
  2693  //go:linkname ColumnStack py.column_stack
  2694  func ColumnStack(tensors *py.Object) *py.Object
  2695  
  2696  // See https://pytorch.org/docs/stable/generated/torch.combinations.html
  2697  //
  2698  //go:linkname Combinations py.combinations
  2699  func Combinations(input *py.Object, r *py.Object, withReplacement *py.Object) *py.Object
  2700  
  2701  // See https://pytorch.org/docs/stable/generated/torch.complex.html
  2702  //
  2703  //go:linkname Complex py.complex
  2704  func Complex(real *py.Object, imag *py.Object) *py.Object
  2705  
  2706  // See https://pytorch.org/docs/stable/generated/torch.concat.html
  2707  //
  2708  //go:linkname Concat py.concat
  2709  func Concat(tensors *py.Object, dim *py.Object) *py.Object
  2710  
  2711  // See https://pytorch.org/docs/stable/generated/torch.concatenate.html
  2712  //
  2713  //go:linkname Concatenate py.concatenate
  2714  func Concatenate(tensors *py.Object, axis *py.Object, out *py.Object) *py.Object
  2715  
  2716  // See https://pytorch.org/docs/stable/generated/torch.conj.html
  2717  //
  2718  //go:linkname Conj py.conj
  2719  func Conj(input *py.Object) *py.Object
  2720  
  2721  // See https://pytorch.org/docs/stable/generated/torch.conj_physical.html
  2722  //
  2723  //go:linkname ConjPhysical py.conj_physical
  2724  func ConjPhysical(input *py.Object) *py.Object
  2725  
  2726  // See https://pytorch.org/docs/stable/generated/torch.copysign.html
  2727  //
  2728  //go:linkname Copysign py.copysign
  2729  func Copysign(input *py.Object, other *py.Object) *py.Object
  2730  
  2731  // See https://pytorch.org/docs/stable/generated/torch.corrcoef.html
  2732  //
  2733  //go:linkname Corrcoef py.corrcoef
  2734  func Corrcoef(input *py.Object) *py.Object
  2735  
  2736  // See https://pytorch.org/docs/stable/generated/torch.cos.html
  2737  //
  2738  //go:linkname Cos py.cos
  2739  func Cos(input *py.Object) *py.Object
  2740  
  2741  // See https://pytorch.org/docs/stable/generated/torch.cosh.html
  2742  //
  2743  //go:linkname Cosh py.cosh
  2744  func Cosh(input *py.Object) *py.Object
  2745  
  2746  // See https://pytorch.org/docs/stable/generated/torch.count_nonzero.html
  2747  //
  2748  //go:linkname CountNonzero py.count_nonzero
  2749  func CountNonzero(input *py.Object, dim *py.Object) *py.Object
  2750  
  2751  // See https://pytorch.org/docs/stable/generated/torch.cov.html
  2752  //
  2753  //go:linkname Cov py.cov
  2754  func Cov(input *py.Object) *py.Object
  2755  
  2756  // See https://pytorch.org/docs/stable/generated/torch.cross.html
  2757  //
  2758  //go:linkname Cross py.cross
  2759  func Cross(input *py.Object, other *py.Object, dim *py.Object) *py.Object
  2760  
  2761  // See https://pytorch.org/docs/stable/generated/torch.cummax.html
  2762  //
  2763  //go:linkname Cummax py.cummax
  2764  func Cummax(input *py.Object, dim *py.Object) *py.Object
  2765  
  2766  // See https://pytorch.org/docs/stable/generated/torch.cummin.html
  2767  //
  2768  //go:linkname Cummin py.cummin
  2769  func Cummin(input *py.Object, dim *py.Object) *py.Object
  2770  
  2771  // See https://pytorch.org/docs/stable/generated/torch.cumprod.html
  2772  //
  2773  //go:linkname Cumprod py.cumprod
  2774  func Cumprod(input *py.Object, dim *py.Object) *py.Object
  2775  
  2776  // See https://pytorch.org/docs/stable/generated/torch.cumsum.html
  2777  //
  2778  //go:linkname Cumsum py.cumsum
  2779  func Cumsum(input *py.Object, dim *py.Object) *py.Object
  2780  
  2781  // See https://pytorch.org/docs/stable/generated/torch.cumulative_trapezoid.html
  2782  //
  2783  //go:linkname CumulativeTrapezoid py.cumulative_trapezoid
  2784  func CumulativeTrapezoid(y *py.Object, x *py.Object) *py.Object
  2785  
  2786  // See https://pytorch.org/docs/stable/generated/torch.deg2rad.html
  2787  //
  2788  //go:linkname Deg2rad py.deg2rad
  2789  func Deg2rad(input *py.Object) *py.Object
  2790  
  2791  // See https://pytorch.org/docs/stable/generated/torch.dequantize.html
  2792  //
  2793  //go:linkname Dequantize py.dequantize
  2794  func Dequantize(tensor *py.Object) *py.Object
  2795  
  2796  // See https://pytorch.org/docs/stable/generated/torch.det.html
  2797  //
  2798  //go:linkname Det py.det
  2799  func Det(input *py.Object) *py.Object
  2800  
  2801  // See https://pytorch.org/docs/stable/generated/torch.diag.html
  2802  //
  2803  //go:linkname Diag py.diag
  2804  func Diag(input *py.Object, diagonal *py.Object) *py.Object
  2805  
  2806  // See https://pytorch.org/docs/stable/generated/torch.diag_embed.html
  2807  //
  2808  //go:linkname DiagEmbed py.diag_embed
  2809  func DiagEmbed(input *py.Object, offset *py.Object, dim1 *py.Object, dim2 *py.Object) *py.Object
  2810  
  2811  // See https://pytorch.org/docs/stable/generated/torch.diagflat.html
  2812  //
  2813  //go:linkname Diagflat py.diagflat
  2814  func Diagflat(input *py.Object, offset *py.Object) *py.Object
  2815  
  2816  // See https://pytorch.org/docs/stable/generated/torch.diagonal.html
  2817  //
  2818  //go:linkname Diagonal py.diagonal
  2819  func Diagonal(input *py.Object, offset *py.Object, dim1 *py.Object, dim2 *py.Object) *py.Object
  2820  
  2821  // See https://pytorch.org/docs/stable/generated/torch.diagonal_scatter.html
  2822  //
  2823  //go:linkname DiagonalScatter py.diagonal_scatter
  2824  func DiagonalScatter(input *py.Object, src *py.Object, offset *py.Object, dim1 *py.Object, dim2 *py.Object) *py.Object
  2825  
  2826  // See https://pytorch.org/docs/stable/generated/torch.diff.html
  2827  //
  2828  //go:linkname Diff py.diff
  2829  func Diff(input *py.Object, n *py.Object, dim *py.Object, prepend *py.Object, append *py.Object) *py.Object
  2830  
  2831  // See https://pytorch.org/docs/stable/generated/torch.digamma.html
  2832  //
  2833  //go:linkname Digamma py.digamma
  2834  func Digamma(input *py.Object) *py.Object
  2835  
  2836  // See https://pytorch.org/docs/stable/generated/torch.dist.html
  2837  //
  2838  //go:linkname Dist py.dist
  2839  func Dist(input *py.Object, other *py.Object, p *py.Object) *py.Object
  2840  
  2841  // See https://pytorch.org/docs/stable/generated/torch.div.html
  2842  //
  2843  //go:linkname Div py.div
  2844  func Div(input *py.Object, other *py.Object) *py.Object
  2845  
  2846  // See https://pytorch.org/docs/stable/generated/torch.divide.html
  2847  //
  2848  //go:linkname Divide py.divide
  2849  func Divide(input *py.Object, other *py.Object) *py.Object
  2850  
  2851  // See https://pytorch.org/docs/stable/generated/torch.dot.html
  2852  //
  2853  //go:linkname Dot py.dot
  2854  func Dot(input *py.Object, other *py.Object) *py.Object
  2855  
  2856  // See https://pytorch.org/docs/stable/generated/torch.dsplit.html
  2857  //
  2858  //go:linkname Dsplit py.dsplit
  2859  func Dsplit(input *py.Object, indicesOrSections *py.Object) *py.Object
  2860  
  2861  // See https://pytorch.org/docs/stable/generated/torch.dstack.html
  2862  //
  2863  //go:linkname Dstack py.dstack
  2864  func Dstack(tensors *py.Object) *py.Object
  2865  
  2866  // See https://pytorch.org/docs/stable/generated/torch.empty.html
  2867  //
  2868  //go:linkname Empty py.empty
  2869  func Empty(__llgo_va_list ...interface{}) *py.Object
  2870  
  2871  // See https://pytorch.org/docs/stable/generated/torch.empty_like.html
  2872  //
  2873  //go:linkname EmptyLike py.empty_like
  2874  func EmptyLike(input *py.Object) *py.Object
  2875  
  2876  // See https://pytorch.org/docs/stable/generated/torch.empty_strided.html
  2877  //
  2878  //go:linkname EmptyStrided py.empty_strided
  2879  func EmptyStrided(size *py.Object, stride *py.Object) *py.Object
  2880  
  2881  // See https://pytorch.org/docs/stable/generated/torch.eq.html
  2882  //
  2883  //go:linkname Eq py.eq
  2884  func Eq(input *py.Object, other *py.Object) *py.Object
  2885  
  2886  // See https://pytorch.org/docs/stable/generated/torch.equal.html
  2887  //
  2888  //go:linkname Equal py.equal
  2889  func Equal(input *py.Object, other *py.Object) *py.Object
  2890  
  2891  // See https://pytorch.org/docs/stable/generated/torch.erf.html
  2892  //
  2893  //go:linkname Erf py.erf
  2894  func Erf(input *py.Object) *py.Object
  2895  
  2896  // See https://pytorch.org/docs/stable/generated/torch.erfc.html
  2897  //
  2898  //go:linkname Erfc py.erfc
  2899  func Erfc(input *py.Object) *py.Object
  2900  
  2901  // See https://pytorch.org/docs/stable/generated/torch.erfinv.html
  2902  //
  2903  //go:linkname Erfinv py.erfinv
  2904  func Erfinv(input *py.Object) *py.Object
  2905  
  2906  // See https://pytorch.org/docs/stable/generated/torch.exp.html
  2907  //
  2908  //go:linkname Exp py.exp
  2909  func Exp(input *py.Object) *py.Object
  2910  
  2911  // See https://pytorch.org/docs/stable/generated/torch.exp2.html
  2912  //
  2913  //go:linkname Exp2 py.exp2
  2914  func Exp2(input *py.Object) *py.Object
  2915  
  2916  // See https://pytorch.org/docs/stable/generated/torch.expm1.html
  2917  //
  2918  //go:linkname Expm1 py.expm1
  2919  func Expm1(input *py.Object) *py.Object
  2920  
  2921  // See https://pytorch.org/docs/stable/generated/torch.eye.html
  2922  //
  2923  //go:linkname Eye py.eye
  2924  func Eye(n *py.Object, m *py.Object) *py.Object
  2925  
  2926  // See https://pytorch.org/docs/stable/generated/torch.fake_quantize_per_channel_affine.html
  2927  //
  2928  //go:linkname FakeQuantizePerChannelAffine py.fake_quantize_per_channel_affine
  2929  func FakeQuantizePerChannelAffine(input *py.Object, scale *py.Object, zeroPoint *py.Object, axis *py.Object, quantMin *py.Object, quantMax *py.Object) *py.Object
  2930  
  2931  // See https://pytorch.org/docs/stable/generated/torch.fake_quantize_per_tensor_affine.html
  2932  //
  2933  //go:linkname FakeQuantizePerTensorAffine py.fake_quantize_per_tensor_affine
  2934  func FakeQuantizePerTensorAffine(input *py.Object, scale *py.Object, zeroPoint *py.Object, quantMin *py.Object, quantMax *py.Object) *py.Object
  2935  
  2936  // See https://pytorch.org/docs/stable/generated/torch.fix.html
  2937  //
  2938  //go:linkname Fix py.fix
  2939  func Fix(input *py.Object) *py.Object
  2940  
  2941  // See https://pytorch.org/docs/stable/generated/torch.flatten.html
  2942  //
  2943  //go:linkname Flatten py.flatten
  2944  func Flatten(input *py.Object, startDim *py.Object, endDim *py.Object) *py.Object
  2945  
  2946  // See https://pytorch.org/docs/stable/generated/torch.flip.html
  2947  //
  2948  //go:linkname Flip py.flip
  2949  func Flip(input *py.Object, dims *py.Object) *py.Object
  2950  
  2951  // See https://pytorch.org/docs/stable/generated/torch.fliplr.html
  2952  //
  2953  //go:linkname Fliplr py.fliplr
  2954  func Fliplr(input *py.Object) *py.Object
  2955  
  2956  // See https://pytorch.org/docs/stable/generated/torch.flipud.html
  2957  //
  2958  //go:linkname Flipud py.flipud
  2959  func Flipud(input *py.Object) *py.Object
  2960  
  2961  // See https://pytorch.org/docs/stable/generated/torch.float_power.html
  2962  //
  2963  //go:linkname FloatPower py.float_power
  2964  func FloatPower(input *py.Object, exponent *py.Object) *py.Object
  2965  
  2966  // See https://pytorch.org/docs/stable/generated/torch.floor.html
  2967  //
  2968  //go:linkname Floor py.floor
  2969  func Floor(input *py.Object) *py.Object
  2970  
  2971  // See https://pytorch.org/docs/stable/generated/torch.floor_divide.html
  2972  //
  2973  //go:linkname FloorDivide py.floor_divide
  2974  func FloorDivide(input *py.Object, other *py.Object) *py.Object
  2975  
  2976  // See https://pytorch.org/docs/stable/generated/torch.fmax.html
  2977  //
  2978  //go:linkname Fmax py.fmax
  2979  func Fmax(input *py.Object, other *py.Object) *py.Object
  2980  
  2981  // See https://pytorch.org/docs/stable/generated/torch.fmin.html
  2982  //
  2983  //go:linkname Fmin py.fmin
  2984  func Fmin(input *py.Object, other *py.Object) *py.Object
  2985  
  2986  // See https://pytorch.org/docs/stable/generated/torch.fmod.html
  2987  //
  2988  //go:linkname Fmod py.fmod
  2989  func Fmod(input *py.Object, other *py.Object) *py.Object
  2990  
  2991  // See https://pytorch.org/docs/stable/generated/torch.frac.html
  2992  //
  2993  //go:linkname Frac py.frac
  2994  func Frac(input *py.Object) *py.Object
  2995  
  2996  // See https://pytorch.org/docs/stable/generated/torch.frexp.html
  2997  //
  2998  //go:linkname Frexp py.frexp
  2999  func Frexp(input *py.Object) *py.Object
  3000  
  3001  // See https://pytorch.org/docs/stable/generated/torch.from_file.html
  3002  //
  3003  //go:linkname FromFile py.from_file
  3004  func FromFile(filename *py.Object, shared *py.Object, size *py.Object) *py.Object
  3005  
  3006  // See https://pytorch.org/docs/stable/generated/torch.from_numpy.html
  3007  //
  3008  //go:linkname FromNumpy py.from_numpy
  3009  func FromNumpy(ndarray *py.Object) *py.Object
  3010  
  3011  // See https://pytorch.org/docs/stable/generated/torch.frombuffer.html
  3012  //
  3013  //go:linkname Frombuffer py.frombuffer
  3014  func Frombuffer(buffer *py.Object) *py.Object
  3015  
  3016  // See https://pytorch.org/docs/stable/generated/torch.full.html
  3017  //
  3018  //go:linkname Full py.full
  3019  func Full(size *py.Object, fillValue *py.Object) *py.Object
  3020  
  3021  // See https://pytorch.org/docs/stable/generated/torch.full_like.html
  3022  //
  3023  //go:linkname FullLike py.full_like
  3024  func FullLike(input *py.Object, fillValue *py.Object) *py.Object
  3025  
  3026  // See https://pytorch.org/docs/stable/generated/torch.gather.html
  3027  //
  3028  //go:linkname Gather py.gather
  3029  func Gather(input *py.Object, dim *py.Object, index *py.Object) *py.Object
  3030  
  3031  // See https://pytorch.org/docs/stable/generated/torch.gcd.html
  3032  //
  3033  //go:linkname Gcd py.gcd
  3034  func Gcd(input *py.Object, other *py.Object) *py.Object
  3035  
  3036  // See https://pytorch.org/docs/stable/generated/torch.ge.html
  3037  //
  3038  //go:linkname Ge py.ge
  3039  func Ge(input *py.Object, other *py.Object) *py.Object
  3040  
  3041  // See https://pytorch.org/docs/stable/generated/torch.geqrf.html
  3042  //
  3043  //go:linkname Geqrf py.geqrf
  3044  func Geqrf(input *py.Object) *py.Object
  3045  
  3046  // See https://pytorch.org/docs/stable/generated/torch.ger.html
  3047  //
  3048  //go:linkname Ger py.ger
  3049  func Ger(input *py.Object, vec2 *py.Object) *py.Object
  3050  
  3051  // See https://pytorch.org/docs/stable/generated/torch.gradient.html
  3052  //
  3053  //go:linkname Gradient py.gradient
  3054  func Gradient(input *py.Object) *py.Object
  3055  
  3056  // See https://pytorch.org/docs/stable/generated/torch.greater.html
  3057  //
  3058  //go:linkname Greater py.greater
  3059  func Greater(input *py.Object, other *py.Object) *py.Object
  3060  
  3061  // See https://pytorch.org/docs/stable/generated/torch.greater_equal.html
  3062  //
  3063  //go:linkname GreaterEqual py.greater_equal
  3064  func GreaterEqual(input *py.Object, other *py.Object) *py.Object
  3065  
  3066  // See https://pytorch.org/docs/stable/generated/torch.gt.html
  3067  //
  3068  //go:linkname Gt py.gt
  3069  func Gt(input *py.Object, other *py.Object) *py.Object
  3070  
  3071  // See https://pytorch.org/docs/stable/generated/torch.hamming_window.html
  3072  //
  3073  //go:linkname HammingWindow py.hamming_window
  3074  func HammingWindow(windowLength *py.Object, periodic *py.Object, alpha *py.Object, beta *py.Object) *py.Object
  3075  
  3076  // See https://pytorch.org/docs/stable/generated/torch.hann_window.html
  3077  //
  3078  //go:linkname HannWindow py.hann_window
  3079  func HannWindow(windowLength *py.Object, periodic *py.Object) *py.Object
  3080  
  3081  // See https://pytorch.org/docs/stable/generated/torch.heaviside.html
  3082  //
  3083  //go:linkname Heaviside py.heaviside
  3084  func Heaviside(input *py.Object, values *py.Object) *py.Object
  3085  
  3086  // See https://pytorch.org/docs/stable/generated/torch.histc.html
  3087  //
  3088  //go:linkname Histc py.histc
  3089  func Histc(input *py.Object, bins *py.Object, min *py.Object, max *py.Object) *py.Object
  3090  
  3091  // See https://pytorch.org/docs/stable/generated/torch.histogram.html
  3092  //
  3093  //go:linkname Histogram py.histogram
  3094  func Histogram(input *py.Object, bins *py.Object) *py.Object
  3095  
  3096  // See https://pytorch.org/docs/stable/generated/torch.histogramdd.html
  3097  //
  3098  //go:linkname Histogramdd py.histogramdd
  3099  func Histogramdd(input *py.Object, bins *py.Object) *py.Object
  3100  
  3101  // See https://pytorch.org/docs/stable/generated/torch.hsplit.html
  3102  //
  3103  //go:linkname Hsplit py.hsplit
  3104  func Hsplit(input *py.Object, indicesOrSections *py.Object) *py.Object
  3105  
  3106  // See https://pytorch.org/docs/stable/generated/torch.hspmm.html
  3107  //
  3108  //go:linkname Hspmm py.hspmm
  3109  func Hspmm(mat1 *py.Object, mat2 *py.Object) *py.Object
  3110  
  3111  // See https://pytorch.org/docs/stable/generated/torch.hstack.html
  3112  //
  3113  //go:linkname Hstack py.hstack
  3114  func Hstack(tensors *py.Object) *py.Object
  3115  
  3116  // See https://pytorch.org/docs/stable/generated/torch.hypot.html
  3117  //
  3118  //go:linkname Hypot py.hypot
  3119  func Hypot(input *py.Object, other *py.Object) *py.Object
  3120  
  3121  // See https://pytorch.org/docs/stable/generated/torch.i0.html
  3122  //
  3123  //go:linkname I0 py.i0
  3124  func I0(input *py.Object) *py.Object
  3125  
  3126  // See https://pytorch.org/docs/stable/generated/torch.igamma.html
  3127  //
  3128  //go:linkname Igamma py.igamma
  3129  func Igamma(input *py.Object, other *py.Object) *py.Object
  3130  
  3131  // See https://pytorch.org/docs/stable/generated/torch.igammac.html
  3132  //
  3133  //go:linkname Igammac py.igammac
  3134  func Igammac(input *py.Object, other *py.Object) *py.Object
  3135  
  3136  // See https://pytorch.org/docs/stable/generated/torch.imag.html
  3137  //
  3138  //go:linkname Imag py.imag
  3139  func Imag(input *py.Object) *py.Object
  3140  
  3141  // See https://pytorch.org/docs/stable/generated/torch.index_add.html
  3142  //
  3143  //go:linkname IndexAdd py.index_add
  3144  func IndexAdd(input *py.Object, dim *py.Object, index *py.Object, source *py.Object) *py.Object
  3145  
  3146  // See https://pytorch.org/docs/stable/generated/torch.index_copy.html
  3147  //
  3148  //go:linkname IndexCopy py.index_copy
  3149  func IndexCopy(input *py.Object, dim *py.Object, index *py.Object, source *py.Object) *py.Object
  3150  
  3151  // See https://pytorch.org/docs/stable/generated/torch.index_reduce.html
  3152  //
  3153  //go:linkname IndexReduce py.index_reduce
  3154  func IndexReduce(input *py.Object, dim *py.Object, index *py.Object, source *py.Object, reduce *py.Object) *py.Object
  3155  
  3156  // See https://pytorch.org/docs/stable/generated/torch.index_select.html
  3157  //
  3158  //go:linkname IndexSelect py.index_select
  3159  func IndexSelect(input *py.Object, dim *py.Object, index *py.Object) *py.Object
  3160  
  3161  // See https://pytorch.org/docs/stable/generated/torch.inner.html
  3162  //
  3163  //go:linkname Inner py.inner
  3164  func Inner(input *py.Object, other *py.Object) *py.Object
  3165  
  3166  // See https://pytorch.org/docs/stable/generated/torch.inverse.html
  3167  //
  3168  //go:linkname Inverse py.inverse
  3169  func Inverse(input *py.Object) *py.Object
  3170  
  3171  // See https://pytorch.org/docs/stable/generated/torch.is_complex.html
  3172  //
  3173  //go:linkname IsComplex py.is_complex
  3174  func IsComplex(input *py.Object) *py.Object
  3175  
  3176  // See https://pytorch.org/docs/stable/generated/torch.is_conj.html
  3177  //
  3178  //go:linkname IsConj py.is_conj
  3179  func IsConj(input *py.Object) *py.Object
  3180  
  3181  // See https://pytorch.org/docs/stable/generated/torch.is_floating_point.html
  3182  //
  3183  //go:linkname IsFloatingPoint py.is_floating_point
  3184  func IsFloatingPoint(input *py.Object) *py.Object
  3185  
  3186  // See https://pytorch.org/docs/stable/generated/torch.is_nonzero.html
  3187  //
  3188  //go:linkname IsNonzero py.is_nonzero
  3189  func IsNonzero(input *py.Object) *py.Object
  3190  
  3191  // See https://pytorch.org/docs/stable/generated/torch.isclose.html
  3192  //
  3193  //go:linkname Isclose py.isclose
  3194  func Isclose(input *py.Object, other *py.Object, rtol *py.Object, atol *py.Object, equalNan *py.Object) *py.Object
  3195  
  3196  // See https://pytorch.org/docs/stable/generated/torch.isfinite.html
  3197  //
  3198  //go:linkname Isfinite py.isfinite
  3199  func Isfinite(input *py.Object) *py.Object
  3200  
  3201  // See https://pytorch.org/docs/stable/generated/torch.isin.html
  3202  //
  3203  //go:linkname Isin py.isin
  3204  func Isin(elements *py.Object, testElements *py.Object) *py.Object
  3205  
  3206  // See https://pytorch.org/docs/stable/generated/torch.isinf.html
  3207  //
  3208  //go:linkname Isinf py.isinf
  3209  func Isinf(input *py.Object) *py.Object
  3210  
  3211  // See https://pytorch.org/docs/stable/generated/torch.isnan.html
  3212  //
  3213  //go:linkname Isnan py.isnan
  3214  func Isnan(input *py.Object) *py.Object
  3215  
  3216  // See https://pytorch.org/docs/stable/generated/torch.isneginf.html
  3217  //
  3218  //go:linkname Isneginf py.isneginf
  3219  func Isneginf(input *py.Object) *py.Object
  3220  
  3221  // See https://pytorch.org/docs/stable/generated/torch.isposinf.html
  3222  //
  3223  //go:linkname Isposinf py.isposinf
  3224  func Isposinf(input *py.Object) *py.Object
  3225  
  3226  // See https://pytorch.org/docs/stable/generated/torch.isreal.html
  3227  //
  3228  //go:linkname Isreal py.isreal
  3229  func Isreal(input *py.Object) *py.Object
  3230  
  3231  // See https://pytorch.org/docs/stable/generated/torch.istft.html
  3232  //
  3233  //go:linkname Istft py.istft
  3234  func Istft(input *py.Object, nFft *py.Object, hopLength *py.Object, winLength *py.Object, window *py.Object, center *py.Object, normalized *py.Object, onesided *py.Object, length *py.Object, returnComplex *py.Object) *py.Object
  3235  
  3236  // See https://pytorch.org/docs/stable/generated/torch.kaiser_window.html
  3237  //
  3238  //go:linkname KaiserWindow py.kaiser_window
  3239  func KaiserWindow(windowLength *py.Object, periodic *py.Object, beta *py.Object) *py.Object
  3240  
  3241  // See https://pytorch.org/docs/stable/generated/torch.kron.html
  3242  //
  3243  //go:linkname Kron py.kron
  3244  func Kron(input *py.Object, other *py.Object) *py.Object
  3245  
  3246  // See https://pytorch.org/docs/stable/generated/torch.kthvalue.html
  3247  //
  3248  //go:linkname Kthvalue py.kthvalue
  3249  func Kthvalue(input *py.Object, k *py.Object, dim *py.Object, keepdim *py.Object) *py.Object
  3250  
  3251  // See https://pytorch.org/docs/stable/generated/torch.lcm.html
  3252  //
  3253  //go:linkname Lcm py.lcm
  3254  func Lcm(input *py.Object, other *py.Object) *py.Object
  3255  
  3256  // See https://pytorch.org/docs/stable/generated/torch.ldexp.html
  3257  //
  3258  //go:linkname Ldexp py.ldexp
  3259  func Ldexp(input *py.Object, other *py.Object) *py.Object
  3260  
  3261  // See https://pytorch.org/docs/stable/generated/torch.le.html
  3262  //
  3263  //go:linkname Le py.le
  3264  func Le(input *py.Object, other *py.Object) *py.Object
  3265  
  3266  // See https://pytorch.org/docs/stable/generated/torch.lerp.html
  3267  //
  3268  //go:linkname Lerp py.lerp
  3269  func Lerp(input *py.Object, end *py.Object, weight *py.Object) *py.Object
  3270  
  3271  // See https://pytorch.org/docs/stable/generated/torch.less.html
  3272  //
  3273  //go:linkname Less py.less
  3274  func Less(input *py.Object, other *py.Object) *py.Object
  3275  
  3276  // See https://pytorch.org/docs/stable/generated/torch.less_equal.html
  3277  //
  3278  //go:linkname LessEqual py.less_equal
  3279  func LessEqual(input *py.Object, other *py.Object) *py.Object
  3280  
  3281  // See https://pytorch.org/docs/stable/generated/torch.lgamma.html
  3282  //
  3283  //go:linkname Lgamma py.lgamma
  3284  func Lgamma(input *py.Object) *py.Object
  3285  
  3286  // See https://pytorch.org/docs/stable/generated/torch.linspace.html
  3287  //
  3288  //go:linkname Linspace py.linspace
  3289  func Linspace(start *py.Object, end *py.Object, steps *py.Object) *py.Object
  3290  
  3291  // See https://pytorch.org/docs/stable/generated/torch.log.html
  3292  //
  3293  //go:linkname Log py.log
  3294  func Log(input *py.Object) *py.Object
  3295  
  3296  // See https://pytorch.org/docs/stable/generated/torch.log10.html
  3297  //
  3298  //go:linkname Log10 py.log10
  3299  func Log10(input *py.Object) *py.Object
  3300  
  3301  // See https://pytorch.org/docs/stable/generated/torch.log1p.html
  3302  //
  3303  //go:linkname Log1p py.log1p
  3304  func Log1p(input *py.Object) *py.Object
  3305  
  3306  // See https://pytorch.org/docs/stable/generated/torch.log2.html
  3307  //
  3308  //go:linkname Log2 py.log2
  3309  func Log2(input *py.Object) *py.Object
  3310  
  3311  // See https://pytorch.org/docs/stable/generated/torch.logaddexp.html
  3312  //
  3313  //go:linkname Logaddexp py.logaddexp
  3314  func Logaddexp(input *py.Object, other *py.Object) *py.Object
  3315  
  3316  // See https://pytorch.org/docs/stable/generated/torch.logaddexp2.html
  3317  //
  3318  //go:linkname Logaddexp2 py.logaddexp2
  3319  func Logaddexp2(input *py.Object, other *py.Object) *py.Object
  3320  
  3321  // See https://pytorch.org/docs/stable/generated/torch.logcumsumexp.html
  3322  //
  3323  //go:linkname Logcumsumexp py.logcumsumexp
  3324  func Logcumsumexp(input *py.Object, dim *py.Object) *py.Object
  3325  
  3326  // See https://pytorch.org/docs/stable/generated/torch.logdet.html
  3327  //
  3328  //go:linkname Logdet py.logdet
  3329  func Logdet(input *py.Object) *py.Object
  3330  
  3331  // See https://pytorch.org/docs/stable/generated/torch.logical_and.html
  3332  //
  3333  //go:linkname LogicalAnd py.logical_and
  3334  func LogicalAnd(input *py.Object, other *py.Object) *py.Object
  3335  
  3336  // See https://pytorch.org/docs/stable/generated/torch.logical_not.html
  3337  //
  3338  //go:linkname LogicalNot py.logical_not
  3339  func LogicalNot(input *py.Object) *py.Object
  3340  
  3341  // See https://pytorch.org/docs/stable/generated/torch.logical_or.html
  3342  //
  3343  //go:linkname LogicalOr py.logical_or
  3344  func LogicalOr(input *py.Object, other *py.Object) *py.Object
  3345  
  3346  // See https://pytorch.org/docs/stable/generated/torch.logical_xor.html
  3347  //
  3348  //go:linkname LogicalXor py.logical_xor
  3349  func LogicalXor(input *py.Object, other *py.Object) *py.Object
  3350  
  3351  // See https://pytorch.org/docs/stable/generated/torch.logit.html
  3352  //
  3353  //go:linkname Logit py.logit
  3354  func Logit(input *py.Object, eps *py.Object) *py.Object
  3355  
  3356  // See https://pytorch.org/docs/stable/generated/torch.logspace.html
  3357  //
  3358  //go:linkname Logspace py.logspace
  3359  func Logspace(start *py.Object, end *py.Object, steps *py.Object, base *py.Object) *py.Object
  3360  
  3361  // See https://pytorch.org/docs/stable/generated/torch.logsumexp.html
  3362  //
  3363  //go:linkname Logsumexp py.logsumexp
  3364  func Logsumexp(input *py.Object, dim *py.Object, keepdim *py.Object) *py.Object
  3365  
  3366  // See https://pytorch.org/docs/stable/generated/torch.lt.html
  3367  //
  3368  //go:linkname Lt py.lt
  3369  func Lt(input *py.Object, other *py.Object) *py.Object
  3370  
  3371  // See https://pytorch.org/docs/stable/generated/torch.lu_solve.html
  3372  //
  3373  //go:linkname LuSolve py.lu_solve
  3374  func LuSolve(b *py.Object, LUData *py.Object, LUPivots *py.Object) *py.Object
  3375  
  3376  // See https://pytorch.org/docs/stable/generated/torch.lu_unpack.html
  3377  //
  3378  //go:linkname LuUnpack py.lu_unpack
  3379  func LuUnpack(LUData *py.Object, LUPivots *py.Object, unpackData *py.Object, unpackPivots *py.Object) *py.Object
  3380  
  3381  // See https://pytorch.org/docs/stable/generated/torch.masked_select.html
  3382  //
  3383  //go:linkname MaskedSelect py.masked_select
  3384  func MaskedSelect(input *py.Object, mask *py.Object) *py.Object
  3385  
  3386  // See https://pytorch.org/docs/stable/generated/torch.matmul.html
  3387  //
  3388  //go:linkname Matmul py.matmul
  3389  func Matmul(input *py.Object, other *py.Object) *py.Object
  3390  
  3391  // See https://pytorch.org/docs/stable/generated/torch.matrix_exp.html
  3392  //
  3393  //go:linkname MatrixExp py.matrix_exp
  3394  func MatrixExp(A *py.Object) *py.Object
  3395  
  3396  // See https://pytorch.org/docs/stable/generated/torch.matrix_power.html
  3397  //
  3398  //go:linkname MatrixPower py.matrix_power
  3399  func MatrixPower(input *py.Object, n *py.Object) *py.Object
  3400  
  3401  // See https://pytorch.org/docs/stable/generated/torch.max.html
  3402  //
  3403  //go:linkname Max py.max
  3404  func Max(input *py.Object) *py.Object
  3405  
  3406  // See https://pytorch.org/docs/stable/generated/torch.maximum.html
  3407  //
  3408  //go:linkname Maximum py.maximum
  3409  func Maximum(input *py.Object, other *py.Object) *py.Object
  3410  
  3411  // See https://pytorch.org/docs/stable/generated/torch.mean.html
  3412  //
  3413  //go:linkname Mean py.mean
  3414  func Mean(input *py.Object) *py.Object
  3415  
  3416  // See https://pytorch.org/docs/stable/generated/torch.median.html
  3417  //
  3418  //go:linkname Median py.median
  3419  func Median(input *py.Object) *py.Object
  3420  
  3421  // See https://pytorch.org/docs/stable/generated/torch.min.html
  3422  //
  3423  //go:linkname Min py.min
  3424  func Min(input *py.Object) *py.Object
  3425  
  3426  // See https://pytorch.org/docs/stable/generated/torch.minimum.html
  3427  //
  3428  //go:linkname Minimum py.minimum
  3429  func Minimum(input *py.Object, other *py.Object) *py.Object
  3430  
  3431  // See https://pytorch.org/docs/stable/generated/torch.mm.html
  3432  //
  3433  //go:linkname Mm py.mm
  3434  func Mm(input *py.Object, mat2 *py.Object) *py.Object
  3435  
  3436  // See https://pytorch.org/docs/stable/generated/torch.mode.html
  3437  //
  3438  //go:linkname Mode py.mode
  3439  func Mode(input *py.Object, dim *py.Object, keepdim *py.Object) *py.Object
  3440  
  3441  // See https://pytorch.org/docs/stable/generated/torch.moveaxis.html
  3442  //
  3443  //go:linkname Moveaxis py.moveaxis
  3444  func Moveaxis(input *py.Object, source *py.Object, destination *py.Object) *py.Object
  3445  
  3446  // See https://pytorch.org/docs/stable/generated/torch.movedim.html
  3447  //
  3448  //go:linkname Movedim py.movedim
  3449  func Movedim(input *py.Object, source *py.Object, destination *py.Object) *py.Object
  3450  
  3451  // See https://pytorch.org/docs/stable/generated/torch.msort.html
  3452  //
  3453  //go:linkname Msort py.msort
  3454  func Msort(input *py.Object) *py.Object
  3455  
  3456  // See https://pytorch.org/docs/stable/generated/torch.mul.html
  3457  //
  3458  //go:linkname Mul py.mul
  3459  func Mul(input *py.Object, other *py.Object) *py.Object
  3460  
  3461  // See https://pytorch.org/docs/stable/generated/torch.multinomial.html
  3462  //
  3463  //go:linkname Multinomial py.multinomial
  3464  func Multinomial(input *py.Object, numSamples *py.Object, replacement *py.Object) *py.Object
  3465  
  3466  // See https://pytorch.org/docs/stable/generated/torch.multiply.html
  3467  //
  3468  //go:linkname Multiply py.multiply
  3469  func Multiply(input *py.Object, other *py.Object) *py.Object
  3470  
  3471  // See https://pytorch.org/docs/stable/generated/torch.mv.html
  3472  //
  3473  //go:linkname Mv py.mv
  3474  func Mv(input *py.Object, vec *py.Object) *py.Object
  3475  
  3476  // See https://pytorch.org/docs/stable/generated/torch.mvlgamma.html
  3477  //
  3478  //go:linkname Mvlgamma py.mvlgamma
  3479  func Mvlgamma(input *py.Object, p *py.Object) *py.Object
  3480  
  3481  // See https://pytorch.org/docs/stable/generated/torch.nan_to_num.html
  3482  //
  3483  //go:linkname NanToNum py.nan_to_num
  3484  func NanToNum(input *py.Object, nan *py.Object, posinf *py.Object, neginf *py.Object) *py.Object
  3485  
  3486  // See https://pytorch.org/docs/stable/generated/torch.nanmean.html
  3487  //
  3488  //go:linkname Nanmean py.nanmean
  3489  func Nanmean(input *py.Object, dim *py.Object, keepdim *py.Object) *py.Object
  3490  
  3491  // See https://pytorch.org/docs/stable/generated/torch.nanmedian.html
  3492  //
  3493  //go:linkname Nanmedian py.nanmedian
  3494  func Nanmedian(input *py.Object) *py.Object
  3495  
  3496  // See https://pytorch.org/docs/stable/generated/torch.nanquantile.html
  3497  //
  3498  //go:linkname Nanquantile py.nanquantile
  3499  func Nanquantile(input *py.Object, q *py.Object, dim *py.Object, keepdim *py.Object) *py.Object
  3500  
  3501  // See https://pytorch.org/docs/stable/generated/torch.nansum.html
  3502  //
  3503  //go:linkname Nansum py.nansum
  3504  func Nansum(input *py.Object) *py.Object
  3505  
  3506  // See https://pytorch.org/docs/stable/generated/torch.narrow.html
  3507  //
  3508  //go:linkname Narrow py.narrow
  3509  func Narrow(input *py.Object, dim *py.Object, start *py.Object, length *py.Object) *py.Object
  3510  
  3511  // See https://pytorch.org/docs/stable/generated/torch.narrow_copy.html
  3512  //
  3513  //go:linkname NarrowCopy py.narrow_copy
  3514  func NarrowCopy(input *py.Object, dim *py.Object, start *py.Object, length *py.Object) *py.Object
  3515  
  3516  // See https://pytorch.org/docs/stable/generated/torch.ne.html
  3517  //
  3518  //go:linkname Ne py.ne
  3519  func Ne(input *py.Object, other *py.Object) *py.Object
  3520  
  3521  // See https://pytorch.org/docs/stable/generated/torch.neg.html
  3522  //
  3523  //go:linkname Neg py.neg
  3524  func Neg(input *py.Object) *py.Object
  3525  
  3526  // See https://pytorch.org/docs/stable/generated/torch.negative.html
  3527  //
  3528  //go:linkname Negative py.negative
  3529  func Negative(input *py.Object) *py.Object
  3530  
  3531  // See https://pytorch.org/docs/stable/generated/torch.nextafter.html
  3532  //
  3533  //go:linkname Nextafter py.nextafter
  3534  func Nextafter(input *py.Object, other *py.Object) *py.Object
  3535  
  3536  // See https://pytorch.org/docs/stable/generated/torch.nonzero.html
  3537  //
  3538  //go:linkname Nonzero py.nonzero
  3539  func Nonzero(input *py.Object) *py.Object
  3540  
  3541  // See https://pytorch.org/docs/stable/generated/torch.normal.html
  3542  //
  3543  //go:linkname Normal py.normal
  3544  func Normal(mean *py.Object, std *py.Object) *py.Object
  3545  
  3546  // See https://pytorch.org/docs/stable/generated/torch.not_equal.html
  3547  //
  3548  //go:linkname NotEqual py.not_equal
  3549  func NotEqual(input *py.Object, other *py.Object) *py.Object
  3550  
  3551  // See https://pytorch.org/docs/stable/generated/torch.numel.html
  3552  //
  3553  //go:linkname Numel py.numel
  3554  func Numel(input *py.Object) *py.Object
  3555  
  3556  // See https://pytorch.org/docs/stable/generated/torch.ones.html
  3557  //
  3558  //go:linkname Ones py.ones
  3559  func Ones(__llgo_va_list ...interface{}) *py.Object
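
// Illustrative usage sketch from a client package (not part of the generated
// bindings): the variadic factory bindings such as Ones, Rand, Randn, and
// Zeros forward their arguments to the Python call as positional values.
// The sketch assumes py.Long from github.com/goplus/llgo/py constructs a
// Python int; treat that constructor as an assumption, not a confirmed API.
//
//	// Request a 2x3 tensor of ones by passing the sizes as Python ints.
//	ones := torch.Ones(py.Long(2), py.Long(3))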
  3560  
  3561  // See https://pytorch.org/docs/stable/generated/torch.ones_like.html
  3562  //
  3563  //go:linkname OnesLike py.ones_like
  3564  func OnesLike(input *py.Object) *py.Object
  3565  
  3566  // See https://pytorch.org/docs/stable/generated/torch.orgqr.html
  3567  //
  3568  //go:linkname Orgqr py.orgqr
  3569  func Orgqr(input *py.Object, tau *py.Object) *py.Object
  3570  
  3571  // See https://pytorch.org/docs/stable/generated/torch.ormqr.html
  3572  //
  3573  //go:linkname Ormqr py.ormqr
  3574  func Ormqr(input *py.Object, tau *py.Object, other *py.Object, left *py.Object, transpose *py.Object) *py.Object
  3575  
  3576  // See https://pytorch.org/docs/stable/generated/torch.outer.html
  3577  //
  3578  //go:linkname Outer py.outer
  3579  func Outer(input *py.Object, vec2 *py.Object) *py.Object
  3580  
  3581  // See https://pytorch.org/docs/stable/generated/torch.permute.html
  3582  //
  3583  //go:linkname Permute py.permute
  3584  func Permute(input *py.Object, dims *py.Object) *py.Object
  3585  
  3586  // See https://pytorch.org/docs/stable/generated/torch.pinverse.html
  3587  //
  3588  //go:linkname Pinverse py.pinverse
  3589  func Pinverse(input *py.Object, rcond *py.Object) *py.Object
  3590  
  3591  // See https://pytorch.org/docs/stable/generated/torch.poisson.html
  3592  //
  3593  //go:linkname Poisson py.poisson
  3594  func Poisson(input *py.Object, generator *py.Object) *py.Object
  3595  
  3596  // See https://pytorch.org/docs/stable/generated/torch.polar.html
  3597  //
  3598  //go:linkname Polar py.polar
  3599  func Polar(abs *py.Object, angle *py.Object) *py.Object
  3600  
  3601  // See https://pytorch.org/docs/stable/generated/torch.polygamma.html
  3602  //
  3603  //go:linkname Polygamma py.polygamma
  3604  func Polygamma(n *py.Object, input *py.Object) *py.Object
  3605  
  3606  // See https://pytorch.org/docs/stable/generated/torch.positive.html
  3607  //
  3608  //go:linkname Positive py.positive
  3609  func Positive(input *py.Object) *py.Object
  3610  
  3611  // See https://pytorch.org/docs/stable/generated/torch.pow.html
  3612  //
  3613  //go:linkname Pow py.pow
  3614  func Pow(input *py.Object, exponent *py.Object) *py.Object
  3615  
  3616  // See https://pytorch.org/docs/stable/generated/torch.prod.html
  3617  //
  3618  //go:linkname Prod py.prod
  3619  func Prod(input *py.Object) *py.Object
  3620  
  3621  // See https://pytorch.org/docs/stable/generated/torch.promote_types.html
  3622  //
  3623  //go:linkname PromoteTypes py.promote_types
  3624  func PromoteTypes(type1 *py.Object, type2 *py.Object) *py.Object
  3625  
  3626  // See https://pytorch.org/docs/stable/generated/torch.qr.html
  3627  //
  3628  //go:linkname Qr py.qr
  3629  func Qr(input *py.Object, some *py.Object) *py.Object
  3630  
  3631  // See https://pytorch.org/docs/stable/generated/torch.quantile.html
  3632  //
  3633  //go:linkname Quantile py.quantile
  3634  func Quantile(input *py.Object, q *py.Object, dim *py.Object, keepdim *py.Object) *py.Object
  3635  
  3636  // See https://pytorch.org/docs/stable/generated/torch.quantize_per_channel.html
  3637  //
  3638  //go:linkname QuantizePerChannel py.quantize_per_channel
  3639  func QuantizePerChannel(input *py.Object, scales *py.Object, zeroPoints *py.Object, axis *py.Object, dtype *py.Object) *py.Object
  3640  
  3641  // See https://pytorch.org/docs/stable/generated/torch.quantize_per_tensor.html
  3642  //
  3643  //go:linkname QuantizePerTensor py.quantize_per_tensor
  3644  func QuantizePerTensor(input *py.Object, scale *py.Object, zeroPoint *py.Object, dtype *py.Object) *py.Object
  3645  
  3646  // See https://pytorch.org/docs/stable/generated/torch.quantized_batch_norm.html
  3647  //
  3648  //go:linkname QuantizedBatchNorm py.quantized_batch_norm
  3649  func QuantizedBatchNorm(input *py.Object, weight *py.Object, bias *py.Object, mean *py.Object, var_ *py.Object, eps *py.Object, outputScale *py.Object, outputZeroPoint *py.Object) *py.Object
  3650  
  3651  // See https://pytorch.org/docs/stable/generated/torch.quantized_max_pool1d.html
  3652  //
  3653  //go:linkname QuantizedMaxPool1d py.quantized_max_pool1d
  3654  func QuantizedMaxPool1d(input *py.Object, kernelSize *py.Object, stride *py.Object, padding *py.Object, dilation *py.Object, ceilMode *py.Object) *py.Object
  3655  
  3656  // See https://pytorch.org/docs/stable/generated/torch.quantized_max_pool2d.html
  3657  //
  3658  //go:linkname QuantizedMaxPool2d py.quantized_max_pool2d
  3659  func QuantizedMaxPool2d(input *py.Object, kernelSize *py.Object, stride *py.Object, padding *py.Object, dilation *py.Object, ceilMode *py.Object) *py.Object
  3660  
  3661  // See https://pytorch.org/docs/stable/generated/torch.rad2deg.html
  3662  //
  3663  //go:linkname Rad2deg py.rad2deg
  3664  func Rad2deg(input *py.Object) *py.Object
  3665  
  3666  // See https://pytorch.org/docs/stable/generated/torch.rand.html
  3667  //
  3668  //go:linkname Rand py.rand
  3669  func Rand(__llgo_va_list ...interface{}) *py.Object
  3670  
  3671  // See https://pytorch.org/docs/stable/generated/torch.rand_like.html
  3672  //
  3673  //go:linkname RandLike py.rand_like
  3674  func RandLike(input *py.Object) *py.Object
  3675  
  3676  // See https://pytorch.org/docs/stable/generated/torch.randint.html
  3677  //
  3678  //go:linkname Randint py.randint
  3679  func Randint(low *py.Object, high *py.Object, size *py.Object) *py.Object
  3680  
  3681  // See https://pytorch.org/docs/stable/generated/torch.randint_like.html
  3682  //
  3683  //go:linkname RandintLike py.randint_like
  3684  func RandintLike(input *py.Object, low *py.Object, high *py.Object) *py.Object
  3685  
  3686  // See https://pytorch.org/docs/stable/generated/torch.randn.html
  3687  //
  3688  //go:linkname Randn py.randn
  3689  func Randn(__llgo_va_list ...interface{}) *py.Object
  3690  
  3691  // See https://pytorch.org/docs/stable/generated/torch.randn_like.html
  3692  //
  3693  //go:linkname RandnLike py.randn_like
  3694  func RandnLike(input *py.Object) *py.Object
  3695  
  3696  // See https://pytorch.org/docs/stable/generated/torch.randperm.html
  3697  //
  3698  //go:linkname Randperm py.randperm
  3699  func Randperm(n *py.Object) *py.Object
  3700  
  3701  // See https://pytorch.org/docs/stable/generated/torch.range.html
  3702  //
  3703  //go:linkname Range py.range
  3704  func Range(start *py.Object, end *py.Object, step *py.Object) *py.Object
  3705  
  3706  // See https://pytorch.org/docs/stable/generated/torch.ravel.html
  3707  //
  3708  //go:linkname Ravel py.ravel
  3709  func Ravel(input *py.Object) *py.Object
  3710  
  3711  // See https://pytorch.org/docs/stable/generated/torch.real.html
  3712  //
  3713  //go:linkname Real py.real
  3714  func Real(input *py.Object) *py.Object
  3715  
  3716  // See https://pytorch.org/docs/stable/generated/torch.reciprocal.html
  3717  //
  3718  //go:linkname Reciprocal py.reciprocal
  3719  func Reciprocal(input *py.Object) *py.Object
  3720  
  3721  // See https://pytorch.org/docs/stable/generated/torch.remainder.html
  3722  //
  3723  //go:linkname Remainder py.remainder
  3724  func Remainder(input *py.Object, other *py.Object) *py.Object
  3725  
  3726  // See https://pytorch.org/docs/stable/generated/torch.renorm.html
  3727  //
  3728  //go:linkname Renorm py.renorm
  3729  func Renorm(input *py.Object, p *py.Object, dim *py.Object, maxnorm *py.Object) *py.Object
  3730  
  3731  // See https://pytorch.org/docs/stable/generated/torch.repeat_interleave.html
  3732  //
  3733  //go:linkname RepeatInterleave py.repeat_interleave
  3734  func RepeatInterleave(input *py.Object, repeats *py.Object, dim *py.Object) *py.Object
  3735  
  3736  // See https://pytorch.org/docs/stable/generated/torch.reshape.html
  3737  //
  3738  //go:linkname Reshape py.reshape
  3739  func Reshape(input *py.Object, shape *py.Object) *py.Object
  3740  
  3741  // See https://pytorch.org/docs/stable/generated/torch.resolve_conj.html
  3742  //
  3743  //go:linkname ResolveConj py.resolve_conj
  3744  func ResolveConj(input *py.Object) *py.Object
  3745  
  3746  // See https://pytorch.org/docs/stable/generated/torch.resolve_neg.html
  3747  //
  3748  //go:linkname ResolveNeg py.resolve_neg
  3749  func ResolveNeg(input *py.Object) *py.Object
  3750  
  3751  // See https://pytorch.org/docs/stable/generated/torch.result_type.html
  3752  //
  3753  //go:linkname ResultType py.result_type
  3754  func ResultType(tensor1 *py.Object, tensor2 *py.Object) *py.Object
  3755  
  3756  // See https://pytorch.org/docs/stable/generated/torch.roll.html
  3757  //
  3758  //go:linkname Roll py.roll
  3759  func Roll(input *py.Object, shifts *py.Object, dims *py.Object) *py.Object
  3760  
  3761  // See https://pytorch.org/docs/stable/generated/torch.rot90.html
  3762  //
  3763  //go:linkname Rot90 py.rot90
  3764  func Rot90(input *py.Object, k *py.Object, dims *py.Object) *py.Object
  3765  
  3766  // See https://pytorch.org/docs/stable/generated/torch.round.html
  3767  //
  3768  //go:linkname Round py.round
  3769  func Round(input *py.Object) *py.Object
  3770  
  3771  // See https://pytorch.org/docs/stable/generated/torch.row_stack.html
  3772  //
  3773  //go:linkname RowStack py.row_stack
  3774  func RowStack(tensors *py.Object) *py.Object
  3775  
  3776  // See https://pytorch.org/docs/stable/generated/torch.rsqrt.html
  3777  //
  3778  //go:linkname Rsqrt py.rsqrt
  3779  func Rsqrt(input *py.Object) *py.Object
  3780  
  3781  // See https://pytorch.org/docs/stable/generated/torch.scatter.html
  3782  //
  3783  //go:linkname Scatter py.scatter
  3784  func Scatter(input *py.Object, dim *py.Object, index *py.Object, src *py.Object) *py.Object
  3785  
  3786  // See https://pytorch.org/docs/stable/generated/torch.scatter_add.html
  3787  //
  3788  //go:linkname ScatterAdd py.scatter_add
  3789  func ScatterAdd(input *py.Object, dim *py.Object, index *py.Object, src *py.Object) *py.Object
  3790  
  3791  // See https://pytorch.org/docs/stable/generated/torch.scatter_reduce.html
  3792  //
  3793  //go:linkname ScatterReduce py.scatter_reduce
  3794  func ScatterReduce(input *py.Object, dim *py.Object, index *py.Object, src *py.Object, reduce *py.Object) *py.Object
  3795  
  3796  // See https://pytorch.org/docs/stable/generated/torch.searchsorted.html
  3797  //
  3798  //go:linkname Searchsorted py.searchsorted
  3799  func Searchsorted(sortedSequence *py.Object, values *py.Object) *py.Object
  3800  
  3801  // See https://pytorch.org/docs/stable/generated/torch.select.html
  3802  //
  3803  //go:linkname Select py.select
  3804  func Select(input *py.Object, dim *py.Object, index *py.Object) *py.Object
  3805  
  3806  // See https://pytorch.org/docs/stable/generated/torch.select_scatter.html
  3807  //
  3808  //go:linkname SelectScatter py.select_scatter
  3809  func SelectScatter(input *py.Object, src *py.Object, dim *py.Object, index *py.Object) *py.Object
  3810  
  3811  // See https://pytorch.org/docs/stable/generated/torch.sgn.html
  3812  //
  3813  //go:linkname Sgn py.sgn
  3814  func Sgn(input *py.Object) *py.Object
  3815  
  3816  // See https://pytorch.org/docs/stable/generated/torch.sigmoid.html
  3817  //
  3818  //go:linkname Sigmoid py.sigmoid
  3819  func Sigmoid(input *py.Object) *py.Object
  3820  
  3821  // See https://pytorch.org/docs/stable/generated/torch.sign.html
  3822  //
  3823  //go:linkname Sign py.sign
  3824  func Sign(input *py.Object) *py.Object
  3825  
  3826  // See https://pytorch.org/docs/stable/generated/torch.signbit.html
  3827  //
  3828  //go:linkname Signbit py.signbit
  3829  func Signbit(input *py.Object) *py.Object
  3830  
  3831  // See https://pytorch.org/docs/stable/generated/torch.sin.html
  3832  //
  3833  //go:linkname Sin py.sin
  3834  func Sin(input *py.Object) *py.Object
  3835  
  3836  // See https://pytorch.org/docs/stable/generated/torch.sinc.html
  3837  //
  3838  //go:linkname Sinc py.sinc
  3839  func Sinc(input *py.Object) *py.Object
  3840  
  3841  // See https://pytorch.org/docs/stable/generated/torch.sinh.html
  3842  //
  3843  //go:linkname Sinh py.sinh
  3844  func Sinh(input *py.Object) *py.Object
  3845  
  3846  // See https://pytorch.org/docs/stable/generated/torch.slice_scatter.html
  3847  //
  3848  //go:linkname SliceScatter py.slice_scatter
  3849  func SliceScatter(input *py.Object, src *py.Object, dim *py.Object, start *py.Object, end *py.Object, step *py.Object) *py.Object
  3850  
  3851  // See https://pytorch.org/docs/stable/generated/torch.slogdet.html
  3852  //
  3853  //go:linkname Slogdet py.slogdet
  3854  func Slogdet(input *py.Object) *py.Object
  3855  
  3856  // See https://pytorch.org/docs/stable/generated/torch.smm.html
  3857  //
  3858  //go:linkname Smm py.smm
  3859  func Smm(input *py.Object, mat *py.Object) *py.Object
  3860  
  3861  // See https://pytorch.org/docs/stable/generated/torch.softmax.html
  3862  //
  3863  //go:linkname Softmax py.softmax
  3864  func Softmax(input *py.Object, dim *py.Object) *py.Object
  3865  
  3866  // See https://pytorch.org/docs/stable/generated/torch.sort.html
  3867  //
  3868  //go:linkname Sort py.sort
  3869  func Sort(input *py.Object, dim *py.Object, descending *py.Object, stable *py.Object) *py.Object
  3870  
  3871  // See https://pytorch.org/docs/stable/generated/torch.sparse_bsc_tensor.html
  3872  //
  3873  //go:linkname SparseBscTensor py.sparse_bsc_tensor
  3874  func SparseBscTensor(ccolIndices *py.Object, rowIndices *py.Object, values *py.Object, size *py.Object) *py.Object
  3875  
  3876  // See https://pytorch.org/docs/stable/generated/torch.sparse_bsr_tensor.html
  3877  //
  3878  //go:linkname SparseBsrTensor py.sparse_bsr_tensor
  3879  func SparseBsrTensor(crowIndices *py.Object, colIndices *py.Object, values *py.Object, size *py.Object) *py.Object
  3880  
  3881  // See https://pytorch.org/docs/stable/generated/torch.sparse_compressed_tensor.html
  3882  //
  3883  //go:linkname SparseCompressedTensor py.sparse_compressed_tensor
  3884  func SparseCompressedTensor(compressedIndices *py.Object, plainIndices *py.Object, values *py.Object, size *py.Object) *py.Object
  3885  
  3886  // See https://pytorch.org/docs/stable/generated/torch.sparse_coo_tensor.html
  3887  //
  3888  //go:linkname SparseCooTensor py.sparse_coo_tensor
  3889  func SparseCooTensor(indices *py.Object, values *py.Object, size *py.Object) *py.Object
  3890  
  3891  // See https://pytorch.org/docs/stable/generated/torch.sparse_csc_tensor.html
  3892  //
  3893  //go:linkname SparseCscTensor py.sparse_csc_tensor
  3894  func SparseCscTensor(ccolIndices *py.Object, rowIndices *py.Object, values *py.Object, size *py.Object) *py.Object
  3895  
  3896  // See https://pytorch.org/docs/stable/generated/torch.sparse_csr_tensor.html
  3897  //
  3898  //go:linkname SparseCsrTensor py.sparse_csr_tensor
  3899  func SparseCsrTensor(crowIndices *py.Object, colIndices *py.Object, values *py.Object, size *py.Object) *py.Object
  3900  
  3901  // See https://pytorch.org/docs/stable/generated/torch.sqrt.html
  3902  //
  3903  //go:linkname Sqrt py.sqrt
  3904  func Sqrt(input *py.Object) *py.Object
  3905  
  3906  // See https://pytorch.org/docs/stable/generated/torch.square.html
  3907  //
  3908  //go:linkname Square py.square
  3909  func Square(input *py.Object) *py.Object
  3910  
  3911  // See https://pytorch.org/docs/stable/generated/torch.squeeze.html
  3912  //
  3913  //go:linkname Squeeze py.squeeze
  3914  func Squeeze(input *py.Object, dim *py.Object) *py.Object
  3915  
  3916  // See https://pytorch.org/docs/stable/generated/torch.sspaddmm.html
  3917  //
  3918  //go:linkname Sspaddmm py.sspaddmm
  3919  func Sspaddmm(input *py.Object, mat1 *py.Object, mat2 *py.Object) *py.Object
  3920  
  3921  // See https://pytorch.org/docs/stable/generated/torch.stack.html
  3922  //
  3923  //go:linkname Stack py.stack
  3924  func Stack(tensors *py.Object, dim *py.Object) *py.Object
  3925  
  3926  // See https://pytorch.org/docs/stable/generated/torch.std.html
  3927  //
  3928  //go:linkname Std py.std
  3929  func Std(input *py.Object, dim *py.Object) *py.Object
  3930  
  3931  // See https://pytorch.org/docs/stable/generated/torch.std_mean.html
  3932  //
  3933  //go:linkname StdMean py.std_mean
  3934  func StdMean(input *py.Object, dim *py.Object) *py.Object
  3935  
  3936  // See https://pytorch.org/docs/stable/generated/torch.sub.html
  3937  //
  3938  //go:linkname Sub py.sub
  3939  func Sub(input *py.Object, other *py.Object) *py.Object
  3940  
  3941  // See https://pytorch.org/docs/stable/generated/torch.subtract.html
  3942  //
  3943  //go:linkname Subtract py.subtract
  3944  func Subtract(input *py.Object, other *py.Object) *py.Object
  3945  
  3946  // See https://pytorch.org/docs/stable/generated/torch.sum.html
  3947  //
  3948  //go:linkname Sum py.sum
  3949  func Sum(input *py.Object) *py.Object
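
// Illustrative usage sketch (not part of the generated bindings): results are
// plain *py.Object values, so bindings can be chained directly. As above,
// py.Long from github.com/goplus/llgo/py is assumed, not confirmed.
//
//	x := torch.Ones(py.Long(4)) // 1-D tensor of four ones
//	total := torch.Sum(x)       // scalar tensor holding 4.0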
  3950  
  3951  // See https://pytorch.org/docs/stable/generated/torch.svd.html
  3952  //
  3953  //go:linkname Svd py.svd
  3954  func Svd(input *py.Object, some *py.Object, computeUv *py.Object) *py.Object
  3955  
  3956  // See https://pytorch.org/docs/stable/generated/torch.swapaxes.html
  3957  //
  3958  //go:linkname Swapaxes py.swapaxes
  3959  func Swapaxes(input *py.Object, axis0 *py.Object, axis1 *py.Object) *py.Object
  3960  
  3961  // See https://pytorch.org/docs/stable/generated/torch.swapdims.html
  3962  //
  3963  //go:linkname Swapdims py.swapdims
  3964  func Swapdims(input *py.Object, dim0 *py.Object, dim1 *py.Object) *py.Object
  3965  
  3966  // See https://pytorch.org/docs/stable/generated/torch.t.html
  3967  //
  3968  //go:linkname T py.t
  3969  func T(input *py.Object) *py.Object
  3970  
  3971  // See https://pytorch.org/docs/stable/generated/torch.take.html
  3972  //
  3973  //go:linkname Take py.take
  3974  func Take(input *py.Object, index *py.Object) *py.Object
  3975  
  3976  // See https://pytorch.org/docs/stable/generated/torch.take_along_dim.html
  3977  //
  3978  //go:linkname TakeAlongDim py.take_along_dim
  3979  func TakeAlongDim(input *py.Object, indices *py.Object, dim *py.Object) *py.Object
  3980  
  3981  // See https://pytorch.org/docs/stable/generated/torch.tan.html
  3982  //
  3983  //go:linkname Tan py.tan
  3984  func Tan(input *py.Object) *py.Object
  3985  
  3986  // See https://pytorch.org/docs/stable/generated/torch.tanh.html
  3987  //
  3988  //go:linkname Tanh py.tanh
  3989  func Tanh(input *py.Object) *py.Object
  3990  
  3991  // See https://pytorch.org/docs/stable/generated/torch.tensor.html
  3992  //
  3993  //go:linkname Tensor py.tensor
  3994  func Tensor(data *py.Object) *py.Object
  3995  
  3996  // See https://pytorch.org/docs/stable/generated/torch.tensor_split.html
  3997  //
  3998  //go:linkname TensorSplit py.tensor_split
  3999  func TensorSplit(input *py.Object, indicesOrSections *py.Object, dim *py.Object) *py.Object
  4000  
  4001  // See https://pytorch.org/docs/stable/generated/torch.tile.html
  4002  //
  4003  //go:linkname Tile py.tile
  4004  func Tile(input *py.Object, dims *py.Object) *py.Object
  4005  
  4006  // See https://pytorch.org/docs/stable/generated/torch.topk.html
  4007  //
  4008  //go:linkname Topk py.topk
  4009  func Topk(input *py.Object, k *py.Object, dim *py.Object, largest *py.Object, sorted *py.Object) *py.Object
  4010  
  4011  // See https://pytorch.org/docs/stable/generated/torch.trace.html
  4012  //
  4013  //go:linkname Trace py.trace
  4014  func Trace(input *py.Object) *py.Object
  4015  
  4016  // See https://pytorch.org/docs/stable/generated/torch.transpose.html
  4017  //
  4018  //go:linkname Transpose py.transpose
  4019  func Transpose(input *py.Object, dim0 *py.Object, dim1 *py.Object) *py.Object
  4020  
  4021  // See https://pytorch.org/docs/stable/generated/torch.trapezoid.html
  4022  //
  4023  //go:linkname Trapezoid py.trapezoid
  4024  func Trapezoid(y *py.Object, x *py.Object) *py.Object
  4025  
  4026  // See https://pytorch.org/docs/stable/generated/torch.trapz.html
  4027  //
  4028  //go:linkname Trapz py.trapz
  4029  func Trapz(y *py.Object, x *py.Object) *py.Object
  4030  
  4031  // See https://pytorch.org/docs/stable/generated/torch.triangular_solve.html
  4032  //
  4033  //go:linkname TriangularSolve py.triangular_solve
  4034  func TriangularSolve(b *py.Object, A *py.Object, upper *py.Object, transpose *py.Object, unitriangular *py.Object) *py.Object
  4035  
  4036  // See https://pytorch.org/docs/stable/generated/torch.tril.html
  4037  //
  4038  //go:linkname Tril py.tril
  4039  func Tril(input *py.Object, diagonal *py.Object) *py.Object
  4040  
  4041  // See https://pytorch.org/docs/stable/generated/torch.tril_indices.html
  4042  //
  4043  //go:linkname TrilIndices py.tril_indices
  4044  func TrilIndices(row *py.Object, col *py.Object, offset *py.Object) *py.Object
  4045  
  4046  // See https://pytorch.org/docs/stable/generated/torch.triu.html
  4047  //
  4048  //go:linkname Triu py.triu
  4049  func Triu(input *py.Object, diagonal *py.Object) *py.Object
  4050  
  4051  // See https://pytorch.org/docs/stable/generated/torch.triu_indices.html
  4052  //
  4053  //go:linkname TriuIndices py.triu_indices
  4054  func TriuIndices(row *py.Object, col *py.Object, offset *py.Object) *py.Object
  4055  
  4056  // See https://pytorch.org/docs/stable/generated/torch.true_divide.html
  4057  //
  4058  //go:linkname TrueDivide py.true_divide
  4059  func TrueDivide(dividend *py.Object, divisor *py.Object) *py.Object
  4060  
  4061  // See https://pytorch.org/docs/stable/generated/torch.trunc.html
  4062  //
  4063  //go:linkname Trunc py.trunc
  4064  func Trunc(input *py.Object) *py.Object
  4065  
  4066  // See https://pytorch.org/docs/stable/generated/torch.unbind.html
  4067  //
  4068  //go:linkname Unbind py.unbind
  4069  func Unbind(input *py.Object, dim *py.Object) *py.Object
  4070  
  4071  // See https://pytorch.org/docs/stable/generated/torch.unflatten.html
  4072  //
  4073  //go:linkname Unflatten py.unflatten
  4074  func Unflatten(input *py.Object, dim *py.Object, sizes *py.Object) *py.Object
  4075  
  4076  // See https://pytorch.org/docs/stable/generated/torch.unsqueeze.html
  4077  //
  4078  //go:linkname Unsqueeze py.unsqueeze
  4079  func Unsqueeze(input *py.Object, dim *py.Object) *py.Object
  4080  
  4081  // See https://pytorch.org/docs/stable/generated/torch.vander.html
  4082  //
  4083  //go:linkname Vander py.vander
  4084  func Vander(x *py.Object, N *py.Object, increasing *py.Object) *py.Object
  4085  
  4086  // See https://pytorch.org/docs/stable/generated/torch.var.html
  4087  //
  4088  //go:linkname Var py.var
  4089  func Var(input *py.Object, dim *py.Object) *py.Object
  4090  
  4091  // See https://pytorch.org/docs/stable/generated/torch.var_mean.html
  4092  //
  4093  //go:linkname VarMean py.var_mean
  4094  func VarMean(input *py.Object, dim *py.Object) *py.Object
  4095  
  4096  // See https://pytorch.org/docs/stable/generated/torch.vdot.html
  4097  //
  4098  //go:linkname Vdot py.vdot
  4099  func Vdot(input *py.Object, other *py.Object) *py.Object
  4100  
  4101  // See https://pytorch.org/docs/stable/generated/torch.view_as_complex.html
  4102  //
  4103  //go:linkname ViewAsComplex py.view_as_complex
  4104  func ViewAsComplex(input *py.Object) *py.Object
  4105  
  4106  // See https://pytorch.org/docs/stable/generated/torch.view_as_real.html
  4107  //
  4108  //go:linkname ViewAsReal py.view_as_real
  4109  func ViewAsReal(input *py.Object) *py.Object
  4110  
  4111  // See https://pytorch.org/docs/stable/generated/torch.vsplit.html
  4112  //
  4113  //go:linkname Vsplit py.vsplit
  4114  func Vsplit(input *py.Object, indicesOrSections *py.Object) *py.Object
  4115  
  4116  // See https://pytorch.org/docs/stable/generated/torch.vstack.html
  4117  //
  4118  //go:linkname Vstack py.vstack
  4119  func Vstack(tensors *py.Object) *py.Object
  4120  
  4121  // See https://pytorch.org/docs/stable/generated/torch.where.html
  4122  //
  4123  //go:linkname Where py.where
  4124  func Where(condition *py.Object, input *py.Object, other *py.Object) *py.Object
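
// Illustrative usage sketch (not part of the generated bindings): one binding's
// output can serve as another's mask argument. Here a and b stand for tensor
// *py.Object values obtained elsewhere (for example from the factory functions
// above); they are placeholders, not defined in this file.
//
//	diff := torch.Sub(a, b)        // elementwise a - b
//	mask := torch.Signbit(diff)    // true where a < b
//	out := torch.Where(mask, a, b) // elementwise minimum via selection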
  4125  
  4126  // See https://pytorch.org/docs/stable/generated/torch.xlogy.html
  4127  //
  4128  //go:linkname Xlogy py.xlogy
  4129  func Xlogy(input *py.Object, other *py.Object) *py.Object
  4130  
  4131  // See https://pytorch.org/docs/stable/generated/torch.zeros.html
  4132  //
  4133  //go:linkname Zeros py.zeros
  4134  func Zeros(__llgo_va_list ...interface{}) *py.Object
  4135  
  4136  // See https://pytorch.org/docs/stable/generated/torch.zeros_like.html
  4137  //
  4138  //go:linkname ZerosLike py.zeros_like
  4139  func ZerosLike(input *py.Object) *py.Object
  4140  
  4141  // See https://pytorch.org/docs/stable/generated/torch.vmap.html
  4142  //
  4143  //go:linkname Vmap py.vmap
  4144  func Vmap(func_ *py.Object, inDims *py.Object, outDims *py.Object, randomness *py.Object) *py.Object