#pragma once

// ${generated_comment}

#ifdef TORCH_ASSERT_NO_OPERATORS
#error This change adds a dependency on native_functions.yaml, \
meaning the file will need to be re-compiled every time an operator \
is changed or added. Consider if your change would be better placed in \
another file, or if a more specific header might achieve the same goal. \
See NOTE: [Tensor vs. TensorBase]
#endif
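
// Note: files compiled with TORCH_ASSERT_NO_OPERATORS defined should
// depend on <ATen/core/TensorBase.h> rather than this header; see
// NOTE: [Tensor vs. TensorBase] in that header for details.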

#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
#error This change adds a dependency on all PyTorch operators, meaning the \
file will need to be re-compiled every time an operator is changed or added. \
Consider including a specific operator from <ATen/ops/{my_operator}.h> and \
see NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
#endif

// NOTE: [TORCH_ASSERT_ONLY_METHOD_OPERATORS]
//
// In ATen, certain generated header files include the definitions of
// every single operator in PyTorch. Unfortunately, this means that every
// time an operator signature is changed or added in
// native_functions.yaml, you (and every other PyTorch developer) need
// to recompile every source file that includes any of these headers.
//
// To break up these header dependencies and improve incremental build
// times for all PyTorch developers, these headers are split into
// per-operator headers in the `ATen/ops` folder. A file then only needs
// to be rebuilt when a method of `Tensor` changes, or when one of the
// specific operators it uses changes. With `at::sum` as an example, you
// should include
//
//   <ATen/ops/sum.h>               // instead of ATen/Functions.h
//   <ATen/ops/sum_native.h>        // instead of ATen/NativeFunctions.h
//   <ATen/ops/sum_ops.h>           // instead of ATen/Operators.h
//   <ATen/ops/sum_cpu_dispatch.h>  // instead of ATen/CPUFunctions.h
//
// However, even if you are careful to use these headers in your own
// code, `Functions.h` might still be included indirectly through
// another header without you realizing it. To guard against this, you
// can add
//
//   #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
//
// to the top of your source file. This way, any time one of the
// non-specific headers is included, the compiler will error out.
//
// Also, be aware that the `ops` headers are not available in all build
// configurations (namely fb-internal), so you must guard these includes
// with `#ifdef AT_PER_OPERATOR_HEADERS`, e.g.
//
//   #ifndef AT_PER_OPERATOR_HEADERS
//   #include <ATen/Functions.h>
//   #else
//   #include <ATen/ops/sum.h>
//   #endif
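//
// As a fuller sketch (the file name and function below are hypothetical,
// for illustration only), a translation unit that only needs `at::sum`
// could be written as:
//
//   // sum_all.cpp
//   #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
//   #include <ATen/core/Tensor.h>
//
//   #ifndef AT_PER_OPERATOR_HEADERS
//   #include <ATen/Functions.h>
//   #else
//   #include <ATen/ops/sum.h>
//   #endif
//
//   at::Tensor sum_all(const at::Tensor& t) {
//     return at::sum(t);
//   }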

#include <ATen/Context.h>
#include <ATen/DeviceGuard.h>
#include <ATen/TensorUtils.h>
#include <ATen/TracerMode.h>
#include <ATen/core/Generator.h>
#include <ATen/core/Reduction.h>
#include <c10/core/SymInt.h>
#include <ATen/core/Tensor.h>
#include <c10/core/Scalar.h>
#include <c10/core/Storage.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Deprecated.h>
#include <c10/util/Optional.h>
#include <c10/util/OptionalArrayRef.h>

#include <ATen/ops/from_blob.h>
#include <ATen/ops/tensor.h>

${Functions_includes}

namespace at {

${Functions_declarations}

// Special C++-only overloads for std()-like functions (see gh-40287).
// These are needed because int -> bool conversion takes precedence over
// int -> IntArrayRef, so, for example, std(0) would otherwise select the
// std(bool unbiased) overload with unbiased=false.
TORCH_API inline Tensor var(const Tensor& self, int dim) {
  return at::var(self, IntArrayRef{dim});
}
TORCH_API inline std::tuple<Tensor, Tensor> var_mean(const Tensor& self, int dim) {
  return at::var_mean(self, IntArrayRef{dim});
}
TORCH_API inline Tensor std(const Tensor& self, int dim) {
  return at::std(self, IntArrayRef{dim});
}
TORCH_API inline std::tuple<Tensor, Tensor> std_mean(const Tensor& self, int dim) {
  return at::std_mean(self, IntArrayRef{dim});
}
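
// Usage sketch: given an `at::Tensor t`, `at::std(t, 0)` binds to the
// overload above and is equivalent to `at::std(t, IntArrayRef{0})`,
// i.e. a reduction over dimension 0.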
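
// Free-function wrappers around common Tensor introspection methods;
// each simply forwards to the corresponding method on `Tensor`.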
inline int64_t numel(const Tensor& tensor) {
  return tensor.numel();
}

inline int64_t size(const Tensor& tensor, int64_t dim) {
  return tensor.size(dim);
}

inline int64_t stride(const Tensor& tensor, int64_t dim) {
  return tensor.stride(dim);
}

inline bool is_complex(const Tensor& tensor) {
  return tensor.is_complex();
}

inline bool is_floating_point(const Tensor& tensor) {
  return tensor.is_floating_point();
}

inline bool is_signed(const Tensor& tensor) {
  return tensor.is_signed();
}

inline bool is_inference(const Tensor& tensor) {
  return tensor.is_inference();
}

inline bool _is_zerotensor(const Tensor& tensor) {
  return tensor._is_zerotensor();
}

inline bool is_conj(const Tensor& tensor) {
  return tensor.is_conj();
}

inline Tensor conj(const Tensor& tensor) {
  return tensor.conj();
}

inline bool is_neg(const Tensor& tensor) {
  return tensor.is_neg();
}

} // namespace at