#pragma once

// ${generated_comment}

#ifdef TORCH_ASSERT_NO_OPERATORS
#error This change adds a dependency on native_functions.yaml,            \
  meaning the file will need to be re-compiled every time an operator     \
  is changed or added. Consider if your change would be better placed in  \
  another file, or if a more specific header might achieve the same goal. \
  See NOTE: [Tensor vs. TensorBase]
#endif

#if defined(AT_PER_OPERATOR_HEADERS) && defined(TORCH_ASSERT_ONLY_METHOD_OPERATORS)
#error This change adds a dependency on all pytorch operators, meaning the     \
  file will need to be re-compiled every time an operator is changed or added. \
  Consider including a specific operator from <ATen/ops/{my_operator}.h> and   \
  see NOTE [TORCH_ASSERT_ONLY_METHOD_OPERATORS].
#endif

// NOTE: [TORCH_ASSERT_ONLY_METHOD_OPERATORS]
//
// In ATen, certain generated header files include the definitions of
// every single operator in PyTorch. Unfortunately this means that every
// time an operator signature is updated or changed in
// native_functions.yaml, you (and every other PyTorch developer) need
// to recompile every source file that includes any of these headers.
//
// To break up these header dependencies and improve incremental build
// times for all PyTorch developers, these headers are split into
// per-operator headers in the `ATen/ops` folder. This limits incremental
// builds to only changes to methods of `Tensor`, or to files that use
// the specific operator being changed. With `at::sum` as an example, you
// should include
//
//   <ATen/ops/sum.h>               // instead of ATen/Functions.h
//   <ATen/ops/sum_native.h>        // instead of ATen/NativeFunctions.h
//   <ATen/ops/sum_ops.h>           // instead of ATen/Operators.h
//   <ATen/ops/sum_cpu_dispatch.h>  // instead of ATen/CPUFunctions.h
//
// However, even if you're careful to do this in your own code,
// `Functions.h` might still be included indirectly through another
// header without you realising. To avoid this, you can add
//
//   #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
//
// to the top of your source file. This way, any time one of the
// non-specific headers is included, the compiler will error out.
//
// Also, be aware that the `ops` headers are not available in all build
// configurations (namely fb-internal), so you must guard these
// includes with `#ifdef AT_PER_OPERATOR_HEADERS`, e.g.
//
//   #ifndef AT_PER_OPERATOR_HEADERS
//   #include <ATen/Functions.h>
//   #else
//   #include <ATen/ops/{my_operator}.h>
//   #endif
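//
// Putting the pieces together, a minimal source file that only uses
// `at::sum` might look like the following sketch (illustrative only, not
// generated code; the file and function names are hypothetical):
//
//   // my_kernel.cpp
//   #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
//   #include <ATen/core/Tensor.h>
//
//   #ifndef AT_PER_OPERATOR_HEADERS
//   #include <ATen/Functions.h>
//   #else
//   #include <ATen/ops/sum.h>
//   #endif
//
//   at::Tensor sum_all(const at::Tensor& t) {
//     // Only `sum`'s per-operator header is needed here, so in
//     // per-operator builds, edits to unrelated operators in
//     // native_functions.yaml will not recompile this file.
//     return at::sum(t);
//   }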
#include <ATen/Context.h>
#include <ATen/DeviceGuard.h>
#include <ATen/TensorUtils.h>
#include <ATen/TracerMode.h>
#include <ATen/core/Generator.h>
#include <ATen/core/Reduction.h>
#include <ATen/core/Tensor.h>
#include <c10/core/Scalar.h>
#include <c10/core/Storage.h>
#include <c10/core/SymInt.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Deprecated.h>
#include <c10/util/Optional.h>
#include <c10/util/OptionalArrayRef.h>

#include <ATen/ops/from_blob.h>
#include <ATen/ops/tensor.h>

${Functions_includes}

namespace at {

${Functions_declarations}

// Special C++ only overloads for std()-like functions (see gh-40287).
// These are needed because int -> bool conversion takes precedence over
// int -> IntArrayRef. So, for example, std(0) would select the
// std(unbiased=False) overload.

TORCH_API inline Tensor var(const Tensor& self, int dim) {
  return at::var(self, IntArrayRef{dim});
}

TORCH_API inline std::tuple<Tensor, Tensor> var_mean(const Tensor& self, int dim) {
  return at::var_mean(self, IntArrayRef{dim});
}

TORCH_API inline Tensor std(const Tensor& self, int dim) {
  return at::std(self, IntArrayRef{dim});
}

TORCH_API inline std::tuple<Tensor, Tensor> std_mean(const Tensor& self, int dim) {
  return at::std_mean(self, IntArrayRef{dim});
}

inline int64_t numel(const Tensor& tensor) {
  return tensor.numel();
}

inline int64_t size(const Tensor& tensor, int64_t dim) {
  return tensor.size(dim);
}

inline int64_t stride(const Tensor& tensor, int64_t dim) {
  return tensor.stride(dim);
}

inline bool is_complex(const Tensor& tensor) {
  return tensor.is_complex();
}

inline bool is_floating_point(const Tensor& tensor) {
  return tensor.is_floating_point();
}

inline bool is_signed(const Tensor& tensor) {
  return tensor.is_signed();
}

inline bool is_inference(const Tensor& tensor) {
  return tensor.is_inference();
}

inline bool _is_zerotensor(const Tensor& tensor) {
  return tensor._is_zerotensor();
}

inline bool is_conj(const Tensor& tensor) {
  return tensor.is_conj();
}

inline Tensor conj(const Tensor& tensor) {
  return tensor.conj();
}

inline bool is_neg(const Tensor& tensor) {
  return tensor.is_neg();
}

} // namespace at
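
// Usage sketch for the std()-like overloads above (illustrative only, not
// part of the generated API; the tensor values are hypothetical):
//
//   at::Tensor t = at::rand({3, 4});
//
//   // Without the int overload, the literal 0 would undergo the standard
//   // int -> bool conversion before the user-defined int -> IntArrayRef
//   // conversion, selecting std(self, /*unbiased=*/false) rather than a
//   // reduction over dimension 0.
//   at::Tensor s = at::std(t, 0);  // dispatches to std(self, IntArrayRef{0})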