#pragma once

// @generated by torchgen/gen.py from Function.h

#include <ATen/Context.h>
#include <ATen/DeviceGuard.h>
#include <ATen/TensorUtils.h>
#include <ATen/TracerMode.h>
#include <ATen/core/Generator.h>
#include <ATen/core/Reduction.h>
#include <ATen/core/Tensor.h>
#include <c10/core/Scalar.h>
#include <c10/core/Storage.h>
#include <c10/core/TensorOptions.h>
#include <c10/util/Deprecated.h>
#include <c10/util/Optional.h>

#include <ATen/ops/conv1d_ops.h>

namespace at {


// aten::conv1d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=1, SymInt[1] padding=0, SymInt[1] dilation=1, SymInt groups=1) -> Tensor
inline at::Tensor conv1d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, int64_t groups=1) {
    return at::_ops::conv1d::call(input, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), groups);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  at::Tensor conv1d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, int64_t groups=1) {
    return at::_ops::conv1d::call(input, weight, bias, c10::fromIntArrayRefSlow(stride), c10::fromIntArrayRefSlow(padding), c10::fromIntArrayRefSlow(dilation), groups);
  }
}

// aten::conv1d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=1, SymInt[1] padding=0, SymInt[1] dilation=1, SymInt groups=1) -> Tensor
inline at::Tensor conv1d_symint(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1) {
    return at::_ops::conv1d::call(input, weight, bias, stride, padding, dilation, groups);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  at::Tensor conv1d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1) {
    return at::_ops::conv1d::call(input, weight, bias, stride, padding, dilation, groups);
  }
}

// aten::conv1d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=1, str padding="valid", SymInt[1] dilation=1, SymInt groups=1) -> Tensor
inline at::Tensor conv1d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation=1, int64_t groups=1) {
    return at::_ops::conv1d_padding::call(input, weight, bias, c10::fromIntArrayRefSlow(stride), padding, c10::fromIntArrayRefSlow(dilation), groups);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
  at::Tensor conv1d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation=1, int64_t groups=1) {
    return at::_ops::conv1d_padding::call(input, weight, bias, c10::fromIntArrayRefSlow(stride), padding, c10::fromIntArrayRefSlow(dilation), groups);
  }
}

// aten::conv1d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=1, str padding="valid", SymInt[1] dilation=1, SymInt groups=1) -> Tensor
inline at::Tensor conv1d_symint(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1) {
    return at::_ops::conv1d_padding::call(input, weight, bias, stride, padding, dilation, groups);
}
namespace symint {
  template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
  at::Tensor conv1d(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1) {
    return at::_ops::conv1d_padding::call(input, weight, bias, stride, padding, dilation, groups);
  }
}

}
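
// Usage sketch (illustrative only, not part of the generated output; tensor shapes,
// stride, and padding below are assumptions chosen for the example). It exercises the
// IntArrayRef overload of at::conv1d declared above, which forwards to
// at::_ops::conv1d::call.
//
//   #include <ATen/ATen.h>
//
//   at::Tensor input  = at::randn({8, 16, 100});  // (batch, in_channels, length)
//   at::Tensor weight = at::randn({32, 16, 5});   // (out_channels, in_channels / groups, kernel_size)
//   at::Tensor bias   = at::randn({32});
//   // stride=2, padding=2; dilation and groups keep their defaults of 1
//   at::Tensor out = at::conv1d(input, weight, bias, /*stride=*/{2}, /*padding=*/{2});
//   // out.sizes() == (8, 32, 50), since floor((100 + 2*2 - 1*(5-1) - 1) / 2) + 1 == 50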