1 #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
2 #include <ATen/core/Tensor.h>
3 #include <c10/util/Exception.h>
4
5 #ifndef AT_PER_OPERATOR_HEADERS
6 #include <ATen/NativeFunctions.h>
7 #else
8 #include <ATen/ops/sspaddmm_native.h>
9 #endif
10
11 namespace at::native {
12 // sparse, sparse, sparse, dense, real, real -> sparse
// sspaddmm is only defined when `self` is sparse; the dispatcher routes
// dense `self` tensors here so the caller gets a clear error instead of a
// missing-kernel failure. All parameters are intentionally unused.
Tensor& _sspaddmm_out_only_sparse_cuda(const Tensor& self,
    const Tensor& mat1, const Tensor& mat2, const Scalar& beta, const Scalar& alpha, Tensor& result) {
  // AT_ERROR is deprecated; TORCH_CHECK(false, ...) is the supported way to
  // unconditionally raise (same c10::Error, identical message).
  TORCH_CHECK(false, "tensor.sspaddmm(...) can only be called on sparse tensors");
}
// CUDA kernel for sspaddmm has not been implemented; this stub keeps the
// op registered for the CUDA backend while reporting a clear NYI error.
// All parameters are intentionally unused.
Tensor& _sspaddmm_out_cuda(const Tensor& self,
    const Tensor& mat1, const Tensor& mat2, const Scalar& beta, const Scalar& alpha, Tensor& result) {
  // AT_ERROR is deprecated; TORCH_CHECK(false, ...) is the supported way to
  // unconditionally raise (same c10::Error, identical message).
  TORCH_CHECK(false, "NYI: CUDA sspaddmm is not implemented");
}
21 } // namespace at::native
22