// Ternary and higher-order pointwise operations
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/native/PointwiseOps.h>

#include <ATen/core/Tensor.h>
#include <ATen/TensorMeta.h>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/addcdiv_native.h>
#include <ATen/ops/addcmul_native.h>
#endif

namespace at::meta {

// Meta functions for the structured addcmul/addcdiv kernels: they configure a
// broadcasting ternary TensorIterator over self, tensor1, and tensor2, which
// also determines the output's shape and dtype.
TORCH_META_FUNC(addcmul)
(const Tensor& self,
 const Tensor& tensor1,
 const Tensor& tensor2,
 const Scalar& value) {
  build_ternary_op(maybe_get_output(), self, tensor1, tensor2);
}

TORCH_META_FUNC(addcdiv)
(const Tensor& self,
 const Tensor& tensor1,
 const Tensor& tensor2,
 const Scalar& value) {
  // addcdiv on two integral (or bool) tensors is ambiguous between truncated
  // and true division, so it is rejected outright.
  if (isIntegralType(tensor1.scalar_type(), /*includeBool=*/true) &&
      isIntegralType(tensor2.scalar_type(), /*includeBool=*/true)) {
    TORCH_CHECK(
        false,
        "Integer division with addcdiv is no longer supported, and in a future ",
        "release addcdiv will perform a true division of tensor1 and tensor2. ",
        "The historic addcdiv behavior can be implemented as ",
        "(input + value * torch.trunc(tensor1 / tensor2)).to(input.dtype) ",
        "for integer inputs and as ",
        "(input + value * tensor1 / tensor2) for float inputs. ",
        "The future addcdiv behavior is just the latter implementation: ",
        "(input + value * tensor1 / tensor2), for all dtypes.");
  }
  build_ternary_op(maybe_get_output(), self, tensor1, tensor2);
}

} // namespace at::meta

namespace at::native {

// Structured implementations: forward the already-built TensorIterator (*this)
// and the scalar multiplier to the device-specific stub.
TORCH_IMPL_FUNC(addcmul_out)
(const Tensor& self,
 const Tensor& tensor1,
 const Tensor& tensor2,
 const Scalar& value,
 const Tensor& result) {
  addcmul_stub(device_type(), *this, value);
}

TORCH_IMPL_FUNC(addcdiv_out)
(const Tensor& self,
 const Tensor& tensor1,
 const Tensor& tensor2,
 const Scalar& value,
 const Tensor& result) {
  addcdiv_stub(device_type(), *this, value);
}

DEFINE_DISPATCH(addcmul_stub);
DEFINE_DISPATCH(addcdiv_stub);

} // namespace at::native
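
// ---------------------------------------------------------------------------
// Usage sketch (illustrative, not part of this translation unit): the kernels
// registered above implement, elementwise,
//   addcmul(self, t1, t2, value) = self + value * t1 * t2
//   addcdiv(self, t1, t2, value) = self + value * t1 / t2
// A minimal standalone program exercising the public ATen entry points could
// look like the following, assuming it is built and linked against libtorch:
//
//   #include <ATen/ATen.h>
//   #include <iostream>
//
//   int main() {
//     at::Tensor self = at::ones({2, 2});
//     at::Tensor t1 = at::full({2, 2}, 3.0);
//     at::Tensor t2 = at::full({2, 2}, 2.0);
//     // 1 + 0.5 * 3 * 2 = 4
//     at::Tensor mul_out = at::addcmul(self, t1, t2, /*value=*/0.5);
//     // 1 + 0.5 * 3 / 2 = 1.75
//     at::Tensor div_out = at::addcdiv(self, t1, t2, /*value=*/0.5);
//     std::cout << mul_out << "\n" << div_out << std::endl;
//     return 0;
//   }
// ---------------------------------------------------------------------------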