/*
 * Copyright (c) 2022-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/dynamic_fusion/sketch/gpu/operators/GpuAdd.h"
#include "arm_compute/dynamic_fusion/sketch/gpu/GpuWorkloadSketch.h"

#include "src/dynamic_fusion/sketch/gpu/operators/internal/GpuElementwiseBinaryCommon.h"

namespace arm_compute
{
namespace experimental
{
namespace dynamic_fusion
{
Status GpuAdd::validate_op(const GpuWorkloadSketch &sketch,
                           const ITensorInfo       *lhs,
                           const ITensorInfo       *rhs)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(lhs, rhs);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(lhs, 1, DataType::F16, DataType::F32, DataType::U8, DataType::S16, DataType::S32);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs->data_type() != rhs->data_type(), "Input tensors must be the same data type");

    // Set the elementwise operation to Add then call the elementwise common validate_op
    ElementwiseBinaryCommonAttributes common_attributes{};
    common_attributes.operation(ElementwiseBinaryCommonAttributes::ElementwiseOp::Add);
    return GpuElementwiseBinaryCommon::validate_op(sketch, lhs, rhs, common_attributes);
}

Status GpuAdd::is_supported_op(const GpuWorkloadContext &context,
                               const ITensorInfo        *lhs,
                               const ITensorInfo        *rhs)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(lhs, rhs);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(lhs, 1, DataType::F16, DataType::F32, DataType::U8, DataType::S16, DataType::S32);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(lhs->data_type() != rhs->data_type(), "Input tensors must be the same data type");

    // Set the elementwise operation to Add then call the elementwise common is_supported_op
    ElementwiseBinaryCommonAttributes common_attributes{};
    common_attributes.operation(ElementwiseBinaryCommonAttributes::ElementwiseOp::Add);
    return GpuElementwiseBinaryCommon::is_supported_op(context, lhs, rhs, common_attributes);
}

ITensorInfo *GpuAdd::create_op(GpuWorkloadSketch &sketch,
                               ITensorInfo       *lhs,
                               ITensorInfo       *rhs)
{
    // No need to log or validate as they'll be handled inside GpuElementwiseBinaryCommon::create_op()
    // Set the elementwise operation to Add then call the elementwise common create_op
    ElementwiseBinaryCommonAttributes common_attributes{};
    common_attributes.operation(ElementwiseBinaryCommonAttributes::ElementwiseOp::Add);
    return GpuElementwiseBinaryCommon::create_op(sketch, lhs, rhs, common_attributes);
}
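
// Illustrative usage (a hedged sketch, not part of this file's API surface): callers typically
// check is_supported_op()/validate_op() before fusing the addition into a workload sketch via
// create_op(). The name cl_compile_ctx and the way the tensor infos are set up below are
// assumptions that may differ across library versions.
//
//   GpuWorkloadContext context{ &cl_compile_ctx }; // assumes an existing CLCompileContext
//   GpuWorkloadSketch  sketch{ &context };
//
//   TensorInfo lhs_info(TensorShape(32U, 16U), 1, DataType::F32);
//   TensorInfo rhs_info(TensorShape(32U, 16U), 1, DataType::F32);
//
//   if(bool(GpuAdd::validate_op(sketch, &lhs_info, &rhs_info))) // Status converts to true on success
//   {
//       ITensorInfo *dst_info = GpuAdd::create_op(sketch, &lhs_info, &rhs_info);
//       // dst_info describes the output of the fused addition within the sketch
//   }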

} // namespace dynamic_fusion
} // namespace experimental
} // namespace arm_compute