/*
 * Copyright (c) 2022-2023 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef ARM_COMPUTE_DYNAMIC_FUSION_SKETCH_GPU_OPERATORS_GPUCAST
#define ARM_COMPUTE_DYNAMIC_FUSION_SKETCH_GPU_OPERATORS_GPUCAST

#include "arm_compute/dynamic_fusion/sketch/attributes/CastAttributes.h"

namespace arm_compute
{
namespace experimental
{
namespace dynamic_fusion
{
/** Forward declaration */
class GpuWorkloadContext;
class GpuWorkloadSketch;

/** Operator interface. */
class GpuCast final
{
public:
    /** Attributes are a set of backend-agnostic parameters that define what an operator does */
    using Attributes = CastAttributes;
    /** Create an operator and fuse it into the workload sketch.
     * @note If @ref validate_op() fails, the creation also fails and may throw an error.
     * @note If @ref validate_op() fails, @p sketch remains unchanged and valid.
     *
     * Valid data type configurations:
     * |src |dst                              |
     * |:---|:--------------------------------|
     * |U8  |S8, U16, S16, U32, S32, F16, F32 |
     * |U16 |U8, S8, S16, U32, S32, F16, F32  |
     * |S16 |U8, S8, U16, U32, S32, F16, F32  |
     * |U32 |U8, S8, U16, S16, S32, F16, F32  |
     * |S32 |U8, S8, U16, S16, U32, F16, F32  |
     * |F16 |U8, S8, U16, S16, U32, S32, F32  |
     * |F32 |U8, S8, U16, S16, U32, S32, F16  |
     *
     * The input data type must be different from the output data type.
     *
     * Valid data layouts:
     * - Any
     *
     * @param[in,out] sketch     Workload sketch into which the operator will be fused
     * @param[in]     src        Source tensor info. Data types supported: U8/S8/U16/S16/U32/S32/F16/F32.
     * @param[in]     attributes Operator attributes
     *
     * @return Pointer for the destination tensor info
     */
    static ITensorInfo *create_op(GpuWorkloadSketch &sketch,
                                  ITensorInfo       *src,
                                  const Attributes  &attributes);
    /** Check if the operator configuration is supported, irrespective of fusion
     *
     * @param[in] context    Workload context within which the operator is running
     * @param[in] src        Source tensor info. Data types supported: U8/S8/U16/S16/U32/S32/F16/F32.
     * @param[in] attributes Operator attributes
     *
     * @return Status
     */
    static Status is_supported_op(const GpuWorkloadContext &context,
                                  const ITensorInfo        *src,
                                  const Attributes         &attributes);
    /** Validate the operator and check if its configuration is supported and if it can be fused into the workload sketch.
     *
     * Parameters are similar to @ref GpuCast::create_op()
     *
     * @return Status
     */
    static Status validate_op(const GpuWorkloadSketch &sketch,
                              const ITensorInfo       *src,
                              const Attributes        &attributes);
};
} // namespace dynamic_fusion
} // namespace experimental
} // namespace arm_compute
#endif /* ARM_COMPUTE_DYNAMIC_FUSION_SKETCH_GPU_OPERATORS_GPUCAST */
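/*
 * Usage sketch (illustrative only, not part of this header): shows how a caller might validate and then
 * fuse a GpuCast into a workload sketch. It assumes a GpuWorkloadContext and GpuWorkloadSketch ("sketch")
 * have already been set up as in the dynamic fusion examples, that "src_info" is a valid ITensorInfo of a
 * supported data type, and that CastAttributes exposes a data_type() setter; treat these names as
 * assumptions rather than guaranteed API.
 *
 *   using namespace arm_compute;
 *   using namespace arm_compute::experimental::dynamic_fusion;
 *
 *   GpuCast::Attributes attributes{};
 *   attributes.data_type(DataType::F32); // requested destination data type; must differ from src_info's type
 *
 *   // Validate first: if this fails, create_op() would also fail and the sketch is left untouched.
 *   const Status status = GpuCast::validate_op(sketch, &src_info, attributes);
 *   if(status.error_code() == ErrorCode::OK)
 *   {
 *       ITensorInfo *dst_info = GpuCast::create_op(sketch, &src_info, attributes); // fuse the cast into the sketch
 *   }
 */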