/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fuzzing/operation_signatures/OperationSignatureUtils.h"

namespace android {
namespace nn {
namespace fuzzing_test {

// For pooling ops with explicit padding.
static void poolingExplicitOpConstructor(Type, uint32_t rank, RandomOperation* op) {
    NN_FUZZER_CHECK(rank == 4);

    // Parameters
    int32_t paddingLeft = op->inputs[1]->value<int32_t>();
    int32_t paddingRight = op->inputs[2]->value<int32_t>();
    int32_t paddingTop = op->inputs[3]->value<int32_t>();
    int32_t paddingBottom = op->inputs[4]->value<int32_t>();
    int32_t strideWidth = op->inputs[5]->value<int32_t>();
    int32_t strideHeight = op->inputs[6]->value<int32_t>();
    auto filterWidth = op->inputs[7]->value<RandomVariable>();
    auto filterHeight = op->inputs[8]->value<RandomVariable>();
    bool useNchw = false;
    if (op->inputs.size() > 10) useNchw = op->inputs[10]->value<bool8>();
    int heightIndex = useNchw ? 2 : 1;
    int widthIndex = useNchw ? 3 : 2;
    int channelIndex = useNchw ? 1 : 3;

    // Input, [batch, height_in, width_in, channel]
    op->inputs[0]->dimensions = {RandomVariableType::FREE, RandomVariableType::FREE,
                                 RandomVariableType::FREE, RandomVariableType::FREE};

    // Output, [batch, height_out, width_out, channel]
    op->outputs[0]->dimensions.resize(4);

    // batch and channel
    op->outputs[0]->dimensions[0] = op->inputs[0]->dimensions[0];
    op->outputs[0]->dimensions[channelIndex] = op->inputs[0]->dimensions[channelIndex];

    // height
    explicitPadding(op->inputs[0]->dimensions[heightIndex], filterHeight, strideHeight,
                    /*dilation=*/1, paddingTop, paddingBottom,
                    &op->outputs[0]->dimensions[heightIndex]);

    // width
    explicitPadding(op->inputs[0]->dimensions[widthIndex], filterWidth, strideWidth, /*dilation=*/1,
                    paddingLeft, paddingRight, &op->outputs[0]->dimensions[widthIndex]);

    setSameQuantization(op->outputs[0], op->inputs[0]);
}

// For pooling ops with implicit padding.
static void poolingImplicitOpConstructor(Type, uint32_t rank, RandomOperation* op) {
    NN_FUZZER_CHECK(rank == 4);

    // Parameters
    int32_t paddingScheme = op->inputs[1]->value<int32_t>();
    int32_t strideWidth = op->inputs[2]->value<int32_t>();
    int32_t strideHeight = op->inputs[3]->value<int32_t>();
    auto filterWidth = op->inputs[4]->value<RandomVariable>();
    auto filterHeight = op->inputs[5]->value<RandomVariable>();
    bool useNchw = false;
    if (op->inputs.size() > 7) useNchw = op->inputs[7]->value<bool8>();
    int heightIndex = useNchw ? 2 : 1;
    int widthIndex = useNchw ? 3 : 2;
    int channelIndex = useNchw ? 1 : 3;

    // Input, [batch, height_in, width_in, channel]
    op->inputs[0]->dimensions = {RandomVariableType::FREE, RandomVariableType::FREE,
                                 RandomVariableType::FREE, RandomVariableType::FREE};

    // Output, [batch, height_out, width_out, channel]
    op->outputs[0]->dimensions.resize(4);

    // batch and channel
    op->outputs[0]->dimensions[0] = op->inputs[0]->dimensions[0];
    op->outputs[0]->dimensions[channelIndex] = op->inputs[0]->dimensions[channelIndex];

    // height and width
    implicitPadding(op->inputs[0]->dimensions[heightIndex], filterHeight, strideHeight,
                    /*dilation=*/1, paddingScheme, &op->outputs[0]->dimensions[heightIndex]);
    implicitPadding(op->inputs[0]->dimensions[widthIndex], filterWidth, strideWidth,
                    /*dilation=*/1, paddingScheme, &op->outputs[0]->dimensions[widthIndex]);

    setSameQuantization(op->outputs[0], op->inputs[0]);
}

#define DEFINE_POOLING_SIGNATURE(op, ver, ...)                         \
    DEFINE_OPERATION_SIGNATURE(op##_explicit_##ver){                   \
            .opType = ANEURALNETWORKS_##op,                            \
            .supportedDataTypes = {__VA_ARGS__},                       \
            .supportedRanks = {4},                                     \
            .version = HalVersion::ver,                                \
            .inputs =                                                  \
                    {                                                  \
                            INPUT_DEFAULT,                             \
                            PARAMETER_RANGE(Type::INT32, 1, 3),        \
                            PARAMETER_RANGE(Type::INT32, 1, 3),        \
                            PARAMETER_RANGE(Type::INT32, 1, 3),        \
                            PARAMETER_RANGE(Type::INT32, 1, 3),        \
                            PARAMETER_RANGE(Type::INT32, 1, 3),        \
                            PARAMETER_RANGE(Type::INT32, 1, 3),        \
                            RANDOM_INT_RANGE(1, 4),                    \
                            RANDOM_INT_RANGE(1, 4),                    \
                            PARAMETER_CHOICE(Type::INT32, 0, 1, 2, 3), \
                    },                                                 \
            .outputs = {OUTPUT_DEFAULT},                               \
            .constructor = poolingExplicitOpConstructor};              \
    DEFINE_OPERATION_SIGNATURE(op##_implicit_##ver){                   \
            .opType = ANEURALNETWORKS_##op,                            \
            .supportedDataTypes = {__VA_ARGS__},                       \
            .supportedRanks = {4},                                     \
            .version = HalVersion::ver,                                \
            .inputs =                                                  \
                    {                                                  \
                            INPUT_DEFAULT,                             \
                            PARAMETER_CHOICE(Type::INT32, 1, 2),       \
                            PARAMETER_RANGE(Type::INT32, 1, 3),        \
                            PARAMETER_RANGE(Type::INT32, 1, 3),        \
                            RANDOM_INT_RANGE(1, 4),                    \
                            RANDOM_INT_RANGE(1, 4),                    \
                            PARAMETER_CHOICE(Type::INT32, 0, 1, 2, 3), \
                    },                                                 \
            .outputs = {OUTPUT_DEFAULT},                               \
            .constructor = poolingImplicitOpConstructor};

DEFINE_POOLING_SIGNATURE(AVERAGE_POOL_2D, V1_0, Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM);
DEFINE_POOLING_SIGNATURE(L2_POOL_2D, V1_0, Type::TENSOR_FLOAT32);
DEFINE_POOLING_SIGNATURE(MAX_POOL_2D, V1_0, Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM);

DEFINE_POOLING_SIGNATURE(AVERAGE_POOL_2D, V1_2, Type::TENSOR_FLOAT16);
DEFINE_POOLING_SIGNATURE(L2_POOL_2D, V1_2, Type::TENSOR_FLOAT16);
DEFINE_POOLING_SIGNATURE(MAX_POOL_2D, V1_2, Type::TENSOR_FLOAT16);

#define DEFINE_POOLING_WITH_LAYOUT_SIGNATURE(op, ver, ...)             \
    DEFINE_OPERATION_SIGNATURE(op##_explicit_layout_##ver){            \
            .opType = ANEURALNETWORKS_##op,                            \
            .supportedDataTypes = {__VA_ARGS__},                       \
            .supportedRanks = {4},                                     \
            .version = HalVersion::ver,                                \
            .inputs =                                                  \
                    {                                                  \
                            INPUT_DEFAULT,                             \
                            PARAMETER_RANGE(Type::INT32, 1, 3),        \
                            PARAMETER_RANGE(Type::INT32, 1, 3),        \
                            PARAMETER_RANGE(Type::INT32, 1, 3),        \
                            PARAMETER_RANGE(Type::INT32, 1, 3),        \
                            PARAMETER_RANGE(Type::INT32, 1, 3),        \
                            PARAMETER_RANGE(Type::INT32, 1, 3),        \
                            RANDOM_INT_RANGE(1, 4),                    \
                            RANDOM_INT_RANGE(1, 4),                    \
                            PARAMETER_CHOICE(Type::INT32, 0, 1, 2, 3), \
                            PARAMETER_CHOICE(Type::BOOL, true, false), \
                    },                                                 \
            .outputs = {OUTPUT_DEFAULT},                               \
            .constructor = poolingExplicitOpConstructor};              \
    DEFINE_OPERATION_SIGNATURE(op##_implicit_layout_##ver){            \
            .opType = ANEURALNETWORKS_##op,                            \
            .supportedDataTypes = {__VA_ARGS__},                       \
            .supportedRanks = {4},                                     \
            .version = HalVersion::ver,                                \
            .inputs =                                                  \
                    {                                                  \
                            INPUT_DEFAULT,                             \
                            PARAMETER_CHOICE(Type::INT32, 1, 2),       \
                            PARAMETER_RANGE(Type::INT32, 1, 3),        \
                            PARAMETER_RANGE(Type::INT32, 1, 3),        \
                            RANDOM_INT_RANGE(1, 4),                    \
                            RANDOM_INT_RANGE(1, 4),                    \
                            PARAMETER_CHOICE(Type::INT32, 0, 1, 2, 3), \
                            PARAMETER_CHOICE(Type::BOOL, true, false), \
                    },                                                 \
            .outputs = {OUTPUT_DEFAULT},                               \
            .constructor = poolingImplicitOpConstructor};

DEFINE_POOLING_WITH_LAYOUT_SIGNATURE(AVERAGE_POOL_2D, V1_2, Type::TENSOR_FLOAT32,
                                     Type::TENSOR_FLOAT16, Type::TENSOR_QUANT8_ASYMM);
DEFINE_POOLING_WITH_LAYOUT_SIGNATURE(L2_POOL_2D, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16);
DEFINE_POOLING_WITH_LAYOUT_SIGNATURE(MAX_POOL_2D, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16,
                                     Type::TENSOR_QUANT8_ASYMM);

}  // namespace fuzzing_test
}  // namespace nn
}  // namespace android