/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "CpuOperationUtils.h"
#include "OperationResolver.h"

#include <algorithm>
#include <cfloat>
#include <cmath>
#include <cstring>
#include <memory>
#include <mutex>
#include <vector>

#include "Tracing.h"
#include "tensorflow/lite/kernels/internal/common.h"

namespace android {
namespace nn {
namespace transpose_conv_2d {

constexpr char kOperationName[] = "TRANSPOSE_CONV_2D";

constexpr uint32_t kInputTensor = 0;
constexpr uint32_t kFilterTensor = 1;
constexpr uint32_t kBiasTensor = 2;

constexpr uint32_t kNumOutputs = 1;
constexpr uint32_t kOutputTensor = 0;

namespace {

// If possible, we will use this static buffer for the tensor.
constexpr size_t kStaticBufferSize = 1605632;
char static_scratch_buffer[kStaticBufferSize];

// executionMutex is used to protect concurrent access to the static_scratch_buffer.
// std::mutex is safe for pthreads on Android.
std::mutex executionMutex;

struct TransposeConv2dParam {
    int32_t paddingLeft, paddingRight;
    int32_t paddingTop, paddingBottom;
    int32_t strideWidth, strideHeight;
    int32_t activation;
    bool useNchw = false;

    bool initialize(const IOperationExecutionContext* context) {
        uint32_t inCount = context->getNumInputs();
        int32_t paddingImplicit = 0;
        if (inCount == 9) {
            paddingImplicit = context->getInputValue<int32_t>(4);
            strideWidth = context->getInputValue<int32_t>(5);
            strideHeight = context->getInputValue<int32_t>(6);
            activation = context->getInputValue<int32_t>(7);
            useNchw = context->getInputValue<bool>(8);
            Shape filterShape = context->getInputShape(kFilterTensor);
            int32_t filterWidth = getSizeOfDimension(filterShape, 2);
            int32_t filterHeight = getSizeOfDimension(filterShape, 1);
            NN_RET_CHECK_EQ(getNumberOfDimensions(context->getInputShape(3)), 1);
            NN_RET_CHECK_EQ(getSizeOfDimension(context->getInputShape(3), 0), 4);
            const int32_t* outputShapeData = context->getInputBuffer<int32_t>(3);
            int32_t outputWidth = useNchw ? outputShapeData[3] : outputShapeData[2];
            int32_t outputHeight = useNchw ? outputShapeData[2] : outputShapeData[1];
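            // Given the requested output size, back-derive the explicit paddings so that
            //   outputSize = (inputSize - 1) * stride + filterSize - paddingHead - paddingTail
            // holds for each spatial axis (paddingImplicit carries the requested implicit
            // padding scheme).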
            calculateExplicitPaddingTransposeConv(outputWidth, strideWidth, filterWidth,
                                                  paddingImplicit, &paddingLeft, &paddingRight);
            calculateExplicitPaddingTransposeConv(outputHeight, strideHeight, filterHeight,
                                                  paddingImplicit, &paddingTop, &paddingBottom);
        } else if (inCount == 11) {
            paddingLeft = context->getInputValue<int32_t>(3);
            paddingRight = context->getInputValue<int32_t>(4);
            paddingTop = context->getInputValue<int32_t>(5);
            paddingBottom = context->getInputValue<int32_t>(6);
            strideWidth = context->getInputValue<int32_t>(7);
            strideHeight = context->getInputValue<int32_t>(8);
            activation = context->getInputValue<int32_t>(9);
            useNchw = context->getInputValue<bool>(10);
        } else {
            NN_RET_CHECK_FAIL() << "Unsupported input spec for operation " << kOperationName;
        }
        // paddingRight and paddingBottom in transpose conv may be less than 0 to resolve the
        // ambiguous output shape issue in the case of stride > 1.
        NN_RET_CHECK_GE(paddingLeft, 0);
        NN_RET_CHECK_GE(paddingTop, 0);
        NN_RET_CHECK_GT(strideWidth, 0);
        NN_RET_CHECK_GT(strideHeight, 0);
        NN_RET_CHECK_GE(activation, 0);
        return true;
    }
};

#define ANDROID_NN_TRANSPOSE_CONV_PARAMETERS                                    \
    uint32_t numBatches = getSizeOfDimension(inputShape, 0);                    \
    uint32_t inputHeight = getSizeOfDimension(inputShape, 1);                   \
    uint32_t inputWidth = getSizeOfDimension(inputShape, 2);                    \
    uint32_t inputDepth = getSizeOfDimension(inputShape, 3);                    \
    uint32_t filterHeight = getSizeOfDimension(filterShape, 1);                 \
    uint32_t filterWidth = getSizeOfDimension(filterShape, 2);                  \
    uint32_t outputHeight = getSizeOfDimension(outputShape, 1);                 \
    uint32_t outputWidth = getSizeOfDimension(outputShape, 2);                  \
    uint32_t outputDepth = getSizeOfDimension(outputShape, 3);                  \
    int32_t paddingLeft = param.paddingLeft, paddingRight = param.paddingRight; \
    int32_t paddingTop = param.paddingTop, paddingBottom = param.paddingBottom; \
    int32_t strideWidth = param.strideWidth, strideHeight = param.strideHeight; \
    int32_t activation = param.activation;

bool transposeConvNhwc(const float* inputData, const Shape& inputShape, const float* filterData,
                       const Shape& filterShape, const float* biasData, const Shape& biasShape,
                       const TransposeConv2dParam& param, float* outputData,
                       const Shape& outputShape) {
    NNTRACE_TRANS("transposeConvFloat32");
    ANDROID_NN_TRANSPOSE_CONV_PARAMETERS

    float outputActivationMin = 0.0f, outputActivationMax = 0.0f;
    CalculateActivationRangeFloat(activation, &outputActivationMin, &outputActivationMax);

    memset(outputData, 0, getNumberOfElements(outputShape) * sizeof(float));
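
    // Transpose conv is computed by scattering: each input element at (b, h, w) projects a
    // filterHeight x filterWidth window onto the zero-initialized output, anchored at
    // (h * strideHeight - paddingTop, w * strideWidth - paddingLeft); overlapping windows
    // accumulate. Bias and activation clamping are applied in a second pass below.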
    const float* inputBase = inputData;
    float* outputBase = outputData;
    for (uint32_t b = 0; b < numBatches; b++) {
        for (uint32_t h = 0; h < inputHeight; h++) {
            for (uint32_t w = 0; w < inputWidth; w++) {
                int32_t wOutputOrigin = static_cast<int32_t>(w) * strideWidth - paddingLeft;
                int32_t hOutputOrigin = static_cast<int32_t>(h) * strideHeight - paddingTop;

                const float* filterBase = filterData;
                for (uint32_t k = 0; k < outputDepth; k++) {
                    for (uint32_t i = 0; i < filterHeight; i++) {
                        for (uint32_t j = 0; j < filterWidth; j++, filterBase += inputDepth) {
                            int32_t hOutput = hOutputOrigin + static_cast<int32_t>(i);
                            int32_t wOutput = wOutputOrigin + static_cast<int32_t>(j);
                            if (hOutput >= 0 && hOutput < static_cast<int32_t>(outputHeight) &&
                                wOutput >= 0 && wOutput < static_cast<int32_t>(outputWidth)) {
                                for (uint32_t d = 0; d < inputDepth; d++) {
                                    uint32_t outputIndex = hOutput * outputWidth * outputDepth +
                                                           wOutput * outputDepth + k;
                                    outputBase[outputIndex] += inputBase[d] * filterBase[d];
                                }
                            }
                        }
                    }
                }

                inputBase += inputDepth;
            }
        }
        outputBase += outputHeight * outputWidth * outputDepth;
    }

    const uint32_t outerSize = numBatches * outputHeight * outputWidth;
    float* outPtr = outputData;
    for (uint32_t i = 0; i < outerSize; i++) {
        for (uint32_t d = 0; d < outputDepth; d++, outPtr++) {
            *outPtr += biasData[d];
            *outPtr = std::max(std::min(*outPtr, outputActivationMax), outputActivationMin);
        }
    }

    return true;
}
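
// Quantized variant: accumulates in int32 into a scratch buffer, folding the input and filter
// zero-point offsets into each multiply, then requantizes with a single multiplier derived from
// inputScale * filterScale / outputScale.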
bool transposeConvNhwc(const uint8_t* inputData, const Shape& inputShape, const uint8_t* filterData,
                       const Shape& filterShape, const int32_t* biasData, const Shape& biasShape,
                       const TransposeConv2dParam& param, uint8_t* outputData,
                       const Shape& outputShape) {
    NNTRACE_TRANS("transposeConvQuant8");
    ANDROID_NN_TRANSPOSE_CONV_PARAMETERS

    int32_t* tempBuffer = nullptr;
    std::unique_ptr<int32_t[]> bufferGuard;
    uint32_t tempBufferByteSize = getNumberOfElements(outputShape) * sizeof(int32_t);
    if (tempBufferByteSize <= kStaticBufferSize) {
        tempBuffer = reinterpret_cast<int32_t*>(static_scratch_buffer);
    } else {
        tempBuffer = new (std::nothrow) int32_t[tempBufferByteSize / sizeof(int32_t)];
        if (tempBuffer == nullptr) {
            LOG(ERROR) << "ConvTranspose size is too large, not enough memory";
            return false;
        }
        bufferGuard.reset(tempBuffer);
    }

    int32_t inputOffset = -inputShape.offset;
    int32_t filterOffset = -filterShape.offset;
    int32_t outputOffset = outputShape.offset;

    double realMultiplier = 0.0;
    int32_t outputMultiplier = 0;
    int32_t outputShift = 0;
    NN_RET_CHECK(GetQuantizedConvolutionMultipler(inputShape, filterShape, biasShape, outputShape,
                                                  &realMultiplier));
    int exponent;
    NN_RET_CHECK(QuantizeMultiplier(realMultiplier, &outputMultiplier, &exponent));
    outputShift = -exponent;

    int32_t outputActivationMin = 0, outputActivationMax = 0;
    CalculateActivationRangeUint8(activation, outputShape, &outputActivationMin,
                                  &outputActivationMax);

    // Prevent concurrent executions that may access the scratch buffer
    std::unique_lock<std::mutex> lock(executionMutex);
    memset(tempBuffer, 0, tempBufferByteSize);

    const uint8_t* inputPtr = inputData;
    int32_t* outputBase = tempBuffer;
    for (uint32_t b = 0; b < numBatches; b++) {
        for (uint32_t h = 0; h < inputHeight; h++) {
            for (uint32_t w = 0; w < inputWidth; w++) {
                for (uint32_t d = 0; d < inputDepth; d++) {
                    int32_t wOutputOrigin = static_cast<int32_t>(w) * strideWidth - paddingLeft;
                    int32_t hOutputOrigin = static_cast<int32_t>(h) * strideHeight - paddingTop;

                    for (uint32_t i = 0; i < filterHeight; i++) {
                        for (uint32_t j = 0; j < filterWidth; j++) {
                            for (uint32_t k = 0; k < outputDepth; k++) {
                                int32_t hOutput = hOutputOrigin + static_cast<int32_t>(i);
                                int32_t wOutput = wOutputOrigin + static_cast<int32_t>(j);
                                if (hOutput >= 0 && hOutput < static_cast<int32_t>(outputHeight) &&
                                    wOutput >= 0 && wOutput < static_cast<int32_t>(outputWidth)) {
                                    uint32_t filterIndex =
                                            k * filterHeight * filterWidth * inputDepth +
                                            i * filterWidth * inputDepth + j * inputDepth + d;
                                    uint32_t outputIndex = hOutput * outputWidth * outputDepth +
                                                           wOutput * outputDepth + k;
                                    outputBase[outputIndex] +=
                                            (static_cast<int32_t>(*inputPtr) + inputOffset) *
                                            (static_cast<int32_t>(filterData[filterIndex]) +
                                             filterOffset);
                                }
                            }
                        }
                    }

                    inputPtr++;
                }
            }
        }
        outputBase += outputHeight * outputWidth * outputDepth;
    }

    const uint32_t outerSize = numBatches * outputHeight * outputWidth;
    int32_t* bufferPtr = tempBuffer;
    uint8_t* outPtr = outputData;
    for (uint32_t i = 0; i < outerSize; i++) {
        for (uint32_t d = 0; d < outputDepth; d++, bufferPtr++, outPtr++) {
            int32_t outVal = *bufferPtr + biasData[d];
            outVal = tflite::MultiplyByQuantizedMultiplier(outVal, outputMultiplier, -outputShift);
            outVal += outputOffset;
            outVal = std::max(std::min(outVal, outputActivationMax), outputActivationMin);
            *outPtr = static_cast<uint8_t>(outVal);
        }
    }

    return true;
}
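
// The FP16 variant is computed in FP32: inputs are upcast, the FP32 kernel above runs, and the
// result is converted back to _Float16.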
bool transposeConvNhwc(const _Float16* inputData, const Shape& inputShape,
                       const _Float16* filterData, const Shape& filterShape,
                       const _Float16* biasData, const Shape& biasShape,
                       const TransposeConv2dParam& param, _Float16* outputData,
                       const Shape& outputShape) {
    NNTRACE_TRANS("transposeConvFloat16");
    std::vector<float> inputData_float32(getNumberOfElements(inputShape));
    std::vector<float> filterData_float32(getNumberOfElements(filterShape));
    std::vector<float> biasData_float32(getNumberOfElements(biasShape));
    std::vector<float> outputData_float32(getNumberOfElements(outputShape));

    convertFloat16ToFloat32(inputData, &inputData_float32);
    convertFloat16ToFloat32(filterData, &filterData_float32);
    convertFloat16ToFloat32(biasData, &biasData_float32);

    transposeConvNhwc(inputData_float32.data(), inputShape, filterData_float32.data(), filterShape,
                      biasData_float32.data(), biasShape, param, outputData_float32.data(),
                      outputShape);
    convertFloat32ToFloat16(outputData_float32, outputData);

    return true;
}

template <typename T_Input, typename T_Filter, typename T_Bias>
bool transposeConv(const T_Input* inputData, const Shape& inputShape, const T_Filter* filterData,
                   const Shape& filterShape, const T_Bias* biasData, const Shape& biasShape,
                   const TransposeConv2dParam& param, T_Input* outputData,
                   const Shape& outputShape) {
    InputWithLayout<T_Input> input(param.useNchw);
    OutputWithLayout<T_Input> output(param.useNchw);
    NN_RET_CHECK(input.initialize(inputData, inputShape));
    NN_RET_CHECK(output.initialize(outputData, outputShape));
    NN_RET_CHECK(transposeConvNhwc(input.getNhwcBuffer(), input.getNhwcShape(), filterData,
                                   filterShape, biasData, biasShape, param, output.getNhwcBuffer(),
                                   output.getNhwcShape()));
    NN_RET_CHECK(output.commit());
    return true;
}
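
// Per-channel quantized variant: the filter carries one scale per output channel, so a separate
// requantization multiplier is derived for each of the outputDepth channels. The filter is
// symmetric (zero offset), so no filterOffset term appears in the accumulation.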
bool transposeConvQuant8PerChannelNhwc(const uint8_t* inputData, const Shape& inputShape,
                                       const int8_t* filterData, const Shape& filterShape,
                                       const float* filterScales, const int32_t* biasData,
                                       const Shape& biasShape, const TransposeConv2dParam& param,
                                       uint8_t* outputData, const Shape& outputShape) {
    NNTRACE_TRANS("transposeConvQuant8PerChannel");
    ANDROID_NN_TRANSPOSE_CONV_PARAMETERS

    int32_t* tempBuffer = nullptr;
    std::unique_ptr<int32_t[]> bufferGuard;
    uint32_t tempBufferByteSize = getNumberOfElements(outputShape) * sizeof(int32_t);
    if (tempBufferByteSize <= kStaticBufferSize) {
        tempBuffer = reinterpret_cast<int32_t*>(static_scratch_buffer);
    } else {
        tempBuffer = new (std::nothrow) int32_t[tempBufferByteSize / sizeof(int32_t)];
        if (tempBuffer == nullptr) {
            LOG(ERROR) << "ConvTranspose size is too large, not enough memory";
            return false;
        }
        bufferGuard.reset(tempBuffer);
    }

    int32_t inputOffset = -inputShape.offset;
    int32_t outputOffset = outputShape.offset;

    std::vector<double> realMultiplier(outputDepth, 0.0);
    std::vector<int32_t> outputMultiplier(outputDepth, 0);
    std::vector<int32_t> outputShift(outputDepth, 0);
    for (int i = 0; i < outputDepth; ++i) {
        Shape filterChannelShape = filterShape;
        filterChannelShape.scale = filterScales[i];
        Shape biasChannelShape = biasShape;
        biasChannelShape.scale = filterScales[i] * inputShape.scale;

        NN_RET_CHECK(GetQuantizedConvolutionMultipler(
                inputShape, filterChannelShape, biasChannelShape, outputShape, &realMultiplier[i]));
        int exponent;
        NN_RET_CHECK(QuantizeMultiplier(realMultiplier[i], &outputMultiplier[i], &exponent));
        outputShift[i] = -exponent;
    }

    int32_t outputActivationMin = 0, outputActivationMax = 0;
    CalculateActivationRangeUint8(activation, outputShape, &outputActivationMin,
                                  &outputActivationMax);

    // Prevent concurrent executions that may access the scratch buffer
    std::unique_lock<std::mutex> lock(executionMutex);
    memset(tempBuffer, 0, tempBufferByteSize);

    const uint8_t* inputPtr = inputData;
    int32_t* outputBase = tempBuffer;
    for (uint32_t b = 0; b < numBatches; b++) {
        for (uint32_t h = 0; h < inputHeight; h++) {
            for (uint32_t w = 0; w < inputWidth; w++) {
                for (uint32_t d = 0; d < inputDepth; d++) {
                    int32_t wOutputOrigin = static_cast<int32_t>(w) * strideWidth - paddingLeft;
                    int32_t hOutputOrigin = static_cast<int32_t>(h) * strideHeight - paddingTop;

                    for (uint32_t i = 0; i < filterHeight; i++) {
                        for (uint32_t j = 0; j < filterWidth; j++) {
                            for (uint32_t k = 0; k < outputDepth; k++) {
                                int32_t hOutput = hOutputOrigin + static_cast<int32_t>(i);
                                int32_t wOutput = wOutputOrigin + static_cast<int32_t>(j);
                                if (hOutput >= 0 && hOutput < static_cast<int32_t>(outputHeight) &&
                                    wOutput >= 0 && wOutput < static_cast<int32_t>(outputWidth)) {
                                    uint32_t filterIndex =
                                            k * filterHeight * filterWidth * inputDepth +
                                            i * filterWidth * inputDepth + j * inputDepth + d;
                                    uint32_t outputIndex = hOutput * outputWidth * outputDepth +
                                                           wOutput * outputDepth + k;
                                    outputBase[outputIndex] +=
                                            (static_cast<int32_t>(*inputPtr) + inputOffset) *
                                            static_cast<int32_t>(filterData[filterIndex]);
                                }
                            }
                        }
                    }

                    inputPtr++;
                }
            }
        }
        outputBase += outputHeight * outputWidth * outputDepth;
    }

    const uint32_t outerSize = numBatches * outputHeight * outputWidth;
    int32_t* bufferPtr = tempBuffer;
    uint8_t* outPtr = outputData;
    for (uint32_t i = 0; i < outerSize; i++) {
        for (uint32_t d = 0; d < outputDepth; d++, bufferPtr++, outPtr++) {
            int32_t outVal = *bufferPtr + biasData[d];
            outVal = tflite::MultiplyByQuantizedMultiplier(outVal, outputMultiplier[d],
                                                           -outputShift[d]);
            outVal += outputOffset;
            outVal = std::max(std::min(outVal, outputActivationMax), outputActivationMin);
            *outPtr = static_cast<uint8_t>(outVal);
        }
    }

    return true;
}

bool transposeConvQuant8PerChannel(const uint8_t* inputData, const Shape& inputShape,
                                   const int8_t* filterData, const Shape& filterShape,
                                   const float* filterScales, const int32_t* biasData,
                                   const Shape& biasShape, const TransposeConv2dParam& param,
                                   uint8_t* outputData, const Shape& outputShape) {
    InputWithLayout<uint8_t> input(param.useNchw);
    OutputWithLayout<uint8_t> output(param.useNchw);
    NN_RET_CHECK(input.initialize(inputData, inputShape));
    NN_RET_CHECK(output.initialize(outputData, outputShape));
    NN_RET_CHECK(transposeConvQuant8PerChannelNhwc(
            input.getNhwcBuffer(), input.getNhwcShape(), filterData, filterShape, filterScales,
            biasData, biasShape, param, output.getNhwcBuffer(), output.getNhwcShape()));
    NN_RET_CHECK(output.commit());
    return true;
}

#undef ANDROID_NN_TRANSPOSE_CONV_PARAMETERS

}  // namespace
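
// Two input signatures are accepted: 9 inputs, where operand 3 is an output-shape tensor and
// operand 4 an implicit padding scheme, or 11 inputs with four explicit padding scalars.
// The operation requires HAL version 1.2 or later.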
bool validate(const IOperationValidationContext* context) {
    NN_RET_CHECK_EQ(context->getNumOutputs(), kNumOutputs);
    auto inputCount = context->getNumInputs();
    auto inputType = context->getInputType(kInputTensor);
    auto filterType = context->getInputType(kFilterTensor);
    std::vector<OperandType> inExpectedTypes;
    if (inputType == OperandType::TENSOR_FLOAT32 || inputType == OperandType::TENSOR_FLOAT16) {
        inExpectedTypes = {inputType, inputType, inputType};
    } else if (inputType == OperandType::TENSOR_QUANT8_ASYMM) {
        NN_RET_CHECK(filterType == OperandType::TENSOR_QUANT8_ASYMM ||
                     filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL)
                << "Unsupported filter tensor type for operation " << kOperationName;
        if (filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
            NN_RET_CHECK_EQ(context->getInputExtraParams(kFilterTensor).channelQuant().channelDim,
                            0)
                    << "Unsupported filter tensor channel dimension for operation "
                    << kOperationName;
        }
        inExpectedTypes = {inputType, filterType, OperandType::TENSOR_INT32};
    } else {
        NN_RET_CHECK_FAIL() << "Unsupported input tensor type for operation " << kOperationName;
    }

    std::vector<OperandType> argExpectedTypes;
    if (inputCount == 11) {
        argExpectedTypes = {OperandType::INT32, OperandType::INT32, OperandType::INT32,
                            OperandType::INT32, OperandType::INT32, OperandType::INT32,
                            OperandType::INT32, OperandType::BOOL};
    } else {
        argExpectedTypes = {OperandType::TENSOR_INT32, OperandType::INT32, OperandType::INT32,
                            OperandType::INT32,        OperandType::INT32, OperandType::BOOL};
    }
    inExpectedTypes.insert(inExpectedTypes.end(), argExpectedTypes.begin(), argExpectedTypes.end());
    NN_RET_CHECK(validateHalVersion(context, HalVersion::V1_2));
    return validateInputTypes(context, inExpectedTypes) &&
           validateOutputTypes(context, {inputType});
}

bool prepare(IOperationExecutionContext* context) {
    Shape input = context->getInputShape(kInputTensor);
    Shape filter = context->getInputShape(kFilterTensor);
    Shape bias = context->getInputShape(kBiasTensor);

    if (filter.type == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
        NN_RET_CHECK(input.type == OperandType::TENSOR_QUANT8_ASYMM);
    } else {
        NN_RET_CHECK(input.type == filter.type);
    }
    if (input.type == OperandType::TENSOR_QUANT8_ASYMM) {
        NN_RET_CHECK(bias.type == OperandType::TENSOR_INT32);
    } else {
        NN_RET_CHECK(input.type == bias.type);
    }
    NN_RET_CHECK_EQ(getNumberOfDimensions(input), 4);
    NN_RET_CHECK_EQ(getNumberOfDimensions(filter), 4);
    NN_RET_CHECK_EQ(getNumberOfDimensions(bias), 1);

    TransposeConv2dParam param;
    NN_RET_CHECK(param.initialize(context));

    uint32_t batches = getSizeOfDimension(input, 0);
    uint32_t height = getSizeOfDimension(input, param.useNchw ? 2 : 1);
    uint32_t width = getSizeOfDimension(input, param.useNchw ? 3 : 2);
    uint32_t channels_in = getSizeOfDimension(input, param.useNchw ? 1 : 3);
    uint32_t channels_out = getSizeOfDimension(filter, 0);
    uint32_t filterHeight = getSizeOfDimension(filter, 1);
    uint32_t filterWidth = getSizeOfDimension(filter, 2);
    // Only batches can be zero.
    NN_RET_CHECK_EQ(channels_in, getSizeOfDimension(filter, 3));
    NN_RET_CHECK_EQ(channels_out, getSizeOfDimension(bias, 0));
    NN_RET_CHECK_GT(height, 0);
    NN_RET_CHECK_GT(width, 0);
    NN_RET_CHECK_GT(channels_in, 0);
    NN_RET_CHECK_GT(channels_out, 0);
    NN_RET_CHECK_GT(filterWidth, 0);
    NN_RET_CHECK_GT(filterHeight, 0);

    uint32_t outWidth = computeOutSizeTransposeConv(width, filterWidth, param.strideWidth,
                                                    param.paddingLeft, param.paddingRight);
    uint32_t outHeight = computeOutSizeTransposeConv(height, filterHeight, param.strideHeight,
                                                     param.paddingTop, param.paddingBottom);
    NN_RET_CHECK_GT(outWidth, 0);
    NN_RET_CHECK_GT(outHeight, 0);

    Shape output = context->getOutputShape(kOutputTensor);
    output.type = input.type;
    if (param.useNchw) {
        output.dimensions = {batches, channels_out, outHeight, outWidth};
    } else {
        output.dimensions = {batches, outHeight, outWidth, channels_out};
    }
    return context->setOutputShape(kOutputTensor, output);
}
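
// execute() dispatches on the input tensor type; quant8 inputs additionally dispatch on the
// filter type (per-tensor asymmetric vs. per-channel symmetric).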
bool execute(IOperationExecutionContext* context) {
    // Bypass execution in the case of zero-sized input.
    if (getNumberOfElements(context->getOutputShape(kOutputTensor)) == 0) return true;
    TransposeConv2dParam param;
    NN_RET_CHECK(param.initialize(context));
    switch (context->getInputType(kInputTensor)) {
        case OperandType::TENSOR_FLOAT32:
            return transposeConv(context->getInputBuffer<float>(kInputTensor),
                                 context->getInputShape(kInputTensor),
                                 context->getInputBuffer<float>(kFilterTensor),
                                 context->getInputShape(kFilterTensor),
                                 context->getInputBuffer<float>(kBiasTensor),
                                 context->getInputShape(kBiasTensor), param,
                                 context->getOutputBuffer<float>(kOutputTensor),
                                 context->getOutputShape(kOutputTensor));
        case OperandType::TENSOR_FLOAT16:
            return transposeConv(context->getInputBuffer<_Float16>(kInputTensor),
                                 context->getInputShape(kInputTensor),
                                 context->getInputBuffer<_Float16>(kFilterTensor),
                                 context->getInputShape(kFilterTensor),
                                 context->getInputBuffer<_Float16>(kBiasTensor),
                                 context->getInputShape(kBiasTensor), param,
                                 context->getOutputBuffer<_Float16>(kOutputTensor),
                                 context->getOutputShape(kOutputTensor));
        case OperandType::TENSOR_QUANT8_ASYMM:
            if (context->getInputType(kFilterTensor) ==
                OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL) {
                return transposeConvQuant8PerChannel(
                        context->getInputBuffer<uint8_t>(kInputTensor),
                        context->getInputShape(kInputTensor),
                        context->getInputBuffer<int8_t>(kFilterTensor),
                        context->getInputShape(kFilterTensor),
                        context->getInputExtraParams(kFilterTensor).channelQuant().scales.data(),
                        context->getInputBuffer<int32_t>(kBiasTensor),
                        context->getInputShape(kBiasTensor), param,
                        context->getOutputBuffer<uint8_t>(kOutputTensor),
                        context->getOutputShape(kOutputTensor));
            } else if (context->getInputType(kFilterTensor) == OperandType::TENSOR_QUANT8_ASYMM) {
                return transposeConv(context->getInputBuffer<uint8_t>(kInputTensor),
                                     context->getInputShape(kInputTensor),
                                     context->getInputBuffer<uint8_t>(kFilterTensor),
                                     context->getInputShape(kFilterTensor),
                                     context->getInputBuffer<int32_t>(kBiasTensor),
                                     context->getInputShape(kBiasTensor), param,
                                     context->getOutputBuffer<uint8_t>(kOutputTensor),
                                     context->getOutputShape(kOutputTensor));
            } else {
                NN_RET_CHECK_FAIL() << "Unsupported filter type for operation " << kOperationName;
            }
        default:
            NN_RET_CHECK_FAIL() << "Unsupported tensor type for operation " << kOperationName;
    }
}

}  // namespace transpose_conv_2d

NN_REGISTER_OPERATION(TRANSPOSE_CONV_2D, transpose_conv_2d::kOperationName,
                      transpose_conv_2d::validate, transpose_conv_2d::prepare,
                      transpose_conv_2d::execute, .allowZeroSizedInput = true);

}  // namespace nn
}  // namespace android