/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "android.hardware.neuralnetworks@1.0-impl-hvx"

#include "HexagonModel.h"
#include "HexagonOperations.h"
#include "OperationsUtils.h"

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_0 {
namespace implementation {
namespace hexagon {

using android::nn::Shape;

namespace {

bool addMul(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
            HexagonModel* model, OperationType op) {
    HEXAGON_SOFT_ASSERT_EQ(3, ins.size(), "Need 3 inputs for " << toString(op));
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for " << toString(op));

    // get output size
    const Shape in1Shape = model->getShape(ins[0]);
    const Shape in2Shape = model->getShape(ins[1]);
    Shape outShape = model->getShape(outs[0]);
    HEXAGON_SOFT_ASSERT(addMulPrepare(in1Shape, in2Shape, &outShape), "Error getting shape");
    HEXAGON_SOFT_ASSERT(model->setShape(outs[0], outShape), "Error setting shape");

    return true;
}

bool add(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs, HexagonModel* model) {
    return addMul(ins, outs, model, OperationType::ADD);
}

bool mul(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs, HexagonModel* model) {
    return addMul(ins, outs, model, OperationType::MUL);
}

bool pool(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs, HexagonModel* model,
          OperationType op) {
    HEXAGON_SOFT_ASSERT(ins.size() == 10 || ins.size() == 7,
                        "Need 7 or 10 inputs for " << toString(op));

    // get parameters
    const Shape inShape = model->getShape(ins[0]);

    // setup parameters
    int32_t padding_left;
    int32_t padding_right;
    int32_t padding_top;
    int32_t padding_bottom;
    int32_t stride_width;
    int32_t stride_height;
    int32_t filter_width;
    int32_t filter_height;

    // get parameters
    if (ins.size() == 10) {
        padding_left = model->getScalar<int32_t>(ins[1]);
        padding_right = model->getScalar<int32_t>(ins[2]);
        padding_top = model->getScalar<int32_t>(ins[3]);
        padding_bottom = model->getScalar<int32_t>(ins[4]);
        stride_width = model->getScalar<int32_t>(ins[5]);
        stride_height = model->getScalar<int32_t>(ins[6]);
        filter_width = model->getScalar<int32_t>(ins[7]);
        filter_height = model->getScalar<int32_t>(ins[8]);

        HEXAGON_SOFT_ASSERT_NE(getPadding(inShape.dimensions[2], inShape.dimensions[1],
                                          stride_width, stride_height, filter_width, filter_height,
                                          padding_left, padding_right, padding_top, padding_bottom),
                               NN_PAD_NA, "Unknown padding");
    } else {
        const int32_t padding_implicit = model->getScalar<int32_t>(ins[1]);
        stride_width = model->getScalar<int32_t>(ins[2]);
        stride_height = model->getScalar<int32_t>(ins[3]);
        filter_width = model->getScalar<int32_t>(ins[4]);
        filter_height = model->getScalar<int32_t>(ins[5]);
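        // The implicit padding scheme (SAME or VALID) is converted into
        // explicit per-edge padding values here, since the prepare functions
        // below expect explicit padding.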
        nn::calculateExplicitPadding(inShape.dimensions[2], stride_width, filter_width,
                                     padding_implicit, &padding_left, &padding_right);
        nn::calculateExplicitPadding(inShape.dimensions[1], stride_height, filter_height,
                                     padding_implicit, &padding_top, &padding_bottom);
    }

    // get output size
    Shape outShape = model->getShape(outs[0]);
    HEXAGON_SOFT_ASSERT(
        genericPoolingPrepare(inShape, padding_left, padding_right, padding_top, padding_bottom,
                              stride_width, stride_height, filter_width, filter_height, &outShape),
        "Error getting shape");
    HEXAGON_SOFT_ASSERT(model->setShape(outs[0], outShape), "Error setting shape");

    return true;
}

bool average_pool_2d(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
                     HexagonModel* model) {
    return pool(ins, outs, model, OperationType::AVERAGE_POOL_2D);
}

bool l2_pool_2d(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
                HexagonModel* model) {
    return pool(ins, outs, model, OperationType::L2_POOL_2D);
}

bool max_pool_2d(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
                 HexagonModel* model) {
    return pool(ins, outs, model, OperationType::MAX_POOL_2D);
}

bool concatenation(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
                   HexagonModel* model) {
    std::string name = toString(OperationType::CONCATENATION);
    HEXAGON_SOFT_ASSERT_LE(3, ins.size(), "Need at least 3 inputs for " << name);
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for " << name);

    const size_t numInputTensors = ins.size() - 1;

    const int32_t axis = model->getScalar<int32_t>(ins[numInputTensors]);

    // get output size
    std::vector<Shape> inShapes(numInputTensors);
    for (size_t i = 0; i < numInputTensors; ++i) {
        inShapes[i] = model->getShape(ins[i]);
    }
    Shape outShape = model->getShape(outs[0]);
    HEXAGON_SOFT_ASSERT(concatenationPrepare(inShapes, axis, &outShape), "Error getting shape");
    HEXAGON_SOFT_ASSERT(model->setShape(outs[0], outShape), "Error setting shape");

    return true;
}
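// Checks a CONV_2D operation: verifies the input/output counts, resolves
// either explicit or implicit padding, computes and records the output shape,
// and requires the filter operand to be constant data.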
bool conv_2d(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
             HexagonModel* model) {
    std::string name = toString(OperationType::CONV_2D);
    HEXAGON_SOFT_ASSERT(ins.size() == 10 || ins.size() == 7, "Need 7 or 10 inputs for " << name);
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for " << name);

    // setup shapes
    const Shape inputShape = model->getShape(ins[0]);
    const Shape filterShape = model->getShape(ins[1]);
    const Shape biasShape = model->getShape(ins[2]);

    // setup parameters
    int32_t padding_left;
    int32_t padding_right;
    int32_t padding_top;
    int32_t padding_bottom;
    int32_t stride_width;
    int32_t stride_height;

    // get parameters
    if (ins.size() == 10) {
        padding_left = model->getScalar<int32_t>(ins[3]);
        padding_right = model->getScalar<int32_t>(ins[4]);
        padding_top = model->getScalar<int32_t>(ins[5]);
        padding_bottom = model->getScalar<int32_t>(ins[6]);
        stride_width = model->getScalar<int32_t>(ins[7]);
        stride_height = model->getScalar<int32_t>(ins[8]);

        HEXAGON_SOFT_ASSERT_NE(
            getPadding(inputShape.dimensions[2], inputShape.dimensions[1], stride_width,
                       stride_height, filterShape.dimensions[2], filterShape.dimensions[1],
                       padding_left, padding_right, padding_top, padding_bottom),
            NN_PAD_NA, "Unknown padding");
    } else {
        const int32_t padding_implicit = model->getScalar<int32_t>(ins[3]);
        stride_width = model->getScalar<int32_t>(ins[4]);
        stride_height = model->getScalar<int32_t>(ins[5]);

        nn::calculateExplicitPadding(inputShape.dimensions[2], stride_width,
                                     filterShape.dimensions[2], padding_implicit, &padding_left,
                                     &padding_right);
        nn::calculateExplicitPadding(inputShape.dimensions[1], stride_height,
                                     filterShape.dimensions[1], padding_implicit, &padding_top,
                                     &padding_bottom);
    }

    // get output size
    Shape outShape = model->getShape(outs[0]);
    HEXAGON_SOFT_ASSERT(
        convPrepare(inputShape, filterShape, biasShape, padding_left, padding_right, padding_top,
                    padding_bottom, stride_width, stride_height, &outShape),
        "Error getting shape");
    HEXAGON_SOFT_ASSERT(model->setShape(outs[0], outShape), "Error setting shape");

    // enforce filter is a constant
    HEXAGON_SOFT_ASSERT(model->isConstant(ins[1]), name << " requires filter to be constant data");

    return true;
}
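// Checks a DEPTHWISE_CONV_2D operation. The checks mirror conv_2d above; the
// operation carries one additional scalar input (the NNAPI depth_multiplier),
// hence 8 or 11 inputs instead of 7 or 10.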
bool depthwise_conv_2d(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
                       HexagonModel* model) {
    std::string name = toString(OperationType::DEPTHWISE_CONV_2D);
    HEXAGON_SOFT_ASSERT(ins.size() == 8 || ins.size() == 11, "Need 8 or 11 inputs for " << name);
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for " << name);

    // setup shapes
    const Shape inputShape = model->getShape(ins[0]);
    const Shape filterShape = model->getShape(ins[1]);
    const Shape biasShape = model->getShape(ins[2]);

    // setup parameters
    int32_t padding_left;
    int32_t padding_right;
    int32_t padding_top;
    int32_t padding_bottom;
    int32_t stride_width;
    int32_t stride_height;

    // get parameters
    if (ins.size() == 11) {
        padding_left = model->getScalar<int32_t>(ins[3]);
        padding_right = model->getScalar<int32_t>(ins[4]);
        padding_top = model->getScalar<int32_t>(ins[5]);
        padding_bottom = model->getScalar<int32_t>(ins[6]);
        stride_width = model->getScalar<int32_t>(ins[7]);
        stride_height = model->getScalar<int32_t>(ins[8]);

        HEXAGON_SOFT_ASSERT_NE(
            getPadding(inputShape.dimensions[2], inputShape.dimensions[1], stride_width,
                       stride_height, filterShape.dimensions[2], filterShape.dimensions[1],
                       padding_left, padding_right, padding_top, padding_bottom),
            NN_PAD_NA, "Unknown padding");
    } else {
        const int32_t padding_implicit = model->getScalar<int32_t>(ins[3]);
        stride_width = model->getScalar<int32_t>(ins[4]);
        stride_height = model->getScalar<int32_t>(ins[5]);

        nn::calculateExplicitPadding(inputShape.dimensions[2], stride_width,
                                     filterShape.dimensions[2], padding_implicit, &padding_left,
                                     &padding_right);
        nn::calculateExplicitPadding(inputShape.dimensions[1], stride_height,
                                     filterShape.dimensions[1], padding_implicit, &padding_top,
                                     &padding_bottom);
    }

    // get output size
    Shape outShape = model->getShape(outs[0]);
    HEXAGON_SOFT_ASSERT(
        depthwiseConvPrepare(inputShape, filterShape, biasShape, padding_left, padding_right,
                             padding_top, padding_bottom, stride_width, stride_height, &outShape),
        "Error getting shape");
    HEXAGON_SOFT_ASSERT(model->setShape(outs[0], outShape), "Error setting shape");

    // enforce filter is a constant
    HEXAGON_SOFT_ASSERT(model->isConstant(ins[1]), name << " requires filter to be constant data");

    return true;
}

bool dequantize(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
                HexagonModel* model) {
    std::string name = toString(OperationType::DEQUANTIZE);
    HEXAGON_SOFT_ASSERT_EQ(1, ins.size(), "Need 1 input for " << name);
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for " << name);

    // get output size
    const Shape inputShape = model->getShape(ins[0]);
    Shape outShape = model->getShape(outs[0]);

    HEXAGON_SOFT_ASSERT(dequantizePrepare(inputShape, &outShape), "Error getting shape");
    HEXAGON_SOFT_ASSERT(model->setShape(outs[0], outShape), "Error setting shape");

    return true;
}

bool fully_connected(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
                     HexagonModel* model) {
    std::string name = toString(OperationType::FULLY_CONNECTED);
    HEXAGON_SOFT_ASSERT_EQ(4, ins.size(), "Need 4 inputs for " << name);
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for " << name);

    // get output size
    const Shape inputShape = model->getShape(ins[0]);
    const Shape weightsShape = model->getShape(ins[1]);
    const Shape biasShape = model->getShape(ins[2]);
    Shape outShape = model->getShape(outs[0]);
    HEXAGON_SOFT_ASSERT(fullyConnectedPrepare(inputShape, weightsShape, biasShape, &outShape),
                        "Error getting shape");
    HEXAGON_SOFT_ASSERT(model->setShape(outs[0], outShape), "Error setting shape");

    // enforce weight is a constant
    HEXAGON_SOFT_ASSERT(model->isConstant(ins[1]), name << " requires weight to be constant data");

    return true;
}

bool local_response_normalization(const std::vector<uint32_t>& ins,
                                  const std::vector<uint32_t>& outs, HexagonModel* model) {
    std::string name = toString(OperationType::LOCAL_RESPONSE_NORMALIZATION);
    HEXAGON_SOFT_ASSERT_EQ(5, ins.size(), "Need 5 inputs for " << name);
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for " << name);

    // get output size
    const Shape inShape = model->getShape(ins[0]);
    Shape outShape = model->getShape(outs[0]);
    HEXAGON_SOFT_ASSERT(genericNormalizationPrepare(inShape, &outShape), "Error getting shape");
    HEXAGON_SOFT_ASSERT(model->setShape(outs[0], outShape), "Error setting shape");

    return true;
}
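// Common checker for the activation-style operations (LOGISTIC, RELU, RELU1,
// RELU6, SOFTMAX, TANH); the output shape is derived from the input shape
// alone. numInputs is 1 for the simple activations and 2 for SOFTMAX, which
// also takes a beta scalar.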
bool activation(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
                HexagonModel* model, uint32_t numInputs, OperationType op) {
    HEXAGON_SOFT_ASSERT_EQ(numInputs, ins.size(),
                           "Need " << numInputs << " input for " << toString(op));
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for " << toString(op));

    // get output size
    const Shape inShape = model->getShape(ins[0]);
    Shape outShape = model->getShape(outs[0]);
    HEXAGON_SOFT_ASSERT(genericActivationPrepare(inShape, &outShape), "Error getting shape");
    HEXAGON_SOFT_ASSERT(model->setShape(outs[0], outShape), "Error setting shape");

    return true;
}

bool logistic(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
              HexagonModel* model) {
    return activation(ins, outs, model, 1, OperationType::LOGISTIC);
}

bool relu(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
          HexagonModel* model) {
    return activation(ins, outs, model, 1, OperationType::RELU);
}

bool relu1(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
           HexagonModel* model) {
    return activation(ins, outs, model, 1, OperationType::RELU1);
}

bool relu6(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
           HexagonModel* model) {
    return activation(ins, outs, model, 1, OperationType::RELU6);
}

bool softmax(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
             HexagonModel* model) {
    return activation(ins, outs, model, 2, OperationType::SOFTMAX);
}

bool tanh(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
          HexagonModel* model) {
    return activation(ins, outs, model, 1, OperationType::TANH);
}

bool reshape(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
             HexagonModel* model) {
    std::string name = toString(OperationType::RESHAPE);
    HEXAGON_SOFT_ASSERT_EQ(2, ins.size(), "Need 2 inputs for " << name);
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for " << name);

    // get output size
    const Shape inShape = model->getShape(ins[0]);
    const Shape targetShape = model->getShape(ins[1]);
    const int32_t* targetShapePtr = model->getPointer(ins[1]);
    int32_t targetShapeNumElem = ::android::nn::getNumberOfElements(targetShape);
    Shape outShape = model->getShape(outs[0]);
    HEXAGON_SOFT_ASSERT(targetShapePtr != nullptr, "pointer value is currently nullptr");

    HEXAGON_SOFT_ASSERT(reshapePrepare(inShape, targetShapePtr, targetShapeNumElem, &outShape),
                        "Error getting shape");
    HEXAGON_SOFT_ASSERT(model->setShape(outs[0], outShape), "Error setting shape");

    return true;
}

bool resize_bilinear(const std::vector<uint32_t>& ins, const std::vector<uint32_t>& outs,
                     HexagonModel* model) {
    std::string name = toString(OperationType::RESIZE_BILINEAR);
    HEXAGON_SOFT_ASSERT_EQ(3, ins.size(), "Need 3 inputs for " << name);
    HEXAGON_SOFT_ASSERT_EQ(1, outs.size(), "Need 1 output for " << name);

    // get parameters
    const int32_t width = model->getScalar<int32_t>(ins[1]);
    const int32_t height = model->getScalar<int32_t>(ins[2]);

    // get output size
    const Shape inShape = model->getShape(ins[0]);
    Shape outShape = model->getShape(outs[0]);
    HEXAGON_SOFT_ASSERT(resizeBilinearPrepare(inShape, width, height, &outShape),
                        "Error getting shape");
    HEXAGON_SOFT_ASSERT(model->setShape(outs[0], outShape), "Error setting shape");

    return true;
}

}  // namespace
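// Maps {operation type, operand type} pairs to the corresponding check
// function above. Illustrative lookup sketch (assumes OperationTable is an
// associative container keyed on that pair, as the initializer below
// suggests; `opType`, `operandType`, `ins`, `outs`, and `model` are
// placeholders):
//
//   const OperationTable& table = getOperationCheckTable();
//   auto it = table.find({opType, operandType});
//   bool supported = (it != table.end()) && it->second(ins, outs, &model);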
OperationTable& getOperationCheckTable() {
    static OperationTable table = {
        // NOTE: the operations that are commented out inline below are valid
        // for the Android O NNAPI release, but are currently not implemented
        // in HVX.

        // -------------------------- 32-BIT FLOAT ----------------------------
        // HVX is only performant when running on quantized values. Further, as
        // an optimization, the current HVX driver will convert some floating
        // point tensors into quantized values, perform the operation, and then
        // convert them back to floating point. This results in a loss in
        // precision causing some tests to fail. For these reasons, the FLOAT32
        // operations are being temporarily disabled.
        /*
        {{OperationType::ADD, OperandType::TENSOR_FLOAT32}, add},
        {{OperationType::AVERAGE_POOL_2D, OperandType::TENSOR_FLOAT32}, average_pool_2d},
        {{OperationType::CONCATENATION, OperandType::TENSOR_FLOAT32}, concatenation},
        {{OperationType::CONV_2D, OperandType::TENSOR_FLOAT32}, conv_2d},
        {{OperationType::DEPTHWISE_CONV_2D, OperandType::TENSOR_FLOAT32}, depthwise_conv_2d},
        //{{OperationType::DEPTH_TO_SPACE, OperandType::TENSOR_FLOAT32}, depth_to_space},
        //{{OperationType::EMBEDDING_LOOKUP, OperandType::TENSOR_FLOAT32}, embedding_lookup},
        //{{OperationType::FLOOR, OperandType::TENSOR_FLOAT32}, floor},
        {{OperationType::FULLY_CONNECTED, OperandType::TENSOR_FLOAT32}, fully_connected},
        //{{OperationType::HASHTABLE_LOOKUP, OperandType::TENSOR_FLOAT32}, hashtable_lookup},
        //{{OperationType::L2_NORMALIZATION, OperandType::TENSOR_FLOAT32}, l2_normalization},
        {{OperationType::L2_POOL_2D, OperandType::TENSOR_FLOAT32}, l2_pool_2d},
        {{OperationType::LOCAL_RESPONSE_NORMALIZATION, OperandType::TENSOR_FLOAT32},
         local_response_normalization},
        {{OperationType::LOGISTIC, OperandType::TENSOR_FLOAT32}, logistic},
        //{{OperationType::LSH_PROJECTION, OperandType::TENSOR_FLOAT32}, lsh_projection},
        //{{OperationType::LSTM, OperandType::TENSOR_FLOAT32}, lstm },
        {{OperationType::MAX_POOL_2D, OperandType::TENSOR_FLOAT32}, max_pool_2d},
        {{OperationType::MUL, OperandType::TENSOR_FLOAT32}, mul},
        {{OperationType::RELU, OperandType::TENSOR_FLOAT32}, relu},
        {{OperationType::RELU1, OperandType::TENSOR_FLOAT32}, relu1},
        {{OperationType::RELU6, OperandType::TENSOR_FLOAT32}, relu6},
        {{OperationType::RESHAPE, OperandType::TENSOR_FLOAT32}, reshape},
        {{OperationType::RESIZE_BILINEAR, OperandType::TENSOR_FLOAT32}, resize_bilinear},
        //{{OperationType::RNN, OperandType::TENSOR_FLOAT32}, rnn},
        {{OperationType::SOFTMAX, OperandType::TENSOR_FLOAT32}, softmax},
        //{{OperationType::SPACE_TO_DEPTH, OperandType::TENSOR_FLOAT32}, space_to_depth},
        //{{OperationType::SVDF, OperandType::TENSOR_FLOAT32}, svdf },
        {{OperationType::TANH, OperandType::TENSOR_FLOAT32}, tanh},
        */

        // -------------------- QUANTIZED 8-BIT ASYMMETRICAL ------------------
        {{OperationType::ADD, OperandType::TENSOR_QUANT8_ASYMM}, add},
        {{OperationType::AVERAGE_POOL_2D, OperandType::TENSOR_QUANT8_ASYMM}, average_pool_2d},
        {{OperationType::CONCATENATION, OperandType::TENSOR_QUANT8_ASYMM}, concatenation},
        {{OperationType::CONV_2D, OperandType::TENSOR_QUANT8_ASYMM}, conv_2d},
        {{OperationType::DEPTHWISE_CONV_2D, OperandType::TENSOR_QUANT8_ASYMM}, depthwise_conv_2d},
        //{{OperationType::DEPTH_TO_SPACE, OperandType::TENSOR_QUANT8_ASYMM}, depth_to_space},
        {{OperationType::DEQUANTIZE, OperandType::TENSOR_QUANT8_ASYMM}, dequantize},
        //{{OperationType::EMBEDDING_LOOKUP, OperandType::TENSOR_QUANT8_ASYMM}, embedding_lookup},
        {{OperationType::FULLY_CONNECTED, OperandType::TENSOR_QUANT8_ASYMM}, fully_connected},
        //{{OperationType::HASHTABLE_LOOKUP, OperandType::TENSOR_QUANT8_ASYMM}, hashtable_lookup},
        {{OperationType::LOGISTIC, OperandType::TENSOR_QUANT8_ASYMM}, logistic},
        //{{OperationType::LSH_PROJECTION, OperandType::TENSOR_QUANT8_ASYMM}, lsh_projection},
        {{OperationType::MAX_POOL_2D, OperandType::TENSOR_QUANT8_ASYMM}, max_pool_2d},
        {{OperationType::MUL, OperandType::TENSOR_QUANT8_ASYMM}, mul},
        {{OperationType::RELU, OperandType::TENSOR_QUANT8_ASYMM}, relu},
        {{OperationType::RELU1, OperandType::TENSOR_QUANT8_ASYMM}, relu1},
        {{OperationType::RELU6, OperandType::TENSOR_QUANT8_ASYMM}, relu6},
        {{OperationType::RESHAPE, OperandType::TENSOR_QUANT8_ASYMM}, reshape},
        {{OperationType::SOFTMAX, OperandType::TENSOR_QUANT8_ASYMM}, softmax},
        //{{OperationType::SPACE_TO_DEPTH, OperandType::TENSOR_QUANT8_ASYMM}, space_to_depth},
    };

    // The following functions are normally used by the FLOAT32 operations, but
    // those operations have been temporarily disabled. Casting them to void
    // explicitly marks them as unused and prevents the compiler from raising
    // an error.
    (void)l2_pool_2d;
    (void)local_response_normalization;
    (void)tanh;
    (void)resize_bilinear;

    return table;
}

}  // namespace hexagon
}  // namespace implementation
}  // namespace V1_0
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android