/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "android.hardware.neuralnetworks@1.0-impl-hvx"

#include "HexagonUtils.h"
#include <hidlmemory/mapping.h>
#include <algorithm>
#include <iterator>
#include <numeric>
#include <vector>
#include "OperationsUtils.h"

namespace android {
namespace hardware {
namespace neuralnetworks {
namespace V1_0 {
namespace implementation {
namespace hexagon {

// Checks that the hexagon_nn controller reports the expected nnlib version
// (92), restarting nnlib once and re-querying if the first check fails.
bool isHexagonAvailable() {
    int version = -1;
    Controller::getInstance().version(&version);
    if (version != 92) {
        LOG(INFO) << "ATTEMPTING TO RESTART NNLIB";
        Controller::getInstance().resetNnlib();
        Controller::getInstance().version(&version);
    }
    return version == 92;
}

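// A minimal usage sketch (illustrative; the caller-side fallback is an
// assumption, not part of this file): driver code is expected to gate any
// hexagon_nn work on this check, e.g.
//
//     if (!hexagon::isHexagonAvailable()) {
//         LOG(ERROR) << "hexagon_nn unavailable";
//         return false;  // hypothetical fallback to another execution path
//     }
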
hexagon_nn_padding_type getPadding(uint32_t pad) {
    switch (pad) {
        case ::android::nn::kPaddingSame:
            return NN_PAD_SAME;
        case ::android::nn::kPaddingValid:
            return NN_PAD_VALID;
        case ::android::nn::kPaddingUnknown:
        default:
            return NN_PAD_NA;
    }
}

hexagon_nn_padding_type getPadding(int32_t inWidth, int32_t inHeight, int32_t strideWidth,
                                   int32_t strideHeight, int32_t filterWidth, int32_t filterHeight,
                                   int32_t paddingLeft, int32_t paddingRight, int32_t paddingTop,
                                   int32_t paddingBottom) {
    return getPadding(::android::nn::getPaddingScheme(inWidth, inHeight, strideWidth, strideHeight,
                                                      filterWidth, filterHeight, paddingLeft,
                                                      paddingRight, paddingTop, paddingBottom));
}

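// Illustrative mapping (a sketch; the concrete numbers follow typical
// SAME-padding arithmetic and are not taken from this file): a 224x224 input
// with stride 2 and a 3x3 filter needs a total padding of 1 in each
// dimension, so explicit padding {0, 1, 0, 1} resolves to the SAME scheme:
//
//     hexagon_nn_padding_type pad =
//         getPadding(224, 224, /*strides*/ 2, 2, /*filter*/ 3, 3,
//                    /*padding L,R,T,B*/ 0, 1, 0, 1);  // expect NN_PAD_SAME
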
// Maps an NNAPI fused activation function to the corresponding float
// hexagon_nn op (OP_Nop when no activation is fused).
op_type getFloatActivationFunction(FusedActivationFunc act) {
    switch (act) {
        case FusedActivationFunc::RELU:
            return OP_Relu_f;
        case FusedActivationFunc::RELU1:
            return OP_Clamp_f;
        case FusedActivationFunc::RELU6:
            return OP_ReluX_f;
        case FusedActivationFunc::NONE:
            FALLTHROUGH_INTENDED;
        default:
            return OP_Nop;
    }
}

// Maps an NNAPI fused activation function to the corresponding quantized
// (uint8) hexagon_nn op (OP_Nop when no activation is fused).
op_type getQuantizedActivationFunction(FusedActivationFunc act) {
    switch (act) {
        case FusedActivationFunc::RELU:
            return OP_QuantizedRelu_8;
        case FusedActivationFunc::RELU1:
            return OP_QuantizedClamp_8;
        case FusedActivationFunc::RELU6:
            return OP_QuantizedReluX_8;
        case FusedActivationFunc::NONE:
            FALLTHROUGH_INTENDED;
        default:
            return OP_Nop;
    }
}

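// Sketch of how these helpers are typically consumed (the model-builder call
// is an assumption, not defined in this file):
//
//     op_type act = getFloatActivationFunction(FusedActivationFunc::RELU6);
//     // act == OP_ReluX_f; a graph builder would then append this op after
//     // the main operation's output, e.g. model->addNode(act, ...).
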
uint32_t getSize(OperandType type) {
    static const uint32_t sizes[] = {
        4,  // FLOAT32
        4,  // INT32
        4,  // UINT32
        4,  // TENSOR_FLOAT32
        4,  // TENSOR_INT32
        1,  // TENSOR_QUANT8_ASYMM
    };
    HEXAGON_SOFT_ASSERT(static_cast<uint32_t>(type) < sizeof(sizes) / sizeof(*sizes),
                        "Error: type exceeds max enum value");
    return sizes[static_cast<uint32_t>(type)];
}

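// Example byte-size computation for an operand (a sketch; 'dims' is a
// hypothetical dimension vector, not a name from this file):
//
//     std::vector<uint32_t> dims = {1, 224, 224, 3};
//     uint32_t bytes = getSize(OperandType::TENSOR_FLOAT32);
//     for (uint32_t d : dims) bytes *= d;
//     // bytes == 602112 (150528 float elements, 4 bytes each)
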
std::vector<uint32_t> getAlignedDimensions(const std::vector<uint32_t>& dims, uint32_t N) {
    HEXAGON_SOFT_ASSERT_GE(
        N, dims.size(),
        "Error: constant data rank " << dims.size() << " exceeds alignment of " << N);
    std::vector<uint32_t> dimensions(N - dims.size(), 1);
    dimensions.insert(dimensions.end(), dims.begin(), dims.end());
    return dimensions;
}

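// Example (derived from the function above): hexagon_nn works on rank-4
// (batch, height, width, depth) shapes, so lower-rank dimension vectors are
// left-padded with 1s:
//
//     getAlignedDimensions({300, 40}, 4);  // yields {1, 1, 300, 40}
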
std::vector<RunTimePoolInfo> mapPools(const hidl_vec<hidl_memory>& pools) {
    std::vector<RunTimePoolInfo> poolInfos;
    poolInfos.reserve(pools.size());
    bool fail = false;
    for (const auto& pool : pools) {
        poolInfos.emplace_back(pool, &fail);
    }
    HEXAGON_SOFT_ASSERT(!fail, "Error setting pools");
    return poolInfos;
}

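// Typical call site (a sketch; 'request' is a hypothetical V1_0::Request, not
// a name defined in this file):
//
//     std::vector<RunTimePoolInfo> poolInfos = mapPools(request.pools);
//     // each RunTimePoolInfo now wraps one mapped hidl_memory region
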
std::unordered_set<uint32_t> getPoolIndexes(const std::vector<RequestArgument>& inputsOutputs) {
    std::unordered_set<uint32_t> indexes;
    for (const RequestArgument& inputOutput : inputsOutputs) {
        indexes.insert(inputOutput.location.poolIndex);
    }
    return indexes;
}

namespace {
const uint8_t* getDataFromBlock(const hidl_vec<uint8_t>& block, uint32_t offset, uint32_t length) {
    HEXAGON_SOFT_ASSERT_LE(offset + length, block.size(),
                           "Error: trying to copy data from outside of block bounds");
    return block.data() + offset;
}

const uint8_t* getDataFromPool(const RunTimePoolInfo& pool, uint32_t offset,
                               [[maybe_unused]] uint32_t length) {
    // HEXAGON_SOFT_ASSERT_LE(offset + length, pool->getSize(),
    //                       "Error: trying to copy data from outside of pool bounds");
    return pool.getBuffer() + offset;
}
}  // anonymous namespace

const uint8_t* getData(const Operand& operand, const hidl_vec<uint8_t>& block,
                       const std::vector<RunTimePoolInfo>& pools) {
    switch (operand.lifetime) {
        case OperandLifeTime::TEMPORARY_VARIABLE:
            return nullptr;
        case OperandLifeTime::MODEL_INPUT:
        case OperandLifeTime::MODEL_OUTPUT:
            HEXAGON_SOFT_ASSERT(false,
                                "Error: trying to retrieve data that is only known at runtime");
        case OperandLifeTime::CONSTANT_COPY:
            return getDataFromBlock(block, operand.location.offset, operand.location.length);
        case OperandLifeTime::CONSTANT_REFERENCE:
            return getDataFromPool(pools[operand.location.poolIndex], operand.location.offset,
                                   operand.location.length);
        default:
            HEXAGON_SOFT_ASSERT(false, "Error: unrecognized operand lifetime");
    }
}

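// Example retrieval of a constant operand's bytes (a sketch; 'model' is a
// hypothetical V1_0::Model, not defined here):
//
//     const Operand& operand = model.operands[i];
//     const uint8_t* data = getData(operand, model.operandValues, poolInfos);
//     // nullptr for TEMPORARY_VARIABLE; asserts for runtime-only operands
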
bool operator==(const hexagon_nn_input& lhs, const hexagon_nn_input& rhs) {
    return lhs.src_id == rhs.src_id && lhs.output_idx == rhs.output_idx;
}

bool operator!=(const hexagon_nn_input& lhs, const hexagon_nn_input& rhs) {
    return !(lhs == rhs);
}

bool operator==(const hexagon_nn_output& lhs, const hexagon_nn_output& rhs) {
    return lhs.rank == rhs.rank &&
           std::equal(std::begin(lhs.max_sizes), std::end(lhs.max_sizes),
                      std::begin(rhs.max_sizes)) &&
           lhs.elementsize == rhs.elementsize && lhs.zero_offset == rhs.zero_offset &&
           lhs.stepsize == rhs.stepsize;
}

bool operator!=(const hexagon_nn_output& lhs, const hexagon_nn_output& rhs) {
    return !(lhs == rhs);
}

hexagon_nn_output make_hexagon_nn_output(const std::vector<uint32_t>& dims, uint32_t size) {
    std::vector<uint32_t> alignedDims = getAlignedDimensions(dims, 4);
    hexagon_nn_output output = {
        .rank = std::min(8u, static_cast<uint32_t>(alignedDims.size())),
        .max_sizes = {0, 0, 0, 0, 0, 0, 0, 0},
        .elementsize = size,
        .zero_offset = 0,
        .stepsize = 0.0f,
    };
    for (size_t i = 0; i < alignedDims.size() && i < 8; ++i) {
        output.max_sizes[i] = alignedDims[i];
    }
    return output;
}

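// Example output definition for a float32 tensor (a sketch based on the
// helpers above):
//
//     hexagon_nn_output out =
//         make_hexagon_nn_output({1, 56, 56, 32}, sizeof(float));
//     // out.rank == 4, out.max_sizes == {1, 56, 56, 32, 0, 0, 0, 0},
//     // out.elementsize == 4
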
// printers
std::string toString(uint32_t val) {
    return std::to_string(val);
}

std::string toString(float val) {
    return std::to_string(val);
}

std::string toString(hexagon_nn_nn_id id) {
    return std::to_string(static_cast<int32_t>(id));
}

std::string toString(op_type op) {
    static const char* opText[] = {
#define DEF_OP(NAME, ...) "OP_" #NAME,
#include "hexagon_nn_controller/ops.def"
#undef DEF_OP
    };
    return static_cast<size_t>(op) < sizeof(opText) / sizeof(char*)
               ? opText[static_cast<size_t>(op)]
               : "<invalid op_type>";
}

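// toString(op_type) above uses the "X-macro" technique: ops.def is a list of
// DEF_OP(NAME, ...) entries, and each inclusion site redefines DEF_OP to
// expand that one list differently. A minimal sketch of the idea (the
// two-entry list is illustrative, not the real contents of ops.def):
//
//     // ops.def contains: DEF_OP(INPUT) DEF_OP(Relu_f)
//     #define DEF_OP(NAME) "OP_" #NAME,
//     static const char* names[] = { /* #include "ops.def" */ };
//     #undef DEF_OP
//     // expands to: { "OP_INPUT", "OP_Relu_f", }
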
std::string toString(hexagon_nn_padding_type padding) {
    static const char* paddingText[] = {
        "NN_PAD_NA",
        "NN_PAD_SAME",
        "NN_PAD_VALID",
        "NN_PAD_MIRROR_REFLECT",
        "NN_PAD_MIRROR_SYMMETRIC",
        "NN_PAD_SAME_CAFFE",
    };
    return static_cast<size_t>(padding) < sizeof(paddingText) / sizeof(char*)
               ? paddingText[static_cast<size_t>(padding)]
               : "<invalid hexagon_nn_padding_type>";
}

std::string toString(const hexagon_nn_input& input) {
    return "hexagon_nn_input{.src_id: " + std::to_string(input.src_id) +
           ", .output_idx: " + std::to_string(input.output_idx) + "}";
}

std::string toString(const hexagon_nn_output& output) {
    return "hexagon_nn_output{.rank: " + std::to_string(output.rank) + ", .max_sizes: [" +
           std::to_string(output.max_sizes[0]) + ", " + std::to_string(output.max_sizes[1]) + ", " +
           std::to_string(output.max_sizes[2]) + ", " + std::to_string(output.max_sizes[3]) + ", " +
           std::to_string(output.max_sizes[4]) + ", " + std::to_string(output.max_sizes[5]) + ", " +
           std::to_string(output.max_sizes[6]) + ", " + std::to_string(output.max_sizes[7]) + "]" +
           ", .elementsize: " + std::to_string(output.elementsize) +
           ", .zero_offset: " + std::to_string(output.zero_offset) +
           ", .stepsize: " + std::to_string(output.stepsize) + "}";
}

std::string toString(const hexagon_nn_tensordef& tensordef) {
    return "hexagon_nn_tensordef{.batches: " + std::to_string(tensordef.batches) +
           ", .height: " + std::to_string(tensordef.height) +
           ", .width: " + std::to_string(tensordef.width) +
           ", .depth: " + std::to_string(tensordef.depth) +
           ", .data: " + std::to_string(reinterpret_cast<uintptr_t>(tensordef.data)) +
           ", .dataLen: " + std::to_string(tensordef.dataLen) +
           ", .data_valid_len: " + std::to_string(tensordef.data_valid_len) +
           ", .unused: " + std::to_string(tensordef.unused) + "}";
}

std::string toString(const hexagon_nn_perfinfo& perfinfo) {
    return "hexagon_nn_perfinfo{.node_id: " + std::to_string(perfinfo.node_id) +
           ", .executions: " + std::to_string(perfinfo.executions) +
           ", .counter_lo: " + std::to_string(perfinfo.counter_lo) +
           ", .counter_hi: " + std::to_string(perfinfo.counter_hi) + "}";
}

std::string toString(const ::android::nn::Shape& shape) {
    return "Shape{.type: " + toString(shape.type) +
           ", .dimensions: " + toString(shape.dimensions.data(), shape.dimensions.size()) +
           ", .scale: " + std::to_string(shape.scale) +
           ", .zeroPoint: " + std::to_string(shape.offset) + "}";
}

}  // namespace hexagon
}  // namespace implementation
}  // namespace V1_0
}  // namespace neuralnetworks
}  // namespace hardware
}  // namespace android