/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fuzzing/operation_signatures/OperationSignatureUtils.h"

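// Operation signatures of the reduction-style operations (MEAN, REDUCE_*, ARGMAX, ARGMIN)
// used by the random graph fuzz test.
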
namespace android {
namespace nn {
namespace fuzzing_test {

static void reduceOpConstructor(Type, uint32_t rank, RandomOperation* op) {
    setFreeDimensions(op->inputs[0], rank);

    // A boolean array indicating whether each dimension is selected to be reduced.
    bool reduce[4] = {false, false, false, false};

    // Generate values for the "axis" tensor.
    uint32_t numAxis = getUniform<int32_t>(1, 10);
    op->inputs[1]->dimensions = {numAxis};
    op->inputs[1]->resizeBuffer<int32_t>(numAxis);
    for (uint32_t i = 0; i < numAxis; i++) {
        int32_t dim = getUniform<int32_t>(-rank, rank - 1);
        op->inputs[1]->value<int32_t>(i) = dim;
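        // Map a negative axis to its non-negative equivalent. Duplicate axes may occur and are
        // harmless: they simply mark the same dimension again.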
        reduce[dim < 0 ? dim + rank : dim] = true;
    }

    // The keep_dims scalar may have two types: INT32 in MEAN, BOOL in REDUCE_*.
    bool keepDims;
    if (op->inputs[2]->dataType == Type::BOOL) {
        keepDims = op->inputs[2]->value<bool8>();
    } else {
        keepDims = op->inputs[2]->value<int32_t>() > 0;
    }

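    // Compute the output shape: a reduced dimension is dropped, or kept with size 1 when
    // keepDims is set.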
    for (uint32_t i = 0; i < rank; i++) {
        if (!reduce[i]) {
            op->outputs[0]->dimensions.emplace_back(op->inputs[0]->dimensions[i]);
        } else if (keepDims) {
            op->outputs[0]->dimensions.emplace_back(1);
        }
    }
    setSameQuantization(op->outputs[0], op->inputs[0]);

    // REDUCE_PROD may produce Inf output values. We should not connect the output tensor to the
    // input of another operation.
    if (op->opType == ANEURALNETWORKS_REDUCE_PROD) {
        op->outputs[0]->doNotConnect = true;
    }
}

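// MEAN takes an INT32 keep_dims scalar; the constructor above treats any positive value as true.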
#define DEFINE_MEAN_SIGNATURE(ver, ...)                                   \
    DEFINE_OPERATION_SIGNATURE(MEAN_##ver){                               \
            .opType = ANEURALNETWORKS_MEAN,                               \
            .supportedDataTypes = {__VA_ARGS__},                          \
            .supportedRanks = {1, 2, 3, 4},                               \
            .version = HalVersion::ver,                                   \
            .inputs = {INPUT_DEFAULT, PARAMETER_NONE(Type::TENSOR_INT32), \
                       PARAMETER_CHOICE(Type::INT32, -100, 100)},         \
            .outputs = {OUTPUT_DEFAULT},                                  \
            .constructor = reduceOpConstructor};

DEFINE_MEAN_SIGNATURE(V1_1, Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM);
DEFINE_MEAN_SIGNATURE(V1_2, Type::TENSOR_FLOAT16);

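// The REDUCE_* operations take a BOOL keep_dims scalar instead of INT32.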
#define DEFINE_REDUCE_SIGNATURE(op, ver, ...)                             \
    DEFINE_OPERATION_SIGNATURE(op##_##ver){                               \
            .opType = ANEURALNETWORKS_##op,                               \
            .supportedDataTypes = {__VA_ARGS__},                          \
            .supportedRanks = {1, 2, 3, 4},                               \
            .version = HalVersion::ver,                                   \
            .inputs = {INPUT_DEFAULT, PARAMETER_NONE(Type::TENSOR_INT32), \
                       PARAMETER_CHOICE(Type::BOOL, true, false)},        \
            .outputs = {OUTPUT_DEFAULT},                                  \
            .constructor = reduceOpConstructor};

DEFINE_REDUCE_SIGNATURE(REDUCE_ALL, V1_2, Type::TENSOR_BOOL8);
DEFINE_REDUCE_SIGNATURE(REDUCE_ANY, V1_2, Type::TENSOR_BOOL8);
DEFINE_REDUCE_SIGNATURE(REDUCE_PROD, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16);
DEFINE_REDUCE_SIGNATURE(REDUCE_SUM, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16);
DEFINE_REDUCE_SIGNATURE(REDUCE_MAX, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16,
                        Type::TENSOR_QUANT8_ASYMM);
DEFINE_REDUCE_SIGNATURE(REDUCE_MIN, V1_2, Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16,
                        Type::TENSOR_QUANT8_ASYMM);

static void singleAxisReduceOpConstructor(Type, uint32_t rank, RandomOperation* op) {
    setFreeDimensions(op->inputs[0], rank);
    // "axis" must be in the range [-rank, rank). A negative "axis" specifies an axis counting
    // from the end.
    int32_t axis = getUniform<int32_t>(-rank, rank - 1);
    op->inputs[1]->setScalarValue<int32_t>(axis);
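    // The output shape is the input shape with the reduced axis removed; the "axis + rank"
    // comparison handles a negative axis.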
    for (uint32_t i = 0; i < rank; i++) {
        if (i != static_cast<uint32_t>(axis) && i != axis + rank) {
            op->outputs[0]->dimensions.emplace_back(op->inputs[0]->dimensions[i]);
        }
    }
}

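// ARGMAX/ARGMIN reduce along a single axis and always output a TENSOR_INT32 tensor of indices.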
#define DEFINE_ARGMIN_MAX_SIGNATURE(op, ver, ...)                                                  \
    DEFINE_OPERATION_SIGNATURE(op##_##ver){                                                        \
            .opType = ANEURALNETWORKS_##op,                                                        \
            .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16, Type::TENSOR_INT32, \
                                   Type::TENSOR_QUANT8_ASYMM},                                     \
            .supportedRanks = {1, 2, 3, 4, 5},                                                     \
            .version = HalVersion::ver,                                                            \
            .inputs = {INPUT_DEFAULT, PARAMETER_NONE(Type::INT32)},                                \
            .outputs = {OUTPUT_TYPED(Type::TENSOR_INT32)},                                         \
            .constructor = singleAxisReduceOpConstructor};

DEFINE_ARGMIN_MAX_SIGNATURE(ARGMAX, V1_2);
DEFINE_ARGMIN_MAX_SIGNATURE(ARGMIN, V1_2);

}  // namespace fuzzing_test
}  // namespace nn
}  // namespace android