Home | History | Annotate | Download | only in operation_signatures
      1 /*
      2  * Copyright (C) 2019 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 #include "fuzzing/operation_signatures/OperationSignatureUtils.h"
     18 
     19 namespace android {
     20 namespace nn {
     21 namespace fuzzing_test {
     22 
     23 static void embeddingLookupConstructor(Type, uint32_t rank, RandomOperation* op) {
     24     setFreeDimensions(op->inputs[0], /*rank=*/1);
     25     setFreeDimensions(op->inputs[1], rank);
     26     op->outputs[0]->dimensions.resize(rank);
     27     op->outputs[0]->dimensions[0] = op->inputs[0]->dimensions[0];
     28     for (uint32_t i = 1; i < rank; i++) {
     29         op->outputs[0]->dimensions[i] = op->inputs[1]->dimensions[i];
     30     }
     31     setSameQuantization(op->outputs[0], op->inputs[1]);
     32 }
     33 
     34 static void embeddingLookupFinalizer(RandomOperation* op) {
     35     uint32_t dimValue = op->inputs[1]->dimensions[0].getValue();
     36     uint32_t numElements = op->inputs[0]->getNumberOfElements();
     37     for (uint32_t i = 0; i < numElements; i++) {
     38         // The index values must be in the range of [0, input1_dim0).
     39         op->inputs[0]->value<int32_t>(i) = getUniform<int32_t>(0, dimValue - 1);
     40     }
     41 }
     42 
// EMBEDDING_LOOKUP since HAL v1.0: an int32 index tensor followed by a value
// table of rank 2-4. Shapes come from embeddingLookupConstructor; index
// values are generated by embeddingLookupFinalizer.
DEFINE_OPERATION_SIGNATURE(EMBEDDING_LOOKUP_V1_0){
        .opType = ANEURALNETWORKS_EMBEDDING_LOOKUP,
        .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_INT32, Type::TENSOR_QUANT8_ASYMM},
        .supportedRanks = {2, 3, 4},
        .version = HalVersion::V1_0,
        .inputs = {PARAMETER_NONE(Type::TENSOR_INT32), INPUT_DEFAULT},
        .outputs = {OUTPUT_DEFAULT},
        .constructor = embeddingLookupConstructor,
        .finalizer = embeddingLookupFinalizer};
     52 
     53 static void hashtableLookupConstructor(Type, uint32_t rank, RandomOperation* op) {
     54     op->inputs[0]->dimensions = {RandomVariableType::FREE};
     55     op->inputs[1]->dimensions = {RandomVariableType::FREE};
     56     op->inputs[2]->dimensions.resize(rank);
     57     op->outputs[0]->dimensions.resize(rank);
     58     op->inputs[2]->dimensions[0] = op->inputs[1]->dimensions[0];
     59     op->outputs[0]->dimensions[0] = op->inputs[0]->dimensions[0];
     60     for (uint32_t i = 1; i < rank; i++) {
     61         op->inputs[2]->dimensions[i] = RandomVariableType::FREE;
     62         op->outputs[0]->dimensions[i] = op->inputs[2]->dimensions[i];
     63     }
     64     setSameQuantization(op->outputs[0], op->inputs[2]);
     65     op->outputs[1]->dimensions = {op->inputs[0]->dimensions[0]};
     66 }
     67 
     68 static void hashtableLookupFinalizer(RandomOperation* op) {
     69     // Generate values for keys. The keys tensor must be sorted in ascending order.
     70     uint32_t n = op->inputs[1]->getNumberOfElements();
     71     int32_t val = 0;
     72     for (uint32_t i = 0; i < n; i++) {
     73         op->inputs[1]->value<int32_t>(i) = val;
     74         val += getUniform<int32_t>(1, 2);
     75     }
     76     // Generate values for lookups.
     77     uint32_t k = op->inputs[0]->getNumberOfElements();
     78     for (uint32_t i = 0; i < k; i++) {
     79         op->inputs[0]->value<int32_t>(i) = getUniform<int32_t>(0, val);
     80     }
     81 }
     82 
// The hits tensor in HASHTABLE_LOOKUP.
// Forces the second output to TENSOR_QUANT8_ASYMM with scale 1.0 and
// zeroPoint 0 regardless of the operation's primary data type.
// Presumably each element flags whether the corresponding lookup matched a
// key — confirm against the NNAPI HASHTABLE_LOOKUP specification.
static const OperandSignature hitsTensor_HASHTABLE_LOOKUP = {
        .type = RandomOperandType::OUTPUT, .constructor = [](Type, uint32_t, RandomOperand* op) {
            op->dataType = Type::TENSOR_QUANT8_ASYMM;
            op->scale = 1.0f;
            op->zeroPoint = 0;
        }};
     90 
// HASHTABLE_LOOKUP since HAL v1.0: int32 lookups, int32 keys, then a values
// tensor of rank 2-4. Output 1 is the dedicated "hits" operand defined above.
DEFINE_OPERATION_SIGNATURE(HASHTABLE_LOOKUP_V1_0){
        .opType = ANEURALNETWORKS_HASHTABLE_LOOKUP,
        .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_INT32, Type::TENSOR_QUANT8_ASYMM},
        .supportedRanks = {2, 3, 4},
        .version = HalVersion::V1_0,
        .inputs = {PARAMETER_NONE(Type::TENSOR_INT32), PARAMETER_NONE(Type::TENSOR_INT32),
                   INPUT_DEFAULT},
        .outputs = {OUTPUT_DEFAULT, hitsTensor_HASHTABLE_LOOKUP},
        .constructor = hashtableLookupConstructor,
        .finalizer = hashtableLookupFinalizer};
    101 
    102 static void gatherConstructor(Type, uint32_t rank, RandomOperation* op) {
    103     // Generate value for "axis" scalar.
    104     int32_t axis = getUniform<int32_t>(-rank, rank - 1);
    105     op->inputs[1]->setScalarValue<int32_t>(axis);
    106     if (axis < 0) axis += rank;
    107 
    108     // Set dimensions for input and indices tensor.
    109     uint32_t indRank = getUniform<uint32_t>(1, 5);
    110     setFreeDimensions(op->inputs[0], rank);
    111     setFreeDimensions(op->inputs[2], indRank);
    112 
    113     for (uint32_t i = 0; i < static_cast<uint32_t>(axis); i++) {
    114         op->outputs[0]->dimensions.push_back(op->inputs[0]->dimensions[i]);
    115     }
    116     for (uint32_t i = 0; i < indRank; i++) {
    117         op->outputs[0]->dimensions.push_back(op->inputs[2]->dimensions[i]);
    118     }
    119     for (uint32_t i = axis + 1; i < rank; i++) {
    120         op->outputs[0]->dimensions.push_back(op->inputs[0]->dimensions[i]);
    121     }
    122     setSameQuantization(op->outputs[0], op->inputs[0]);
    123 }
    124 
    125 static void gatherFinalizer(RandomOperation* op) {
    126     int32_t axis = op->inputs[1]->value<int32_t>();
    127     if (axis < 0) axis += op->inputs[0]->dimensions.size();
    128     uint32_t dimValue = op->inputs[0]->dimensions[axis].getValue();
    129     uint32_t numElements = op->inputs[2]->getNumberOfElements();
    130     for (uint32_t i = 0; i < numElements; i++) {
    131         // The index values must be in the range of [0, dimValue).
    132         op->inputs[2]->value<int32_t>(i) = getUniform<int32_t>(0, dimValue - 1);
    133     }
    134 }
    135 
// GATHER since HAL v1.2: data tensor, int32 axis scalar, int32 indices
// tensor. Index values are generated by gatherFinalizer.
DEFINE_OPERATION_SIGNATURE(GATHER_V1_2){
        .opType = ANEURALNETWORKS_GATHER,
        .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16, Type::TENSOR_INT32,
                               Type::TENSOR_QUANT8_ASYMM},
        .supportedRanks = {1, 2, 3, 4, 5},
        .version = HalVersion::V1_2,
        .inputs = {INPUT_DEFAULT, PARAMETER_NONE(Type::INT32), PARAMETER_NONE(Type::TENSOR_INT32)},
        .outputs = {OUTPUT_DEFAULT},
        .constructor = gatherConstructor,
        .finalizer = gatherFinalizer};
    146 
    147 static void selectConstructor(Type, uint32_t rank, RandomOperation* op) {
    148     setFreeDimensions(op->inputs[0], rank);
    149     op->inputs[1]->dimensions = op->inputs[0]->dimensions;
    150     op->inputs[2]->dimensions = op->inputs[0]->dimensions;
    151     op->outputs[0]->dimensions = op->inputs[0]->dimensions;
    152     setSameQuantization(op->inputs[2], op->inputs[1]);
    153     setSameQuantization(op->outputs[0], op->inputs[1]);
    154 }
    155 
// SELECT since HAL v1.2: a bool8 condition tensor followed by two data
// tensors of the same shape. No finalizer is needed — all values are free.
DEFINE_OPERATION_SIGNATURE(SELECT_V1_2){
        .opType = ANEURALNETWORKS_SELECT,
        .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16, Type::TENSOR_INT32,
                               Type::TENSOR_QUANT8_ASYMM},
        .supportedRanks = {1, 2, 3, 4},
        .version = HalVersion::V1_2,
        .inputs = {INPUT_TYPED(Type::TENSOR_BOOL8), INPUT_DEFAULT, INPUT_DEFAULT},
        .outputs = {OUTPUT_DEFAULT},
        .constructor = selectConstructor};
    165 
// Shapes the operands of TOPK_V2: all output dimensions except the last are
// copied from the input; both outputs' last dimension is the random variable
// k, constrained to [1, input depth].
static void topKConstructor(Type, uint32_t rank, RandomOperation* op) {
    setFreeDimensions(op->inputs[0], rank);
    op->outputs[0]->dimensions.resize(rank);
    op->outputs[1]->dimensions.resize(rank);
    for (uint32_t i = 0; i < rank - 1; i++) {
        op->outputs[0]->dimensions[i] = op->inputs[0]->dimensions[i];
        op->outputs[1]->dimensions[i] = op->inputs[0]->dimensions[i];
    }

    // K must be in the range of [1, depth].
    // kInvalidValue appears to act as the "no explicit upper bound" sentinel
    // here; the real upper bound comes from the setGreaterEqual constraint
    // below — confirm against RandomVariable's definition.
    auto k = op->inputs[1]->value<RandomVariable>();
    k.setRange(1, kInvalidValue);
    op->inputs[0]->dimensions.back().setGreaterEqual(k);

    op->outputs[0]->dimensions.back() = k;
    op->outputs[1]->dimensions.back() = k;
    setSameQuantization(op->outputs[0], op->inputs[0]);

    // As the sorting is not required to be stable, we should not check the second output (indices).
    op->outputs[1]->doNotCheckAccuracy = true;
    op->outputs[1]->doNotConnect = true;
}
    188 
// TOPK_V2 since HAL v1.2: data tensor plus a free random int scalar k.
// Output 0 holds the values; output 1 holds int32 indices (accuracy-exempt,
// see topKConstructor).
DEFINE_OPERATION_SIGNATURE(TOPK_V2_V1_2){
        .opType = ANEURALNETWORKS_TOPK_V2,
        .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16, Type::TENSOR_INT32,
                               Type::TENSOR_QUANT8_ASYMM},
        .supportedRanks = {1, 2, 3, 4},
        .version = HalVersion::V1_2,
        .inputs = {INPUT_DEFAULT, RANDOM_INT_FREE},
        .outputs = {OUTPUT_DEFAULT, OUTPUT_TYPED(Type::TENSOR_INT32)},
        .constructor = topKConstructor};
    198 
    199 static void sliceConstructor(Type, uint32_t rank, RandomOperation* op) {
    200     op->inputs[1]->dimensions = {rank};
    201     op->inputs[2]->dimensions = {rank};
    202     setFreeDimensions(op->inputs[0], rank);
    203     setFreeDimensions(op->outputs[0], rank);
    204     // The axis size of output must be less than or equal to input.
    205     for (uint32_t i = 0; i < rank; i++) {
    206         op->inputs[0]->dimensions[i].setGreaterEqual(op->outputs[0]->dimensions[i]);
    207     }
    208     setSameQuantization(op->outputs[0], op->inputs[0]);
    209 }
    210 
    211 static void sliceFinalizer(RandomOperation* op) {
    212     uint32_t rank = op->inputs[0]->dimensions.size();
    213     int32_t* begin = reinterpret_cast<int32_t*>(op->inputs[1]->buffer.data());
    214     int32_t* size = reinterpret_cast<int32_t*>(op->inputs[2]->buffer.data());
    215     for (uint32_t i = 0; i < rank; i++) {
    216         int32_t inputSize = op->inputs[0]->dimensions[i].getValue();
    217         int32_t outputSize = op->outputs[0]->dimensions[i].getValue();
    218         // Randomly choose a valid begin index for each axis.
    219         begin[i] = getUniform<int32_t>(0, inputSize - outputSize);
    220         size[i] = outputSize;
    221     }
    222 }
    223 
// SLICE since HAL v1.2: data tensor plus int32 begin and size tensors, both
// filled in by sliceFinalizer.
DEFINE_OPERATION_SIGNATURE(SLICE_V1_2){
        .opType = ANEURALNETWORKS_SLICE,
        .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_FLOAT16, Type::TENSOR_INT32,
                               Type::TENSOR_QUANT8_ASYMM},
        .supportedRanks = {1, 2, 3, 4},
        .version = HalVersion::V1_2,
        .inputs = {INPUT_DEFAULT, PARAMETER_NONE(Type::TENSOR_INT32),
                   PARAMETER_NONE(Type::TENSOR_INT32)},
        .outputs = {OUTPUT_DEFAULT},
        .constructor = sliceConstructor,
        .finalizer = sliceFinalizer};
    235 
    236 inline int32_t convertToBitMask(const std::vector<bool>& flags) {
    237     int32_t mask = 0, bit = 1;
    238     for (bool flag : flags) {
    239         if (flag) mask |= bit;
    240         bit <<= 1;
    241     }
    242     return mask;
    243 }
    244 
// Shapes the operands of STRIDED_SLICE: inputs 1-3 are the per-axis
// begin/end/strides tensors, inputs 4-6 are the begin/end/shrink-axis mask
// scalars. Picks a random stride in [1, 3] for each axis and constrains each
// output dimension so some begin/end choice can realize it (the concrete
// begin/end values are generated in stridedSliceFinalizer).
static void stridedSliceConstructor(Type, uint32_t rank, RandomOperation* op) {
    op->inputs[1]->dimensions = {rank};
    op->inputs[2]->dimensions = {rank};
    op->inputs[3]->dimensions = {rank};
    op->inputs[3]->resizeBuffer<int32_t>(rank);
    setFreeDimensions(op->inputs[0], rank);
    std::vector<bool> shrinkMask(rank, false);
    for (uint32_t i = 0; i < rank; i++) {
        // TODO: Currently shrinkMask is always set to false.
        shrinkMask[i] = false;
        int32_t stride = getUniform<int32_t>(1, 3);
        op->inputs[3]->value<int32_t>(i) = stride;
        if (!shrinkMask[i]) {
            op->outputs[0]->dimensions.push_back(RandomVariableType::FREE);
            // With stride s, at most ceil(inputDim / s) elements fit along
            // this axis, so bound the free output dimension by that value.
            auto maxOut = (op->inputs[0]->dimensions[i] + (stride - 1)) / stride;
            maxOut.setGreaterEqual(op->outputs[0]->dimensions.back());
        }
    }
    setSameQuantization(op->outputs[0], op->inputs[0]);
    op->inputs[6]->setScalarValue<int32_t>(convertToBitMask(shrinkMask));
}
    266 
// Generates begin/end values (inputs 1 and 2) and the begin/end mask scalars
// (inputs 4 and 5) so the slice yields exactly the output shape chosen by
// the constructor, while randomly exercising masked bounds and negative
// strides.
static void stridedSliceFinalizer(RandomOperation* op) {
    uint32_t rank = op->inputs[0]->dimensions.size();
    int32_t* begin = reinterpret_cast<int32_t*>(op->inputs[1]->buffer.data());
    int32_t* end = reinterpret_cast<int32_t*>(op->inputs[2]->buffer.data());
    std::vector<bool> beginMask(rank, false), endMask(rank, false);
    int32_t shrinkMask = op->inputs[6]->value<int32_t>();
    // "o" tracks the output dimension index: axes removed by the shrink mask
    // have no corresponding output dimension.
    for (uint32_t i = 0, o = 0; i < rank; i++) {
        int32_t inputSize = op->inputs[0]->dimensions[i].getValue();
        int32_t stride = op->inputs[3]->value<int32_t>(i);
        if ((shrinkMask & (1 << i)) == 0) {
            int32_t outputSize = op->outputs[0]->dimensions[o++].getValue();
            // Choose begin so that outputSize strided steps stay in bounds.
            int32_t maxStart = inputSize - (outputSize - 1) * stride - 1;
            begin[i] = getUniform<int32_t>(0, maxStart);

            // Any end in [minEnd, maxEnd] yields exactly outputSize elements.
            int32_t minEnd = begin[i] + (outputSize - 1) * stride + 1;
            int32_t maxEnd = std::min(begin[i] + outputSize * stride, inputSize);
            end[i] = getUniform<int32_t>(minEnd, maxEnd);

            // Switch to masked begin/end.
            // NOTE(review): minEnd >= 1 here, so "end[i] == 0" can never be
            // true and endMask is never set on this path; possibly
            // "end[i] == inputSize" was intended — confirm upstream.
            beginMask[i] = (begin[i] == 0 && getBernoulli(0.2f));
            endMask[i] = (end[i] == 0 && getBernoulli(0.2f));

            // When begin or end mask is set, begin[i] or end[i] is ignored and can have any
            // arbitrary value.
            if (beginMask[i]) begin[i] = getUniform<int32_t>(-inputSize, inputSize - 1);
            if (endMask[i]) end[i] = getUniform<int32_t>(-inputSize, inputSize - 1);
        } else {
            // When shrink mask is set, the begin and end must define a slice of size 1, e.g.
            // begin[i] = x, end[i] = x + 1.
            begin[i] = getUniform<int32_t>(0, inputSize - 1);
            end[i] = begin[i] + 1;
        }

        // Switch to negative stride.
        if (getBernoulli(0.2f)) {
            // Reversing direction mirrors the slice: swap the bounds (and
            // their masks) and shift both by one so the same elements are
            // visited in reverse order.
            op->inputs[3]->value<int32_t>(i) = -stride;
            std::swap(begin[i], end[i]);
            std::swap(beginMask[i], endMask[i]);
            begin[i]--;
            end[i]--;
            // end = -1 will be intepreted to inputSize - 1 if not setting endMask.
            if (end[i] < 0) endMask[i] = true;
        }
    }
    op->inputs[4]->setScalarValue<int32_t>(convertToBitMask(beginMask));
    op->inputs[5]->setScalarValue<int32_t>(convertToBitMask(endMask));
}
    314 
// STRIDED_SLICE for float32/quant8 since HAL v1.1. The three mask scalars
// are declared as single-choice (0) parameters; their actual values are
// written by stridedSliceConstructor (shrink mask) and stridedSliceFinalizer
// (begin/end masks).
DEFINE_OPERATION_SIGNATURE(STRIDED_SLICE_V1_1){
        .opType = ANEURALNETWORKS_STRIDED_SLICE,
        .supportedDataTypes = {Type::TENSOR_FLOAT32, Type::TENSOR_QUANT8_ASYMM},
        .supportedRanks = {1, 2, 3, 4},
        .version = HalVersion::V1_1,
        .inputs = {INPUT_DEFAULT, PARAMETER_NONE(Type::TENSOR_INT32),
                   PARAMETER_NONE(Type::TENSOR_INT32), PARAMETER_NONE(Type::TENSOR_INT32),
                   PARAMETER_CHOICE(Type::INT32, 0), PARAMETER_CHOICE(Type::INT32, 0),
                   PARAMETER_CHOICE(Type::INT32, 0)},
        .outputs = {OUTPUT_DEFAULT},
        .constructor = stridedSliceConstructor,
        .finalizer = stridedSliceFinalizer};
    327 
// STRIDED_SLICE float16 variant since HAL v1.2. Shares the constructor and
// finalizer with the v1.1 signature; only the data type and the mask
// parameter declarations (PARAMETER_NONE instead of PARAMETER_CHOICE) differ.
DEFINE_OPERATION_SIGNATURE(STRIDED_SLICE_V1_2){
        .opType = ANEURALNETWORKS_STRIDED_SLICE,
        .supportedDataTypes = {Type::TENSOR_FLOAT16},
        .supportedRanks = {1, 2, 3, 4},
        .version = HalVersion::V1_2,
        .inputs = {INPUT_DEFAULT, PARAMETER_NONE(Type::TENSOR_INT32),
                   PARAMETER_NONE(Type::TENSOR_INT32), PARAMETER_NONE(Type::TENSOR_INT32),
                   PARAMETER_NONE(Type::INT32), PARAMETER_NONE(Type::INT32),
                   PARAMETER_NONE(Type::INT32)},
        .outputs = {OUTPUT_DEFAULT},
        .constructor = stridedSliceConstructor,
        .finalizer = stridedSliceFinalizer};
    340 
    341 }  // namespace fuzzing_test
    342 }  // namespace nn
    343 }  // namespace android
    344