// Home | History | Annotate | Download | only in runtime
      1 /*
      2  * Copyright (C) 2017 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 #define LOG_TAG "ModelBuilder"
     18 
     19 #include "ModelBuilder.h"
     20 
     21 #include "CompilationBuilder.h"
     22 #include "GraphDump.h"
     23 #include "Manager.h"
     24 #include "TypeManager.h"
     25 #include "Utils.h"
     26 #include "ValidateHal.h"
     27 
#include <cstring>
#include <map>
#include <memory>
#include <set>
#include <utility>
#include <vector>
     30 
     31 namespace android {
     32 namespace nn {
     33 
     34 // The maximum number of operands and operations that a model may have.
     35 const uint32_t MAX_NUMBER_OF_OPERANDS = 0xFFFFFFFE;
     36 const uint32_t MAX_NUMBER_OF_OPERATIONS = 0xFFFFFFFE;
     37 
     38 bool ModelBuilder::badState(const char* name) {
     39     if (mCompletedModel) {
     40         LOG(ERROR) << "ANeuralNetworksModel_" << name << " can't modify after model finished";
     41         return true;
     42     }
     43     if (mInvalidModel) {
     44         LOG(ERROR) << "ANeuralNetworksModel_" << name << " can't modify an invalid model";
     45         return true;
     46     }
     47     return false;
     48 }
     49 
     50 int ModelBuilder::getExtensionType(const char* extensionName, uint16_t typeWithinExtension,
     51                                    int32_t* type) {
     52     return TypeManager::get()->getExtensionType(extensionName, typeWithinExtension, type)
     53                    ? ANEURALNETWORKS_NO_ERROR
     54                    : ANEURALNETWORKS_BAD_DATA;
     55 }
     56 
     57 int ModelBuilder::addOperand(const ANeuralNetworksOperandType& type) {
     58     if (badState("addOperand")) {
     59         return ANEURALNETWORKS_BAD_STATE;
     60     }
     61 
     62     OperandType operandType = static_cast<OperandType>(type.type);
     63     if (isExtensionOperandType(operandType) && !TypeManager::get()->areExtensionsAllowed()) {
     64         LOG(ERROR) << "Extensions are not supported for this process.";
     65         return ANEURALNETWORKS_BAD_DATA;
     66     }
     67     if (operandType == OperandType::OEM || operandType == OperandType::TENSOR_OEM_BYTE) {
     68         LOG(WARNING) << "OEM data type is deprecated. Use Extensions instead.";
     69     }
     70 
     71     const Extension::OperandTypeInformation* info = nullptr;
     72     if (isExtensionOperandType(operandType) &&
     73         !TypeManager::get()->getExtensionOperandTypeInfo(operandType, &info)) {
     74         LOG(ERROR) << "Extension operand type " << toString(operandType) << " is not registered";
     75         return ANEURALNETWORKS_BAD_DATA;
     76     }
     77     NN_RETURN_IF_ERROR(validateOperandType(type, info, "ANeuralNetworksModel_addOperand", true));
     78     size_t idx = mOperands.size();
     79     if (idx >= MAX_NUMBER_OF_OPERANDS) {
     80         LOG(ERROR) << "ANeuralNetworksModel_addOperand exceed max operands";
     81         return ANEURALNETWORKS_BAD_DATA;
     82     }
     83 
     84     mOperands.push_back({
     85             .type = operandType,
     86             .dimensions =
     87                     hidl_vec<uint32_t>(type.dimensions, type.dimensions + type.dimensionCount),
     88             .numberOfConsumers = 0,
     89             .scale = type.scale,
     90             .zeroPoint = type.zeroPoint,
     91             .lifetime = OperandLifeTime::TEMPORARY_VARIABLE,
     92             .location = {.poolIndex = 0, .offset = 0, .length = 0},
     93             .extraParams = Operand::ExtraParams(),
     94     });
     95     return ANEURALNETWORKS_NO_ERROR;
     96 }
     97 
// Associates a constant value with operand `index`.
//
// A nullptr buffer (with length 0) marks the operand as having no value.
// Values no larger than ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES
// are copied into mSmallOperandValues right away (CONSTANT_COPY); larger
// values are only recorded here (CONSTANT_REFERENCE) and copied into shared
// memory by copyLargeValuesToSharedMemory() when the model is finished, so
// the caller's buffer must stay alive until then.
//
// Returns ANEURALNETWORKS_NO_ERROR on success, ANEURALNETWORKS_BAD_STATE if
// the model can no longer be modified, or ANEURALNETWORKS_BAD_DATA on
// invalid arguments.
int ModelBuilder::setOperandValue(uint32_t index, const void* buffer, size_t length) {
    VLOG(MODEL) << __func__ << " for operand " << index << " size " << length;
    if (badState("setOperandValue")) {
        return ANEURALNETWORKS_BAD_STATE;
    }

    if (index >= operandCount()) {
        LOG(ERROR) << "ANeuralNetworksModel_setOperandValue setting operand " << index << " of "
                   << operandCount();
        return ANEURALNETWORKS_BAD_DATA;
    }
    Operand& operand = mOperands[index];
    if (buffer == nullptr) {
        // nullptr means "no value"; a nonzero length is then contradictory.
        if (length) {
            LOG(ERROR) << "ANeuralNetworksModel_setOperandValue buffer is nullptr but length is "
                          "not 0";
            return ANEURALNETWORKS_BAD_DATA;
        }
        operand.lifetime = OperandLifeTime::NO_VALUE;
        // The location is unused and is set to zeros.
        operand.location = {.poolIndex = 0, .offset = 0, .length = 0};
    } else {
        // A constant tensor's size must be computable, so its dimensions must
        // be fully specified.
        if (TypeManager::get()->isTensorType(operand.type) &&
            tensorHasUnspecifiedDimensions(operand)) {
            LOG(ERROR) << "ANeuralNetworksModel_setOperandValue setting operand " << index
                       << " which has operand type that is not fully specified";
            return ANEURALNETWORKS_BAD_DATA;
        }
        // The location length field is 32 bits wide, so the value must fit.
        if (length > 0xFFFFFFFF) {
            LOG(ERROR) << "ANeuralNetworksModel_setOperandValue value length of " << length
                       << " exceeds max size";
            return ANEURALNETWORKS_BAD_DATA;
        }
        uint32_t valueLength = static_cast<uint32_t>(length);
        // OEM operands carry opaque payloads, so their size cannot be checked.
        if (operand.type != OperandType::OEM) {
            uint32_t neededLength = TypeManager::get()->getSizeOfData(operand);
            if (neededLength != valueLength) {
                LOG(ERROR) << "ANeuralNetworksModel_setOperandValue setting " << valueLength
                           << " bytes when needing " << neededLength;
                return ANEURALNETWORKS_BAD_DATA;
            }
        }
        if (valueLength <= ANEURALNETWORKS_MAX_SIZE_OF_IMMEDIATELY_COPIED_VALUES) {
            // Small value: append it to the small-values pool now, padded so
            // the value starts at a suitably aligned offset.
            uint32_t existingSize = static_cast<uint32_t>(mSmallOperandValues.size());
            uint32_t extraBytes = alignBytesNeeded(existingSize, valueLength);
            mSmallOperandValues.resize(existingSize + extraBytes + valueLength);
            operand.lifetime = OperandLifeTime::CONSTANT_COPY;
            operand.location = {
                    .poolIndex = 0, .offset = existingSize + extraBytes, .length = valueLength};
            memcpy(&mSmallOperandValues[operand.location.offset], buffer, valueLength);
            VLOG(MODEL) << "Copied small value to offset " << operand.location.offset;
        } else {
            VLOG(MODEL) << "Saving large value";
            operand.lifetime = OperandLifeTime::CONSTANT_REFERENCE;
            // The values for poolIndex and offset will be set when the model is finished.
            // Until then they hold all-ones sentinels.
            typedef decltype(operand.location.poolIndex) PoolIndexType;
            typedef decltype(operand.location.offset) OffsetType;
            operand.location = {.poolIndex = ~PoolIndexType(0),
                                .offset = ~OffsetType(0),
                                .length = valueLength};
            // We keep track of the buffers. We'll allocate the shared memory only
            // once we know the total size, to avoid needless copies.
            mLargeOperandValues.push_back(LargeValue{.operandIndex = index, .buffer = buffer});
        }
    }
    return ANEURALNETWORKS_NO_ERROR;
}
    165 
    166 int ModelBuilder::setOperandSymmPerChannelQuantParams(
    167         uint32_t index, const ANeuralNetworksSymmPerChannelQuantParams& channelQuant) {
    168     if (badState("setOperandSymmPerChannelQuantParams")) {
    169         return ANEURALNETWORKS_BAD_STATE;
    170     }
    171 
    172     if (index >= operandCount()) {
    173         LOG(ERROR) << "ANeuralNetworksModel_setOperandSymmPerChannelQuantParams "
    174                    << "setting per-channel quantization parameters for operand " << index << " of "
    175                    << operandCount();
    176         return ANEURALNETWORKS_BAD_DATA;
    177     }
    178     Operand& operand = mOperands[index];
    179 
    180     if (!validateOperandSymmPerChannelQuantParams(
    181                 operand, channelQuant,
    182                 "ANeuralNetworksModel_setOperandSymmPerChannelQuantParams")) {
    183         return ANEURALNETWORKS_BAD_DATA;
    184     }
    185     switch (operand.type) {
    186         case OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
    187             operand.extraParams.channelQuant({
    188                     .scales = hidl_vec<float>(channelQuant.scales,
    189                                               channelQuant.scales + channelQuant.scaleCount),
    190                     .channelDim = channelQuant.channelDim,
    191             });
    192             break;
    193         default:
    194             LOG(ERROR) << "ANeuralNetworksModel_setOperandSymmPerChannelQuantParams "
    195                        << "invalid operand type " << static_cast<int32_t>(operand.type);
    196             return ANEURALNETWORKS_BAD_DATA;
    197     }
    198     return ANEURALNETWORKS_NO_ERROR;
    199 }
    200 
    201 int ModelBuilder::setOperandExtensionData(uint32_t index, const void* data, size_t length) {
    202     if (badState("setOperandExtensionData")) {
    203         return ANEURALNETWORKS_BAD_STATE;
    204     }
    205 
    206     if (index >= operandCount()) {
    207         LOG(ERROR) << "ANeuralNetworksModel_setOperandExtensionData "
    208                    << "setting extension data for operand " << index << " of " << operandCount();
    209         return ANEURALNETWORKS_BAD_DATA;
    210     }
    211     Operand& operand = mOperands[index];
    212 
    213     if (data == nullptr && length != 0) {
    214         LOG(ERROR) << "ANeuralNetworksModel_setOperandExtensionData data is nullptr but length is "
    215                    << length;
    216         return ANEURALNETWORKS_BAD_DATA;
    217     }
    218     if (data != nullptr && length == 0) {
    219         LOG(ERROR) << "ANeuralNetworksModel_setOperandExtensionData data is not nullptr but length "
    220                    << "is zero";
    221         return ANEURALNETWORKS_BAD_DATA;
    222     }
    223     if (!isExtensionOperandType(operand.type)) {
    224         LOG(ERROR) << "ANeuralNetworksModel_setOperandExtensionData "
    225                    << "setting extension data for a base operand type "
    226                    << static_cast<int32_t>(operand.type);
    227         return ANEURALNETWORKS_BAD_DATA;
    228     }
    229 
    230     if (data == nullptr) {
    231         operand.extraParams.none();
    232     } else {
    233         operand.extraParams.extension(
    234                 hidl_vec<uint8_t>(reinterpret_cast<const uint8_t*>(data),
    235                                   reinterpret_cast<const uint8_t*>(data) + length));
    236     }
    237     return ANEURALNETWORKS_NO_ERROR;
    238 }
    239 
// Moves every large constant value recorded by setOperandValue() into a
// single shared memory pool, and patches each owning operand's location
// (poolIndex and offset) to point into that pool. Called from finish().
// Returns ANEURALNETWORKS_NO_ERROR, or the error from shared memory
// creation/mapping.
int ModelBuilder::copyLargeValuesToSharedMemory() {
    VLOG(MODEL) << __func__ << " has " << mLargeOperandValues.size() << " values.";
    if (!mLargeOperandValues.empty()) {
        // Calculate the size of the shared memory needed for all the large values.
        // Also sets the offset for each value within the memory.
        size_t poolSize = 0;
        for (LargeValue& l : mLargeOperandValues) {
            Operand& operand = mOperands[l.operandIndex];
            nnAssert(operand.lifetime == OperandLifeTime::CONSTANT_REFERENCE);
            // Pad to the required alignment before placing this value.
            poolSize += alignBytesNeeded(poolSize, operand.location.length);
            operand.location.offset = poolSize;
            poolSize += operand.location.length;
        }

        // Allocate the shared memory.
        int n = mLargeValueMemory.create(poolSize);
        if (n != ANEURALNETWORKS_NO_ERROR) {
            return n;
        }
        uint8_t* memoryPointer = nullptr;
        n = mLargeValueMemory.getPointer(&memoryPointer);
        if (n != ANEURALNETWORKS_NO_ERROR) {
            return n;
        }
        // Register the pool; all large values share this single pool index.
        uint32_t poolIndex = mMemories.add(&mLargeValueMemory);
        VLOG(MODEL) << "Allocated large value pool of size " << poolSize << " at index "
                    << poolIndex;

        // Copy the values to this memory.
        for (LargeValue& l : mLargeOperandValues) {
            Operand& operand = mOperands[l.operandIndex];
            operand.location.poolIndex = poolIndex;
            memcpy(memoryPointer + operand.location.offset, l.buffer, operand.location.length);
        }
    }
    return ANEURALNETWORKS_NO_ERROR;
}
    277 
// Associates operand `index` with constant data stored in `memory` at
// `offset`. Unlike setOperandValue(), the data is referenced in place
// (CONSTANT_REFERENCE) rather than copied, so `memory` must remain valid for
// the model's lifetime.
// Returns ANEURALNETWORKS_NO_ERROR on success, ANEURALNETWORKS_BAD_STATE if
// the model can no longer be modified, ANEURALNETWORKS_UNMAPPABLE for a
// non-BLOB AHardwareBuffer, or ANEURALNETWORKS_BAD_DATA on invalid arguments.
int ModelBuilder::setOperandValueFromMemory(uint32_t index, const Memory* memory, uint32_t offset,
                                            size_t length) {
    VLOG(MODEL) << __func__ << " for operand " << index << " offset " << offset << " size "
                << length;
    if (badState("setOperandValueFromMemory")) {
        return ANEURALNETWORKS_BAD_STATE;
    }

    if (index >= operandCount()) {
        LOG(ERROR) << "ANeuralNetworksModel_setOperandValueFromMemory setting operand " << index
                   << " of " << operandCount();
        return ANEURALNETWORKS_BAD_DATA;
    }
    Operand& operand = mOperands[index];
    // A constant's size must be computable, so its dimensions must be fully
    // specified.
    if (TypeManager::get()->isTensorType(operand.type) && tensorHasUnspecifiedDimensions(operand)) {
        LOG(ERROR) << "ANeuralNetworksModel_setOperandValueFromMemory setting operand " << index
                   << " which has operand type that is not fully specified";
        return ANEURALNETWORKS_BAD_DATA;
    }
    // Only BLOB format AHardwareBuffer can be used for constant data.
    if (memory->getHidlMemory().name() == "hardware_buffer") {
        LOG(ERROR) << "ANeuralNetworksModel_setOperandValueFromMemory passed an AHardwareBuffer"
                   << " that is not in AHARDWAREBUFFER_FORMAT_BLOB format";
        return ANEURALNETWORKS_UNMAPPABLE;
    }
    // The supplied length must match the operand's computed data size exactly.
    uint32_t neededLength = TypeManager::get()->getSizeOfData(operand);
    if (neededLength != length) {
        LOG(ERROR) << "ANeuralNetworksModel_setOperandValueFromMemory setting " << length
                   << " bytes when needing " << neededLength;
        return ANEURALNETWORKS_BAD_DATA;
    }
    // Reject a region that extends past the end of the memory.
    if (!memory->validateSize(offset, length)) {
        return ANEURALNETWORKS_BAD_DATA;
    }
    operand.lifetime = OperandLifeTime::CONSTANT_REFERENCE;
    operand.location = {.poolIndex = mMemories.add(memory),
                        .offset = offset,
                        .length = static_cast<uint32_t>(length)};
    return ANEURALNETWORKS_NO_ERROR;
}
    318 
    319 int ModelBuilder::addOperation(ANeuralNetworksOperationType type, uint32_t inputCount,
    320                                const uint32_t* inputs, uint32_t outputCount,
    321                                const uint32_t* outputs) {
    322     if (badState("addOperation")) {
    323         return ANEURALNETWORKS_BAD_STATE;
    324     }
    325 
    326     OperationType operationType = static_cast<OperationType>(type);
    327     if (isExtensionOperationType(operationType) && !TypeManager::get()->areExtensionsAllowed()) {
    328         LOG(ERROR) << "Extensions are not supported for this process.";
    329         return ANEURALNETWORKS_BAD_DATA;
    330     }
    331     if (operationType == OperationType::OEM_OPERATION) {
    332         LOG(WARNING) << "OEM_OPERATION is deprecated. Use Extensions instead.";
    333     }
    334 
    335     if (!isExtensionOperationType(operationType)) {
    336         if (!validCode(kNumberOfOperationTypes, kNumberOfOperationTypesOEM, type)) {
    337             LOG(ERROR) << "ANeuralNetworksModel_addOperation invalid operation type " << type;
    338             return ANEURALNETWORKS_BAD_DATA;
    339         }
    340     }
    341     NN_RETURN_IF_ERROR(validateOperation(type, inputCount, inputs, outputCount, outputs, mOperands,
    342                                          HalVersion::LATEST));
    343 
    344     uint32_t operationIndex = operationCount();
    345     if (operationIndex >= MAX_NUMBER_OF_OPERATIONS) {
    346         LOG(ERROR) << "ANeuralNetworksModel_addOperation exceed max operations";
    347         return ANEURALNETWORKS_BAD_DATA;
    348     }
    349 
    350     mOperations.push_back({
    351             .type = operationType,
    352             .inputs = hidl_vec<uint32_t>(inputs, inputs + inputCount),
    353             .outputs = hidl_vec<uint32_t>(outputs, outputs + outputCount),
    354     });
    355     for (uint32_t i : mOperations.back().inputs) {
    356         mOperands[i].numberOfConsumers++;
    357     }
    358     mHasOEMOperation |= (operationType == OperationType::OEM_OPERATION);
    359     mHasExtensionOperation |= isExtensionOperationType(operationType);
    360 
    361     return ANEURALNETWORKS_NO_ERROR;
    362 }
    363 
    364 int ModelBuilder::identifyInputsAndOutputs(uint32_t inputCount, const uint32_t* inputs,
    365                                            uint32_t outputCount, const uint32_t* outputs) {
    366     if (badState("identifyInputsAndOutputs")) {
    367         return ANEURALNETWORKS_BAD_STATE;
    368     }
    369 
    370     int n = validateOperandList(inputCount, inputs, operandCount(),
    371                                 "ANeuralNetworksModel_identifyInputsAndOutputs inputs");
    372     if (n != ANEURALNETWORKS_NO_ERROR) {
    373         return n;
    374     }
    375     n = validateOperandList(outputCount, outputs, operandCount(),
    376                             "ANeuralNetworksModel_identifyInputsAndOutputs outputs");
    377     if (n != ANEURALNETWORKS_NO_ERROR) {
    378         return n;
    379     }
    380 
    381     // Makes a copy of the index list, validates the arguments, and changes
    382     // the lifetime info of the corresponding operand.
    383     auto setArguments = [&](std::vector<uint32_t>* indexVector, uint32_t indexCount,
    384                             const uint32_t* indexList, OperandLifeTime lifetime) -> bool {
    385         indexVector->resize(indexCount);
    386         for (uint32_t i = 0; i < indexCount; i++) {
    387             const uint32_t operandIndex = indexList[i];
    388             if (operandIndex >= mOperands.size()) {
    389                 LOG(ERROR) << "ANeuralNetworksModel_identifyInputsAndOutputs Can't set input or "
    390                               "output "
    391                               "to be "
    392                            << operandIndex << " as this exceeds the numbe of operands "
    393                            << mOperands.size();
    394                 return false;
    395             }
    396             (*indexVector)[i] = operandIndex;
    397             Operand& operand = mOperands[operandIndex];
    398             if (operand.lifetime != OperandLifeTime::TEMPORARY_VARIABLE) {
    399                 LOG(ERROR) << "ANeuralNetworksModel_identifyInputsAndOutputs Can't set operand "
    400                            << operandIndex
    401                            << " to be an input or output.  Check that it's not a constant or "
    402                               "already an input or output";
    403                 return false;
    404             }
    405             operand.lifetime = lifetime;
    406         }
    407         return true;
    408     };
    409 
    410     if (!setArguments(&mInputIndexes, inputCount, inputs, OperandLifeTime::MODEL_INPUT) ||
    411         !setArguments(&mOutputIndexes, outputCount, outputs, OperandLifeTime::MODEL_OUTPUT)) {
    412         return ANEURALNETWORKS_BAD_DATA;
    413     }
    414 
    415     return ANEURALNETWORKS_NO_ERROR;
    416 }
    417 
    418 int ModelBuilder::relaxComputationFloat32toFloat16(bool allow) {
    419     if (badState("relaxComputationFloat32toFloat16")) {
    420         return ANEURALNETWORKS_BAD_STATE;
    421     }
    422 
    423     mRelaxComputationFloat32toFloat16 = allow;
    424 
    425     return ANEURALNETWORKS_NO_ERROR;
    426 }
    427 
    428 int ModelBuilder::createCompilation(CompilationBuilder** compilation,
    429                                     const std::vector<std::shared_ptr<Device>>& devices,
    430                                     bool explicitDeviceList) {
    431     if (!mCompletedModel || mInvalidModel) {
    432         LOG(ERROR) << "ANeuralNetworksCompilation_create passed an unfinished or invalid model";
    433         *compilation = nullptr;
    434         return ANEURALNETWORKS_BAD_STATE;
    435     }
    436     *compilation = new (std::nothrow) CompilationBuilder(this, devices, explicitDeviceList);
    437     return (*compilation ? ANEURALNETWORKS_NO_ERROR : ANEURALNETWORKS_OUT_OF_MEMORY);
    438 }
    439 
// Marks the model complete: copies large constant values into shared memory,
// validates the resulting HAL model, and sorts operations into run order.
// On success the model becomes immutable (mCompletedModel = true); on
// validation failure it becomes permanently invalid (mInvalidModel = true).
int ModelBuilder::finish() {
    if (mCompletedModel) {
        LOG(ERROR) << "ANeuralNetworksModel_finish called more than once";
        return ANEURALNETWORKS_BAD_STATE;
    }
    if (mInvalidModel) {
        LOG(ERROR) << "ANeuralNetworksModel_finish called on an invalid model";
        return ANEURALNETWORKS_BAD_STATE;
    }

    int n = copyLargeValuesToSharedMemory();
    if (n != ANEURALNETWORKS_NO_ERROR) {
        return n;
    }

    // TODO: Modify validation so that it can be called without creating a HAL Model.
    // NOTE: Must copyLargeValuesToSharedMemory() before validation; otherwise,
    //       a CONSTANT_REFERENCE operand will not have correct .poolIndex, and
    //       validation will not work properly.
    Model modelForValidation;
    setHidlModel(&modelForValidation);
    if (!validateModel(modelForValidation)) {
        LOG(ERROR) << "ANeuralNetworksModel_finish called on invalid model";
        // Once validation fails, the model can never be finished or compiled.
        mInvalidModel = true;
        return ANEURALNETWORKS_BAD_DATA;
    }
    if (VLOG_IS_ON(MODEL)) {
        graphDump("ModelBuilder::finish", modelForValidation, nullptr);
    }

    // We sort the operations so that they will be in the appropriate
    // order for a single-threaded, op at a time execution.
    // TODO: we don't need this if we always run the partitioner.
    sortIntoRunOrder();
    mCompletedModel = true;
    return ANEURALNETWORKS_NO_ERROR;
}
    477 
    478 void ModelBuilder::sortIntoRunOrder() {
    479     if (!mSortedOperationIndexMap.empty()) {
    480         LOG(ERROR) << "Operations already in run order.";
    481         return;
    482     }
    483     // Tracks the operations that can be executed.
    484     std::vector<uint32_t> opsReadyToRun;
    485     std::vector<Operation> runOrder;
    486 
    487     // Tracks how many inputs are needed for each operation to be ready to run.
    488     std::multimap<uint32_t, uint32_t> operandToOperations;
    489     std::vector<uint32_t> unknownInputCount(operationCount());
    490     for (uint32_t operationIndex = 0; operationIndex < operationCount(); operationIndex++) {
    491         uint32_t& count = unknownInputCount[operationIndex];
    492         count = 0;
    493         for (uint32_t operandIndex : mOperations[operationIndex].inputs) {
    494             auto lifetime = mOperands[operandIndex].lifetime;
    495             if (lifetime == OperandLifeTime::TEMPORARY_VARIABLE ||
    496                 lifetime == OperandLifeTime::MODEL_OUTPUT) {
    497                 count++;
    498                 operandToOperations.insert(
    499                         std::pair<uint32_t, uint32_t>(operandIndex, operationIndex));
    500             }
    501         }
    502         if (count == 0) {
    503             opsReadyToRun.push_back(operationIndex);
    504         }
    505     }
    506 
    507     while (opsReadyToRun.size() > 0) {
    508         // Execute the next op
    509         int opIndex = opsReadyToRun.back();
    510         opsReadyToRun.pop_back();
    511         const Operation& operation = mOperations[opIndex];
    512 
    513         runOrder.push_back(mOperations[opIndex]);
    514         mSortedOperationIndexMap.push_back(opIndex);
    515 
    516         // Mark all its outputs as known.
    517         for (uint32_t operandIndex : operation.outputs) {
    518             auto range = operandToOperations.equal_range(operandIndex);
    519             for (auto i = range.first; i != range.second; i++) {
    520                 uint32_t& count = unknownInputCount[i->second];
    521                 if (--count == 0) {
    522                     opsReadyToRun.push_back(i->second);
    523                 }
    524             }
    525         }
    526     }
    527     mOperations = runOrder;
    528 }
    529 
    530 void ModelBuilder::setHidlModel(Model* model) const {
    531     model->operands = mOperands;
    532     model->operations = mOperations;
    533     model->inputIndexes = mInputIndexes;
    534     model->outputIndexes = mOutputIndexes;
    535     model->operandValues = mSmallOperandValues;
    536     model->relaxComputationFloat32toFloat16 = mRelaxComputationFloat32toFloat16;
    537     model->extensionNameToPrefix = getExtensionNameToPrefixMap();
    538 
    539     uint32_t count = mMemories.size();
    540     model->pools.resize(count);
    541     for (uint32_t i = 0; i < count; i++) {
    542         model->pools[i] = mMemories[i]->getHidlMemory();
    543     }
    544 }
    545 
    546 std::vector<Model::ExtensionNameAndPrefix> ModelBuilder::getExtensionNameToPrefixMap() const {
    547     std::vector<Model::ExtensionNameAndPrefix> extensionNameToPrefix;
    548     std::set<uint16_t> prefixSet;
    549 
    550     auto addExtensionWithPrefix = [&extensionNameToPrefix, &prefixSet](uint16_t prefix) {
    551         if (!prefixSet.insert(prefix).second) {
    552             return;
    553         }
    554         const Extension* extension;
    555         CHECK(TypeManager::get()->getExtensionInfo(prefix, &extension));
    556         extensionNameToPrefix.push_back({
    557                 .name = extension->name,
    558                 .prefix = prefix,
    559         });
    560     };
    561 
    562     constexpr uint8_t kLowBitsType =
    563             static_cast<uint8_t>(Model::ExtensionTypeEncoding::LOW_BITS_TYPE);
    564     for (const auto& operand : mOperands) {
    565         if (isExtensionOperandType(operand.type)) {
    566             addExtensionWithPrefix(static_cast<uint32_t>(operand.type) >> kLowBitsType);
    567         }
    568     }
    569     for (const auto& operation : mOperations) {
    570         if (isExtensionOperationType(operation.type)) {
    571             addExtensionWithPrefix(static_cast<uint32_t>(operation.type) >> kLowBitsType);
    572         }
    573     }
    574     return extensionNameToPrefix;
    575 }
    576 
    577 }  // namespace nn
    578 }  // namespace android
    579