// android/ml/nn/runtime/ModelBuilder.h
      1 /*
      2  * Copyright (C) 2017 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
// Class used to build a model through a succession of calls
// to the NN API.

     20 #ifndef ANDROID_ML_NN_RUNTIME_MODEL_BUILDER_H
     21 #define ANDROID_ML_NN_RUNTIME_MODEL_BUILDER_H
     22 
     23 #include "HalInterfaces.h"
     24 #include "Memory.h"
     25 #include "NeuralNetworks.h"
     26 #include "Utils.h"
     27 
     28 namespace android {
     29 namespace nn {
     30 
     31 class CompilationBuilder;
     32 class Device;
     33 class ExecutionPlan;
     34 class ExecutionStep;
     35 class Memory;
     36 
// Accumulates the operands and operations of a model as the client makes
// successive NN API calls, and can export the result as a HAL Model
// (see setHidlModel) once finish() has been called.
class ModelBuilder {
public:
    virtual ~ModelBuilder() {}
    // Adds an operand to the model.
    int addOperand(const ANeuralNetworksOperandType& type);
    // Sets the value of the operand at |index| from |length| bytes at
    // |buffer|.  Small values appear to be copied into mSmallOperandValues
    // and large ones recorded in mLargeOperandValues for later placement in
    // shared memory (see copyLargeValuesToSharedMemory) — TODO confirm
    // against the .cpp implementation.
    int setOperandValue(uint32_t index, const void* buffer, size_t length);
    // Sets the value of the operand at |index| to live in |memory| at
    // |offset|.  NOTE(review): validation rules are in the implementation
    // file; presumably |memory| is tracked via mMemories.
    int setOperandValueFromMemory(uint32_t index, const Memory* memory, uint32_t offset,
                                  size_t length);

    // Adds an operation of |type| consuming the operand indexes in |inputs|
    // and producing those in |outputs|.
    int addOperation(ANeuralNetworksOperationType type, uint32_t inputCount, const uint32_t* inputs,
                     uint32_t outputCount, const uint32_t* outputs);
    // Declares which operand indexes are the model's inputs and outputs
    // (stored in mInputIndexes / mOutputIndexes).
    int identifyInputsAndOutputs(uint32_t inputCount, const uint32_t* inputs, uint32_t outputCount,
                                 const uint32_t* outputs);

    // Completes model construction; afterwards isFinished() returns true
    // and further modification should be rejected (see mCompletedModel).
    int finish();
    bool isFinished() const { return mCompletedModel; }

    // Creates a CompilationBuilder for this model, returned through
    // |compilation|.  Ownership semantics are defined by the
    // implementation — TODO confirm.
    int createCompilation(CompilationBuilder** compilation);

    // Fills in |model| (the HIDL/HAL representation) from this builder's
    // operands, operations, indexes, and operand values.
    void setHidlModel(Model* model) const;

    uint32_t operandCount() const {
        // We don't allow more than uint32_t worth of operands
        return static_cast<uint32_t>(mOperands.size());
    }
    uint32_t operationCount() const {
        // We don't allow more than uint32_t worth of operations
        return static_cast<uint32_t>(mOperations.size());
    }
    // Counts of the model's declared inputs and outputs.
    uint32_t inputCount() const { return static_cast<uint32_t>(mInputIndexes.size()); }
    uint32_t outputCount() const { return static_cast<uint32_t>(mOutputIndexes.size()); }
    // Map input/output position |i| to the underlying operand index or
    // Operand.  No bounds checking: |i| must be < inputCount()/outputCount().
    uint32_t getInputOperandIndex(uint32_t i) const { return mInputIndexes[i]; }
    const Operand& getInputOperand(uint32_t i) const {
        return mOperands[getInputOperandIndex(i)];
    }
    uint32_t getOutputOperandIndex(uint32_t i) const { return mOutputIndexes[i]; }
    const Operand& getOutputOperand(uint32_t i) const {
        return mOperands[getOutputOperandIndex(i)];
    }
    // Unchecked accessors into the operand/operation tables.
    const Operand& getOperand(uint32_t index) const { return mOperands[index]; }
    const Operation& getOperation(uint32_t index) const { return mOperations[index]; }
    const MemoryTracker& getMemories() const { return mMemories; }
    const std::vector<Operation>& getOperations() const { return mOperations; }
    // Returns a pointer into the small-values pool at |offset|.  The pointer
    // is invalidated if mSmallOperandValues later reallocates.
    const uint8_t* getPointerToOperandValue(uint32_t offset) const {
        return mSmallOperandValues.data() + offset;
    }

    // Partitions the model's operations across |devices| according to
    // |preference|, writing the result into |plan|.
    int partitionTheWork(const std::vector<std::shared_ptr<Device>>& devices,
                         uint32_t preference, ExecutionPlan* plan) const;

 private:
    // TODO: move partitionTheWork, findBestDeviceForEachOperation,
    // sortIntoRunOrder to CompilationBuilder?

    // For each operation, picks the device that best matches |preference|;
    // results go into |bestDeviceForOperation|, one entry per operation.
    int findBestDeviceForEachOperation(uint32_t preference,
                                       const std::vector<std::shared_ptr<Device>>& devices,
                                       const size_t operationCount,
                                       const size_t deviceCount,
                                       std::vector<int>* bestDeviceForOperation) const;
    // Looks up the performance numbers |device| reports for the operation
    // at |operationIndex|.
    PerformanceInfo getPerformanceInfo(const std::shared_ptr<Device> device,
                                       uint32_t operationIndex) const;

    // Sorts the operations to be in the correct order for single threaded
    // node-at-a-time execution.
    void sortIntoRunOrder();

    // Copies the large values to a shared memory, if we have any.
    int copyLargeValuesToSharedMemory();

    // The operations of the graph.
    std::vector<Operation> mOperations;
    // The description of the operands of the graph.
    std::vector<Operand> mOperands;
    // Indexes into mOperands identifying the operands that are the inputs
    // and outputs of the model.
    // NOTE(review): an earlier comment here referred to an "mOperandIndexes
    // table" that does not exist in this class; these vectors hold the
    // operand indexes directly (see getInputOperand/getOutputOperand).
    std::vector<uint32_t> mInputIndexes;
    std::vector<uint32_t> mOutputIndexes;

    // The memory regions referenced by operand values; presumably populated
    // by setOperandValueFromMemory — confirm in the .cpp.
    MemoryTracker mMemories;

    // The value of the small operands that are defined at model
    // creation time.
    std::vector<uint8_t> mSmallOperandValues;

    // Operand index plus a pointer to the caller's buffer for one large
    // operand value.  NOTE(review): the buffer is not copied here, so it
    // must stay valid until copyLargeValuesToSharedMemory runs — confirm.
    struct LargeValue {
        uint32_t operandIndex;
        const void* buffer;
    };
    // Operand index and buffer pointer for all the large operand values of this model.
    std::vector<LargeValue> mLargeOperandValues;
    // The shared memory region that will contain the large values.
    Memory mLargeValueMemory;

    // Once the model has been finished, we should not allow further
    // modifications to the model.
    // NOTE(review): declared mutable, presumably so some const method can
    // set it — confirm which const code path flips this flag.
    mutable bool mCompletedModel = false;
};
    135 
    136 }  // namespace nn
    137 }  // namespace android
    138 
    139 #endif  // ANDROID_ML_NN_RUNTIME_MODEL_BUILDER_H
    140