// Home | History | Annotate | Download | only in runtime
      1 /*
      2  * Copyright (C) 2017 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 #define LOG_TAG "Manager"
     18 
     19 #include "Manager.h"
     20 #include "Callbacks.h"
     21 #include "HalInterfaces.h"
     22 #include "Tracing.h"
     23 #include "Utils.h"
     24 
     25 #include <android/hidl/manager/1.0/IServiceManager.h>
     26 #include <build/version.h>
     27 #include <hidl/HidlTransportSupport.h>
     28 #include <hidl/ServiceManagement.h>
     29 
     30 #include <algorithm>
     31 #include <functional>
     32 
     33 using ::android::hardware::neuralnetworks::V1_2::implementation::ExecutionCallback;
     34 using HidlToken = hidl_array<uint8_t, ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN>;
     35 
     36 namespace android {
     37 namespace nn {
     38 
     39 bool Device::isCachingSupported() const {
     40     auto pair = getNumberOfCacheFilesNeeded();
     41     // Caching is supported if either of numModelCache or numDataCache is greater than 0.
     42     return pair.first > 0 || pair.second > 0;
     43 }
     44 
// A Device with actual underlying driver
class DriverDevice : public Device {
    DISALLOW_IMPLICIT_CONSTRUCTORS(DriverDevice);

   public:
    // Wraps the given HIDL device; call initialize() before any other use.
    DriverDevice(std::string name, const sp<V1_0::IDevice>& device);

    // Returns true if successfully initialized.
    bool initialize();

    const char* getName() const override { return mName.c_str(); }
    const char* getVersionString() const override { return mVersionString.c_str(); }
    VersionedIDevice* getInterface() override { return mInterface.get(); }
    int64_t getFeatureLevel() override { return mInterface->getFeatureLevel(); }
    int32_t getType() const override { return mInterface->getType(); }
    hidl_vec<Extension> getSupportedExtensions() const override;
    // Fills *supportedOperations with one entry per operation in hidlModel.
    void getSupportedOperations(const Model& hidlModel, IModelSlicer* slicer,
                                hidl_vec<bool>* supportedOperations) override;
    PerformanceInfo getPerformance(OperandType type) const override;
    PerformanceInfo getRelaxedFloat32toFloat16PerformanceScalar() const override {
        return mCapabilities.relaxedFloat32toFloat16PerformanceScalar;
    }
    PerformanceInfo getRelaxedFloat32toFloat16PerformanceTensor() const override {
        return mCapabilities.relaxedFloat32toFloat16PerformanceTensor;
    }
    // Returns {numModelCache, numDataCache} as cached from the driver.
    std::pair<uint32_t, uint32_t> getNumberOfCacheFilesNeeded() const override {
        return mNumCacheFiles;
    }

    int prepareModel(const Model& hidlModel, ExecutionPreference executionPreference,
                     const hidl_vec<hidl_handle>& modelCache,
                     const hidl_vec<hidl_handle>& dataCache, const HidlToken& token,
                     std::shared_ptr<VersionedIPreparedModel>* preparedModel) override;
    int prepareModelFromCache(const hidl_vec<hidl_handle>& modelCache,
                              const hidl_vec<hidl_handle>& dataCache, const HidlToken& token,
                              std::shared_ptr<VersionedIPreparedModel>* preparedModel) override;

   private:
    // Service name this driver was registered under.
    std::string mName;
    // Driver-reported version string; populated by initialize().
    std::string mVersionString;
    const std::shared_ptr<VersionedIDevice> mInterface;
    // Driver capabilities cached by initialize() from IDevice::getCapabilities.
    Capabilities mCapabilities;
    // Extensions cached by initialize() from IDevice::getSupportedExtensions.
    hidl_vec<Extension> mSupportedExtensions;
    // {numModelCache, numDataCache}; reset to {0, 0} by initialize() on error
    // or out-of-range values.
    std::pair<uint32_t, uint32_t> mNumCacheFiles;

#ifdef NN_DEBUGGABLE
    // For debugging: behavior of IDevice::getSupportedOperations for SampleDriver.
    // 0 - all operations reported by IDevice::getSupportedOperations() supported
    // 1 - some operations reported by IDevice::getSupportedOperations() supported
    uint32_t mSupported = 0;
#endif  // NN_DEBUGGABLE
};
     97 
// mName is initialized before mInterface (declaration order), so it is safe to
// pass mName into VersionedIDevice::create here.
DriverDevice::DriverDevice(std::string name, const sp<V1_0::IDevice>& device)
    : mName(std::move(name)), mInterface(VersionedIDevice::create(mName, device)) {}
    100 
// TODO: handle errors from initialize correctly
// Queries and caches the driver's capabilities, version string, extensions, and
// cache-file counts. Returns false if the interface is invalid or a required
// query fails; cache-file query failures are tolerated (caching disabled).
bool DriverDevice::initialize() {
#ifdef NN_DEBUGGABLE
    static const char samplePrefix[] = "sample";

    // For drivers whose name starts with "sample", allow a debug property to
    // control how getSupportedOperations results are filtered (see mSupported).
    mSupported = (mName.substr(0, sizeof(samplePrefix) - 1) == samplePrefix)
                         ? getProp("debug.nn.sample.supported")
                         : 0;
#endif  // NN_DEBUGGABLE

    ErrorStatus status = ErrorStatus::GENERAL_FAILURE;

    if (mInterface == nullptr) {
        LOG(ERROR) << "DriverDevice contains invalid interface object.";
        return false;
    }

    std::tie(status, mCapabilities) = mInterface->getCapabilities();
    if (status != ErrorStatus::NONE) {
        LOG(ERROR) << "IDevice::getCapabilities returned the error " << toString(status);
        return false;
    }
    VLOG(MANAGER) << "Capab " << toString(mCapabilities);

    std::tie(status, mVersionString) = mInterface->getVersionString();
    // TODO(miaowang): add a validation test case for in case of error.
    if (status != ErrorStatus::NONE) {
        LOG(ERROR) << "IDevice::getVersionString returned the error " << toString(status);
        return false;
    }

    std::tie(status, mSupportedExtensions) = mInterface->getSupportedExtensions();
    if (status != ErrorStatus::NONE) {
        LOG(ERROR) << "IDevice::getSupportedExtensions returned the error " << toString(status);
        return false;
    }

    // A failure here is non-fatal: fall back to no compilation caching.
    std::tie(status, mNumCacheFiles.first, mNumCacheFiles.second) =
            mInterface->getNumberOfCacheFilesNeeded();
    if (status != ErrorStatus::NONE) {
        LOG(WARNING) << "IDevice::getNumberOfCacheFilesNeeded returned the error "
                     << toString(status);
        mNumCacheFiles = {0, 0};
    }
    // Out-of-range counts also disable caching rather than failing initialization.
    if (mNumCacheFiles.first > static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES) ||
        mNumCacheFiles.second > static_cast<uint32_t>(Constant::MAX_NUMBER_OF_CACHE_FILES)) {
        LOG(WARNING)
                << "IDevice::getNumberOfCacheFilesNeeded returned invalid number of cache files "
                   "numModelCache = "
                << mNumCacheFiles.first << ", numDataCache = " << mNumCacheFiles.second;
        mNumCacheFiles = {0, 0};
    }
    return true;
}
    155 
// Returns the extensions cached by initialize(); empty until initialization succeeds.
hidl_vec<Extension> DriverDevice::getSupportedExtensions() const {
    return mSupportedExtensions;
}
    159 
    160 void DriverDevice::getSupportedOperations(const Model& hidlModel, IModelSlicer* slicer,
    161                                           hidl_vec<bool>* outSupportedOperations) {
    162     // Query the driver for what it can do.
    163     ErrorStatus status = ErrorStatus::GENERAL_FAILURE;
    164     hidl_vec<bool> supportedOperations;
    165     std::tie(status, supportedOperations) = mInterface->getSupportedOperations(hidlModel, slicer);
    166 
    167     if (status != ErrorStatus::NONE) {
    168         LOG(ERROR) << "IDevice::getSupportedOperations returned the error " << toString(status);
    169         // Set the supported operation vectors to all false, so we won't use this driver.
    170         outSupportedOperations->resize(hidlModel.operations.size());
    171         std::fill(outSupportedOperations->begin(), outSupportedOperations->end(), false);
    172         return;
    173     }
    174     if (supportedOperations.size() != hidlModel.operations.size()) {
    175         LOG(ERROR) << "IDevice::getSupportedOperations returned a vector of length "
    176                    << supportedOperations.size() << " when expecting "
    177                    << hidlModel.operations.size();
    178         // Set the supported operation vectors to all false, so we won't use this driver.
    179         outSupportedOperations->resize(hidlModel.operations.size());
    180         std::fill(outSupportedOperations->begin(), outSupportedOperations->end(), false);
    181         return;
    182     }
    183 
    184     *outSupportedOperations = std::move(supportedOperations);
    185 
    186 #ifdef NN_DEBUGGABLE
    187     if (mSupported != 1) {
    188         return;
    189     }
    190 
    191     const uint32_t baseAccumulator = std::hash<std::string>{}(mName);
    192     for (size_t operationIndex = 0; operationIndex < outSupportedOperations->size();
    193          operationIndex++) {
    194         if (!(*outSupportedOperations)[operationIndex]) {
    195             continue;
    196         }
    197 
    198         uint32_t accumulator = baseAccumulator;
    199         const Operation &operation = hidlModel.operations[operationIndex];
    200         accumulator ^= static_cast<uint32_t>(operation.type);
    201         auto accumulateOperands = [&hidlModel, &accumulator](const hidl_vec<uint32_t>& operands) {
    202             for (uint32_t operandIndex : operands) {
    203                 const Operand& operand = hidlModel.operands[operandIndex];
    204                 accumulator ^= static_cast<uint32_t>(operand.type);
    205                 accumulator ^= operand.dimensions.size();
    206                 for (uint32_t dimension : operand.dimensions) {
    207                     accumulator ^= dimension;
    208                     if (operand.lifetime == OperandLifeTime::CONSTANT_COPY ||
    209                         operand.lifetime == OperandLifeTime::CONSTANT_REFERENCE) {
    210                         accumulator ^= 1;
    211                     }
    212                 }
    213             }
    214         };
    215         accumulateOperands(operation.inputs);
    216         accumulateOperands(operation.outputs);
    217         if (accumulator & 1) {
    218             (*outSupportedOperations)[operationIndex] = false;
    219         }
    220     }
    221 #endif  // NN_DEBUGGABLE
    222 }
    223 
// Looks up the driver-reported performance for the given operand type in the
// cached capabilities table.
PerformanceInfo DriverDevice::getPerformance(OperandType type) const {
    return lookup(mCapabilities.operandPerformance, type);
}
    227 
    228 static int prepareModelCheck(ErrorStatus status,
    229                              const std::shared_ptr<VersionedIPreparedModel>& preparedModel,
    230                              const char* prepareName, const char* serviceName,
    231                              std::shared_ptr<VersionedIPreparedModel>* preparedModelOut) {
    232     CHECK(preparedModelOut != nullptr) << "prepareModelCheck -- preparedModelOut must be non-null";
    233     *preparedModelOut = nullptr;
    234 
    235     if (status != ErrorStatus::NONE) {
    236         LOG(ERROR) << prepareName << " on " << serviceName << " failed: "
    237                    << "prepareReturnStatus=" << toString(status);
    238         return ANEURALNETWORKS_OP_FAILED;
    239     }
    240     if (preparedModel == nullptr) {
    241         LOG(ERROR) << prepareName << " on " << serviceName << " failed: preparedModel is nullptr";
    242         return ANEURALNETWORKS_OP_FAILED;
    243     }
    244 
    245     *preparedModelOut = preparedModel;
    246     return ANEURALNETWORKS_NO_ERROR;
    247 }
    248 
    249 int DriverDevice::prepareModel(const Model& hidlModel, ExecutionPreference executionPreference,
    250                                const hidl_vec<hidl_handle>& modelCache,
    251                                const hidl_vec<hidl_handle>& dataCache, const HidlToken& token,
    252                                std::shared_ptr<VersionedIPreparedModel>* preparedModel) {
    253     // Note that some work within VersionedIDevice will be subtracted from the IPC layer
    254     NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_COMPILATION, "prepareModel");
    255 
    256     const auto [status, localPreparedModel] =
    257             mInterface->prepareModel(hidlModel, executionPreference, modelCache, dataCache, token);
    258 
    259     return prepareModelCheck(status, localPreparedModel, "prepareModel", getName(), preparedModel);
    260 }
    261 
    262 int DriverDevice::prepareModelFromCache(const hidl_vec<hidl_handle>& modelCache,
    263                                         const hidl_vec<hidl_handle>& dataCache,
    264                                         const HidlToken& token,
    265                                         std::shared_ptr<VersionedIPreparedModel>* preparedModel) {
    266     // Note that some work within VersionedIDevice will be subtracted from the IPC layer
    267     NNTRACE_FULL(NNTRACE_LAYER_IPC, NNTRACE_PHASE_COMPILATION, "prepareModelFromCache");
    268 
    269     const auto [status, localPreparedModel] =
    270             mInterface->prepareModelFromCache(modelCache, dataCache, token);
    271 
    272     return prepareModelCheck(status, localPreparedModel, "prepareModelFromCache", getName(),
    273                              preparedModel);
    274 }
    275 
// A special abstracted device for the CPU. Only one instance of this class will exist.
// Use get() to retrieve it.
class CpuDevice : public Device {
    DISALLOW_COPY_AND_ASSIGN(CpuDevice);

   public:
    // Returns the singleton CPU fallback device.
    static std::shared_ptr<CpuDevice> get() {
        static std::shared_ptr<CpuDevice> instance(new CpuDevice);
        return instance;
    }

    const char* getName() const override { return kName.c_str(); }
    const char* getVersionString() const override { return kVersionString.c_str(); }
    // The CPU device has no underlying HIDL driver.
    VersionedIDevice* getInterface() override { return nullptr; }
    int64_t getFeatureLevel() override { return kFeatureLevel; }
    int32_t getType() const override { return ANEURALNETWORKS_DEVICE_CPU; }
    hidl_vec<Extension> getSupportedExtensions() const override { return {/* No extensions. */}; }
    void getSupportedOperations(const Model& hidlModel, IModelSlicer* slicer,
                                hidl_vec<bool>* supportedOperations) override;
    PerformanceInfo getPerformance(OperandType) const override { return kPerformance; }
    PerformanceInfo getRelaxedFloat32toFloat16PerformanceScalar() const override {
        return kPerformance;
    }
    PerformanceInfo getRelaxedFloat32toFloat16PerformanceTensor() const override {
        return kPerformance;
    }
    std::pair<uint32_t, uint32_t> getNumberOfCacheFilesNeeded() const override {
        return kNumCacheFiles;
    }

    int prepareModel(const Model& hidlModel, ExecutionPreference executionPreference,
                     const hidl_vec<hidl_handle>& modelCache,
                     const hidl_vec<hidl_handle>& dataCache, const HidlToken&,
                     std::shared_ptr<VersionedIPreparedModel>* preparedModel) override;
    // Never valid for the CPU device: kNumCacheFiles is {0, 0}, so no caller
    // should attempt cache-based preparation here.
    int prepareModelFromCache(const hidl_vec<hidl_handle>&, const hidl_vec<hidl_handle>&,
                              const HidlToken&,
                              std::shared_ptr<VersionedIPreparedModel>*) override {
        CHECK(false) << "Should never call prepareModelFromCache on CpuDevice";
        return ANEURALNETWORKS_OP_FAILED;
    }

   private:
    CpuDevice() = default;
    const int64_t kFeatureLevel = __ANDROID_API__;
    const std::string kName = "nnapi-reference";
    const std::string kVersionString = build::GetBuildNumber();
    // Since the performance is a ratio compared to the CPU performance,
    // by definition the performance of the CPU is 1.0.
    const PerformanceInfo kPerformance = {.execTime = 1.0f, .powerUsage = 1.0f};
    // CPU device does not support compilation caching.
    const std::pair<uint32_t, uint32_t> kNumCacheFiles = {/*numModelCache=*/0,
                                                          /*numDataCache=*/0};
};
    330 
    331 void CpuDevice::getSupportedOperations(const Model& hidlModel, IModelSlicer*,
    332                                        hidl_vec<bool>* supportedOperations) {
    333     const size_t count = hidlModel.operations.size();
    334     hidl_vec<bool> result(count);
    335     for (size_t i = 0; i < count; i++) {
    336         // TODO(b/119870033): Decide whether and how post-P operations would be supported on CPU.
    337         //                    We may want to use the slicer for CpuDevice just as we do for
    338         //                    DriverDevice.
    339         OperationType operationType = hidlModel.operations[i].type;
    340         result[i] = !isExtensionOperationType(operationType) &&
    341                     operationType != OperationType::OEM_OPERATION;
    342     }
    343     *supportedOperations = std::move(result);
    344 }
    345 
    346 int CpuDevice::prepareModel(const Model& hidlModel, ExecutionPreference executionPreference,
    347                             const hidl_vec<hidl_handle>& modelCache,
    348                             const hidl_vec<hidl_handle>& dataCache, const HidlToken&,
    349                             std::shared_ptr<VersionedIPreparedModel>* preparedModel) {
    350     CHECK(modelCache.size() == 0 && dataCache.size() == 0)
    351             << "Should never call prepareModel with cache information on CpuDevice";
    352     *preparedModel = nullptr;
    353     if (!validateModel(hidlModel) || !validateExecutionPreference(executionPreference)) {
    354         return ANEURALNETWORKS_OP_FAILED;
    355     }
    356     return ANEURALNETWORKS_NO_ERROR;
    357 }
    358 
// Returns the process-wide DeviceManager singleton (constructed on first use;
// function-local static initialization is thread-safe since C++11).
DeviceManager* DeviceManager::get() {
    static DeviceManager manager;
    return &manager;
}
    363 
// Returns the singleton CPU fallback device as a generic Device.
std::shared_ptr<Device> DeviceManager::getCpuDevice() {
    return CpuDevice::get();
}
    367 
    368 std::shared_ptr<Device> DeviceManager::forTest_makeDriverDevice(const std::string& name,
    369                                                                 const sp<V1_0::IDevice>& device) {
    370     auto driverDevice = std::make_shared<DriverDevice>(name, device);
    371     CHECK(driverDevice->initialize());
    372     return driverDevice;
    373 }
    374 
// Enumerates all registered IDevice HAL services and registers each one that
// can be retrieved, then appends the CPU fallback device. The CPU device is
// pushed last into mDevices, and is the only entry in mDevicesCpuOnly.
void DeviceManager::findAvailableDevices() {
    using ::android::hidl::manager::V1_0::IServiceManager;
    VLOG(MANAGER) << "findAvailableDevices";

    sp<IServiceManager> manager = hardware::defaultServiceManager();
    if (manager == nullptr) {
        LOG(ERROR) << "Unable to open defaultServiceManager";
        return;
    }

    // The callback is invoked synchronously with the names of all services
    // implementing the V1_0::IDevice interface.
    manager->listByInterface(V1_0::IDevice::descriptor, [this](const hidl_vec<hidl_string>& names) {
        for (const auto& name : names) {
            VLOG(MANAGER) << "Found interface " << name.c_str();
            sp<V1_0::IDevice> device = V1_0::IDevice::getService(name);
            if (device == nullptr) {
                LOG(ERROR) << "Got a null IDEVICE for " << name.c_str();
                continue;
            }
            registerDevice(name.c_str(), device);
        }
    });

    // register CPU fallback device
    mDevices.push_back(CpuDevice::get());
    mDevicesCpuOnly.push_back(CpuDevice::get());
}
    401 
    402 void DeviceManager::registerDevice(const char* name, const sp<V1_0::IDevice>& device) {
    403     auto d = std::make_shared<DriverDevice>(name, device);
    404     if (d->initialize()) {
    405         mDevices.push_back(d);
    406     }
    407 }
    408 
// Discovers available devices at construction; on debuggable builds, also
// reads system properties that tune partitioning and execution behavior.
DeviceManager::DeviceManager() {
    VLOG(MANAGER) << "DeviceManager::DeviceManager";
    findAvailableDevices();
#ifdef NN_DEBUGGABLE
    mStrictSlicing = (getProp("debug.nn.strict-slicing") != 0);
    mPartitioning = getProp("debug.nn.partition", kPartitioningDefault);
    mDebugNNCpuOnly = (getProp("debug.nn.cpuonly") != 0);
    mSyncExecCpu = (getProp("debug.nn.syncexec-cpu", 1) != 0);
    // If mSyncExecHal was already set explicitly (mSyncExecHalSetter is set
    // elsewhere, not visible in this chunk), keep that value instead of the property.
    if (!mSyncExecHalSetter) {
        mSyncExecHal = (getProp("debug.nn.syncexec-hal", 1) != 0);
    }
    mSyncExecRuntime = (getProp("debug.nn.syncexec-runtime") != 0);
#endif  // NN_DEBUGGABLE
}
    423 
    424 }  // namespace nn
    425 }  // namespace android
    426