/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// This test only tests internal APIs, and has dependencies on internal header
// files, including NN API HIDL definitions.
// It is not part of CTS.

#include "TestMemory.h"

#include "Manager.h"
#include "Memory.h"
#include "TestNeuralNetworksWrapper.h"

#include <android/sharedmem.h>
#include <gtest/gtest.h>
#include <sys/mman.h>
#include <unistd.h>

#include <cerrno>
#include <cstring>
#include <fstream>
#include <string>

using WrapperCompilation = ::android::nn::test_wrapper::Compilation;
using WrapperExecution = ::android::nn::test_wrapper::Execution;
using WrapperMemory = ::android::nn::test_wrapper::Memory;
using WrapperModel = ::android::nn::test_wrapper::Model;
using WrapperOperandType = ::android::nn::test_wrapper::OperandType;
using WrapperResult = ::android::nn::test_wrapper::Result;
using WrapperType = ::android::nn::test_wrapper::Type;

namespace {

// Tests to ensure that various kinds of memory leaks do not occur.
//
// The fixture checks that no anonymous shared memory regions are leaked by
// comparing the count of /dev/ashmem mappings in SetUp and TearDown. This could
// break if the test or framework starts lazily instantiating something that
// creates a mapping - at that point the way the test works needs to be
// reinvestigated. The filename /dev/ashmem is a documented part of the Android
// kernel interface (see
// https://source.android.com/devices/architecture/kernel/reqs-interfaces).
//
// (We can also get very unlucky and mask a memory leak by unrelated unmapping
// somewhere else. This seems unlikely enough to not deal with.)
class MemoryLeakTest : public ::testing::Test {
   protected:
    void SetUp() override;
    void TearDown() override;

   private:
    size_t GetAshmemMappingsCount();

    size_t mStartingMapCount = 0;
    bool mIsCpuOnly;
};

void MemoryLeakTest::SetUp() {
    mIsCpuOnly = android::nn::DeviceManager::get()->getUseCpuOnly();
    mStartingMapCount = GetAshmemMappingsCount();
}

void MemoryLeakTest::TearDown() {
    android::nn::DeviceManager::get()->setUseCpuOnly(mIsCpuOnly);
    const size_t endingMapCount = GetAshmemMappingsCount();
    ASSERT_EQ(mStartingMapCount, endingMapCount);
}

size_t MemoryLeakTest::GetAshmemMappingsCount() {
    std::ifstream mappingsStream("/proc/self/maps");
    if (!mappingsStream.good()) {
        // errno is set by std::ifstream on Linux
        ADD_FAILURE() << "Failed to open /proc/self/maps: " << std::strerror(errno);
        return 0;
    }
    std::string line;
    size_t mapCount = 0;
    while (std::getline(mappingsStream, line)) {
        if (line.find("/dev/ashmem") != std::string::npos) {
            ++mapCount;
        }
    }
    return mapCount;
}

// As well as serving as a functional test for ASharedMemory, also
// serves as a regression test for http://b/69685100 "RunTimePoolInfo
// leaks shared memory regions".
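//
// The model built here is a chain of two additions,
//   b = ADD(a, c), d = ADD(b, e),
// where the constant operands a and e are read from a single ASharedMemory
// region at non-zero offsets, c is the execution input, and d is the output.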
//
// TODO: test non-zero offset.
TEST_F(MemoryLeakTest, TestASharedMemory) {
    // Layout where to place matrix2 and matrix3 in the memory we'll allocate.
    // We have gaps to test that we don't assume contiguity.
    constexpr uint32_t offsetForMatrix2 = 20;
    constexpr uint32_t offsetForMatrix3 = offsetForMatrix2 + sizeof(matrix2) + 30;
    constexpr uint32_t weightsSize = offsetForMatrix3 + sizeof(matrix3) + 60;

    int weightsFd = ASharedMemory_create("weights", weightsSize);
    ASSERT_GT(weightsFd, -1);
    uint8_t* weightsData = (uint8_t*)mmap(nullptr, weightsSize, PROT_READ | PROT_WRITE,
                                          MAP_SHARED, weightsFd, 0);
    ASSERT_NE(weightsData, MAP_FAILED);
    memcpy(weightsData + offsetForMatrix2, matrix2, sizeof(matrix2));
    memcpy(weightsData + offsetForMatrix3, matrix3, sizeof(matrix3));
    WrapperMemory weights(weightsSize, PROT_READ | PROT_WRITE, weightsFd, 0);
    ASSERT_TRUE(weights.isValid());

    WrapperModel model;
    WrapperOperandType matrixType(WrapperType::TENSOR_FLOAT32, {3, 4});
    WrapperOperandType scalarType(WrapperType::INT32, {});
    int32_t activation(0);
    auto a = model.addOperand(&matrixType);
    auto b = model.addOperand(&matrixType);
    auto c = model.addOperand(&matrixType);
    auto d = model.addOperand(&matrixType);
    auto e = model.addOperand(&matrixType);
    auto f = model.addOperand(&scalarType);

    model.setOperandValueFromMemory(e, &weights, offsetForMatrix2, sizeof(Matrix3x4));
    model.setOperandValueFromMemory(a, &weights, offsetForMatrix3, sizeof(Matrix3x4));
    model.setOperandValue(f, &activation, sizeof(activation));
    model.addOperation(ANEURALNETWORKS_ADD, {a, c, f}, {b});
    model.addOperation(ANEURALNETWORKS_ADD, {b, e, f}, {d});
    model.identifyInputsAndOutputs({c}, {d});
    ASSERT_TRUE(model.isValid());
    model.finish();

    // Test the two node model.
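    // The input and the output each get their own shared memory region, again
    // at a non-zero offset; the result is checked by reading the output
    // directly through the mmap()ed pointer.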
    constexpr uint32_t offsetForMatrix1 = 20;
    constexpr size_t inputSize = offsetForMatrix1 + sizeof(Matrix3x4);
    int inputFd = ASharedMemory_create("input", inputSize);
    ASSERT_GT(inputFd, -1);
    uint8_t* inputData = (uint8_t*)mmap(nullptr, inputSize, PROT_READ | PROT_WRITE,
                                        MAP_SHARED, inputFd, 0);
    ASSERT_NE(inputData, MAP_FAILED);
    memcpy(inputData + offsetForMatrix1, matrix1, sizeof(Matrix3x4));
    WrapperMemory input(inputSize, PROT_READ, inputFd, 0);
    ASSERT_TRUE(input.isValid());

    constexpr uint32_t offsetForActual = 32;
    constexpr size_t outputSize = offsetForActual + sizeof(Matrix3x4);
    int outputFd = ASharedMemory_create("output", outputSize);
    ASSERT_GT(outputFd, -1);
    uint8_t* outputData = (uint8_t*)mmap(nullptr, outputSize, PROT_READ | PROT_WRITE,
                                         MAP_SHARED, outputFd, 0);
    ASSERT_NE(outputData, MAP_FAILED);
    memset(outputData, 0, outputSize);
    WrapperMemory actual(outputSize, PROT_READ | PROT_WRITE, outputFd, 0);
    ASSERT_TRUE(actual.isValid());

    WrapperCompilation compilation2(&model);
    ASSERT_EQ(compilation2.finish(), WrapperResult::NO_ERROR);

    WrapperExecution execution2(&compilation2);
    ASSERT_EQ(execution2.setInputFromMemory(0, &input, offsetForMatrix1, sizeof(Matrix3x4)),
              WrapperResult::NO_ERROR);
    ASSERT_EQ(execution2.setOutputFromMemory(0, &actual, offsetForActual, sizeof(Matrix3x4)),
              WrapperResult::NO_ERROR);
    ASSERT_EQ(execution2.compute(), WrapperResult::NO_ERROR);
    ASSERT_EQ(CompareMatrices(expected3,
                              *reinterpret_cast<Matrix3x4*>(outputData + offsetForActual)), 0);

    munmap(weightsData, weightsSize);
    munmap(inputData, inputSize);
    munmap(outputData, outputSize);
    close(weightsFd);
    close(inputFd);
    close(outputFd);
}

// Regression test for http://b/69621433 "MemoryFd leaks shared memory regions".
TEST_F(MemoryLeakTest, GetPointer) {
    static const size_t size = 1;

    int fd = ASharedMemory_create(nullptr, size);
    ASSERT_GE(fd, 0);

    uint8_t* buf = (uint8_t*)mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    ASSERT_NE(buf, MAP_FAILED);
    *buf = 0;

    {
        // Scope "mem" in such a way that any shared memory regions it
        // owns will be released before we check the value of *buf: We
        // want to verify that the explicit mmap() above is not
        // perturbed by any mmap()/munmap() that results from methods
        // invoked on "mem".

        WrapperMemory mem(size, PROT_READ | PROT_WRITE, fd, 0);
        ASSERT_TRUE(mem.isValid());

        auto internalMem = reinterpret_cast<::android::nn::Memory*>(mem.get());
        uint8_t* dummy;
        ASSERT_EQ(internalMem->getPointer(&dummy), ANEURALNETWORKS_NO_ERROR);
        (*dummy)++;
    }

    ASSERT_EQ(*buf, (uint8_t)1);
    ASSERT_EQ(munmap(buf, size), 0);

    close(fd);
}

// Regression test for http://b/69621433 "MemoryFd leaks shared memory regions".
TEST_F(MemoryLeakTest, Instantiate) {
    static const size_t size = 1;
    int fd = ASharedMemory_create(nullptr, size);
    ASSERT_GE(fd, 0);
    WrapperMemory mem(size, PROT_READ | PROT_WRITE, fd, 0);
    ASSERT_TRUE(mem.isValid());

    auto internalMem = reinterpret_cast<::android::nn::Memory*>(mem.get());
    uint8_t* dummy;
    ASSERT_EQ(internalMem->getPointer(&dummy), ANEURALNETWORKS_NO_ERROR);

    close(fd);
}

#ifndef NNTEST_ONLY_PUBLIC_API
// Regression test for http://b/73663843, conv_2d trying to allocate too much memory.
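// The test forces CPU-only execution through the internal DeviceManager API,
// which is why it is compiled only when internal APIs are available
// (i.e. not under NNTEST_ONLY_PUBLIC_API).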
TEST_F(MemoryLeakTest, convTooLarge) {
    android::nn::DeviceManager::get()->setUseCpuOnly(true);
    WrapperModel model;

    // This kernel/input size will make convQuant8 allocate
    // 12 * 13 * 13 * 128 * 92 * 92, which is just outside of signed 32-bit int
    // range (0x82F56000) - this will fail due to CPU implementation limitations.
    WrapperOperandType type3(WrapperType::INT32, {});
    WrapperOperandType type2(WrapperType::TENSOR_INT32, {128}, 0.25, 0);
    WrapperOperandType type0(WrapperType::TENSOR_QUANT8_ASYMM, {12, 104, 104, 128}, 0.5, 0);
    WrapperOperandType type4(WrapperType::TENSOR_QUANT8_ASYMM, {12, 92, 92, 128}, 1.0, 0);
    WrapperOperandType type1(WrapperType::TENSOR_QUANT8_ASYMM, {128, 13, 13, 128}, 0.5, 0);

    // Operands
    auto op1 = model.addOperand(&type0);
    auto op2 = model.addOperand(&type1);
    auto op3 = model.addOperand(&type2);
    auto pad0 = model.addOperand(&type3);
    auto act = model.addOperand(&type3);
    auto stride = model.addOperand(&type3);
    auto op4 = model.addOperand(&type4);

    // Operations
    uint8_t op2_init[128 * 13 * 13 * 128] = {};
    model.setOperandValue(op2, op2_init, sizeof(op2_init));
    int32_t op3_init[128] = {};
    model.setOperandValue(op3, op3_init, sizeof(op3_init));
    int32_t pad0_init[] = {0};
    model.setOperandValue(pad0, pad0_init, sizeof(pad0_init));
    int32_t act_init[] = {0};
    model.setOperandValue(act, act_init, sizeof(act_init));
    int32_t stride_init[] = {1};
    model.setOperandValue(stride, stride_init, sizeof(stride_init));
    model.addOperation(ANEURALNETWORKS_CONV_2D,
                       {op1, op2, op3, pad0, pad0, pad0, pad0, stride, stride, act}, {op4});

    // Inputs and outputs
    model.identifyInputsAndOutputs({op1}, {op4});
    ASSERT_TRUE(model.isValid());
    model.finish();

    // Compilation
    WrapperCompilation compilation(&model);
    ASSERT_EQ(WrapperResult::NO_ERROR, compilation.finish());
    WrapperExecution execution(&compilation);

    // Set input and outputs
    static uint8_t input[12 * 104 * 104 * 128] = {};
    ASSERT_EQ(WrapperResult::NO_ERROR, execution.setInput(0, input, sizeof(input)));
    static uint8_t output[12 * 92 * 92 * 128] = {};
    ASSERT_EQ(WrapperResult::NO_ERROR, execution.setOutput(0, output, sizeof(output)));

    // This shouldn't segfault
    WrapperResult r = execution.compute();

    ASSERT_EQ(WrapperResult::OP_FAILED, r);
}
#endif  // NNTEST_ONLY_PUBLIC_API

}  // end namespace