/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_MIRROR_ARRAY_INL_H_
#define ART_RUNTIME_MIRROR_ARRAY_INL_H_

#include "array.h"

#include "class.h"
#include "gc/heap-inl.h"
#include "thread.h"
#include "utils.h"

namespace art {
namespace mirror {

inline uint32_t Array::ClassSize() {
  uint32_t vtable_entries = Object::kVTableLength;
  return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0);
}

template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
inline size_t Array::SizeOf() {
  // This is safe from overflow because the array was already allocated, so we know it's sane.
  size_t component_size =
      GetClass<kVerifyFlags, kReadBarrierOption>()->template GetComponentSize<kReadBarrierOption>();
  // Don't need to check this since we already check this in GetClass.
  int32_t component_count =
      GetLength<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>();
  size_t header_size = DataOffset(component_size).SizeValue();
  size_t data_size = component_count * component_size;
  return header_size + data_size;
}

template<VerifyObjectFlags kVerifyFlags>
inline bool Array::CheckIsValidIndex(int32_t index) {
  if (UNLIKELY(static_cast<uint32_t>(index) >=
               static_cast<uint32_t>(GetLength<kVerifyFlags>()))) {
    ThrowArrayIndexOutOfBoundsException(index);
    return false;
  }
  return true;
}

static inline size_t ComputeArraySize(Thread* self, Class* array_class, int32_t component_count,
                                      size_t component_size)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  DCHECK(array_class != nullptr);
  DCHECK_GE(component_count, 0);
  DCHECK(array_class->IsArrayClass());

  size_t header_size = Array::DataOffset(component_size).SizeValue();
  size_t data_size = component_count * component_size;
  size_t size = header_size + data_size;

  // Check for overflow and throw OutOfMemoryError if this was an unreasonable request.
  // component_size is a power of two, so the shift recovers log2(component_size); the check
  // below detects whether component_count * component_size wrapped around.
  size_t component_shift = sizeof(size_t) * 8 - 1 - CLZ(component_size);
  if (UNLIKELY(data_size >> component_shift != size_t(component_count) || size < data_size)) {
    self->ThrowOutOfMemoryError(StringPrintf("%s of length %d would overflow",
                                             PrettyDescriptor(array_class).c_str(),
                                             component_count).c_str());
    return 0;  // failure
  }
  return size;
}

// Used for setting the array length in the allocation code path to ensure it is guarded by a
// StoreStore fence.
class SetLengthVisitor {
 public:
  explicit SetLengthVisitor(int32_t length) : length_(length) {
  }

  void operator()(Object* obj, size_t usable_size) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    UNUSED(usable_size);
    // Avoid AsArray as object is not yet in live bitmap or allocation stack.
    Array* array = down_cast<Array*>(obj);
    // DCHECK(array->IsArrayInstance());
    array->SetLength(length_);
  }

 private:
  const int32_t length_;

  DISALLOW_COPY_AND_ASSIGN(SetLengthVisitor);
};

// Similar to SetLengthVisitor, used for setting the array length to fill the usable size of an
// array.
class SetLengthToUsableSizeVisitor {
 public:
  SetLengthToUsableSizeVisitor(int32_t min_length, size_t header_size, size_t component_size) :
      minimum_length_(min_length), header_size_(header_size), component_size_(component_size) {
  }

  void operator()(Object* obj, size_t usable_size) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // Avoid AsArray as object is not yet in live bitmap or allocation stack.
    Array* array = down_cast<Array*>(obj);
    // DCHECK(array->IsArrayInstance());
    int32_t length = (usable_size - header_size_) / component_size_;
    DCHECK_GE(length, minimum_length_);
    byte* old_end = reinterpret_cast<byte*>(array->GetRawData(component_size_, minimum_length_));
    byte* new_end = reinterpret_cast<byte*>(array->GetRawData(component_size_, length));
    // Ensure space beyond original allocation is zeroed.
    memset(old_end, 0, new_end - old_end);
    array->SetLength(length);
  }

 private:
  const int32_t minimum_length_;
  const size_t header_size_;
  const size_t component_size_;

  DISALLOW_COPY_AND_ASSIGN(SetLengthToUsableSizeVisitor);
};

template <bool kIsInstrumented>
inline Array* Array::Alloc(Thread* self, Class* array_class, int32_t component_count,
                           size_t component_size, gc::AllocatorType allocator_type,
                           bool fill_usable) {
  DCHECK(allocator_type != gc::kAllocatorTypeLOS);
  size_t size = ComputeArraySize(self, array_class, component_count, component_size);
  if (UNLIKELY(size == 0)) {
    return nullptr;
  }
  gc::Heap* heap = Runtime::Current()->GetHeap();
  Array* result;
  if (!fill_usable) {
    SetLengthVisitor visitor(component_count);
    result = down_cast<Array*>(
        heap->AllocObjectWithAllocator<kIsInstrumented, true>(self, array_class, size,
                                                              allocator_type, visitor));
  } else {
    SetLengthToUsableSizeVisitor visitor(component_count, DataOffset(component_size).SizeValue(),
                                         component_size);
    result = down_cast<Array*>(
        heap->AllocObjectWithAllocator<kIsInstrumented, true>(self, array_class, size,
                                                              allocator_type, visitor));
  }
  if (kIsDebugBuild && result != nullptr && Runtime::Current()->IsStarted()) {
    array_class = result->GetClass();  // In case the array class moved.
    CHECK_EQ(array_class->GetComponentSize(), component_size);
    if (!fill_usable) {
      CHECK_EQ(result->SizeOf(), size);
    } else {
      CHECK_GE(result->SizeOf(), size);
    }
  }
  return result;
}

template<class T>
inline void PrimitiveArray<T>::VisitRoots(RootCallback* callback, void* arg) {
  array_class_.VisitRootIfNonNull(callback, arg, RootInfo(kRootStickyClass));
}

template<typename T>
inline PrimitiveArray<T>* PrimitiveArray<T>::Alloc(Thread* self, size_t length) {
  Array* raw_array = Array::Alloc<true>(self, GetArrayClass(), length, sizeof(T),
                                        Runtime::Current()->GetHeap()->GetCurrentAllocator());
  return down_cast<PrimitiveArray<T>*>(raw_array);
}

template<typename T>
inline T PrimitiveArray<T>::Get(int32_t i) {
  if (!CheckIsValidIndex(i)) {
    DCHECK(Thread::Current()->IsExceptionPending());
    return T(0);
  }
  return GetWithoutChecks(i);
}

template<typename T>
inline void PrimitiveArray<T>::Set(int32_t i, T value) {
  if (Runtime::Current()->IsActiveTransaction()) {
    Set<true>(i, value);
  } else {
    Set<false>(i, value);
  }
}

template<typename T>
template<bool kTransactionActive, bool kCheckTransaction>
inline void PrimitiveArray<T>::Set(int32_t i, T value) {
  if (CheckIsValidIndex(i)) {
    SetWithoutChecks<kTransactionActive, kCheckTransaction>(i, value);
  } else {
    DCHECK(Thread::Current()->IsExceptionPending());
  }
}

template<typename T>
template<bool kTransactionActive, bool kCheckTransaction>
inline void PrimitiveArray<T>::SetWithoutChecks(int32_t i, T value) {
  if (kCheckTransaction) {
    DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
  }
  if (kTransactionActive) {
    Runtime::Current()->RecordWriteArray(this, i, GetWithoutChecks(i));
  }
  DCHECK(CheckIsValidIndex(i));
  GetData()[i] = value;
}

// Backward copy where elements are aligned appropriately for T. Count is in T-sized units.
// Copies are guaranteed not to tear when sizeof(T) is less than 64 bits.
template<typename T>
static inline void ArrayBackwardCopy(T* d, const T* s, int32_t count) {
  d += count;
  s += count;
  for (int32_t i = 0; i < count; ++i) {
    d--;
    s--;
    *d = *s;
  }
}

// Forward copy where elements are aligned appropriately for T. Count is in T-sized units.
// Copies are guaranteed not to tear when sizeof(T) is less than 64 bits.
template<typename T>
static inline void ArrayForwardCopy(T* d, const T* s, int32_t count) {
  for (int32_t i = 0; i < count; ++i) {
    *d = *s;
    d++;
    s++;
  }
}

template<class T>
inline void PrimitiveArray<T>::Memmove(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos,
                                       int32_t count) {
  if (UNLIKELY(count == 0)) {
    return;
  }
  DCHECK_GE(dst_pos, 0);
  DCHECK_GE(src_pos, 0);
  DCHECK_GT(count, 0);
  DCHECK(src != nullptr);
  DCHECK_LT(dst_pos, GetLength());
  DCHECK_LE(dst_pos, GetLength() - count);
  DCHECK_LT(src_pos, src->GetLength());
  DCHECK_LE(src_pos, src->GetLength() - count);

  // Note: in our implementation, non-byte copies can't rely on standard libc functions like
  // memcpy(3) and memmove(3), because they may copy byte-by-byte.
  if (LIKELY(src != this)) {
    // Memcpy ok for guaranteed non-overlapping distinct arrays.
    Memcpy(dst_pos, src, src_pos, count);
  } else {
    // Handle copies within the same array using the appropriate direction copy.
    void* dst_raw = GetRawData(sizeof(T), dst_pos);
    const void* src_raw = src->GetRawData(sizeof(T), src_pos);
    if (sizeof(T) == sizeof(uint8_t)) {
      uint8_t* d = reinterpret_cast<uint8_t*>(dst_raw);
      const uint8_t* s = reinterpret_cast<const uint8_t*>(src_raw);
      memmove(d, s, count);
    } else {
      const bool copy_forward = (dst_pos < src_pos) || (dst_pos - src_pos >= count);
      if (sizeof(T) == sizeof(uint16_t)) {
        uint16_t* d = reinterpret_cast<uint16_t*>(dst_raw);
        const uint16_t* s = reinterpret_cast<const uint16_t*>(src_raw);
        if (copy_forward) {
          ArrayForwardCopy<uint16_t>(d, s, count);
        } else {
          ArrayBackwardCopy<uint16_t>(d, s, count);
        }
      } else if (sizeof(T) == sizeof(uint32_t)) {
        uint32_t* d = reinterpret_cast<uint32_t*>(dst_raw);
        const uint32_t* s = reinterpret_cast<const uint32_t*>(src_raw);
        if (copy_forward) {
          ArrayForwardCopy<uint32_t>(d, s, count);
        } else {
          ArrayBackwardCopy<uint32_t>(d, s, count);
        }
      } else {
        DCHECK_EQ(sizeof(T), sizeof(uint64_t));
        uint64_t* d = reinterpret_cast<uint64_t*>(dst_raw);
        const uint64_t* s = reinterpret_cast<const uint64_t*>(src_raw);
        if (copy_forward) {
          ArrayForwardCopy<uint64_t>(d, s, count);
        } else {
          ArrayBackwardCopy<uint64_t>(d, s, count);
        }
      }
    }
  }
}

template<class T>
inline void PrimitiveArray<T>::Memcpy(int32_t dst_pos, PrimitiveArray<T>* src, int32_t src_pos,
                                      int32_t count) {
  if (UNLIKELY(count == 0)) {
    return;
  }
  DCHECK_GE(dst_pos, 0);
  DCHECK_GE(src_pos, 0);
  DCHECK_GT(count, 0);
  DCHECK(src != nullptr);
  DCHECK_LT(dst_pos, GetLength());
  DCHECK_LE(dst_pos, GetLength() - count);
  DCHECK_LT(src_pos, src->GetLength());
  DCHECK_LE(src_pos, src->GetLength() - count);

  // Note: in our implementation, non-byte copies can't rely on standard libc functions like
  // memcpy(3) and memmove(3), because they may copy byte-by-byte.
  void* dst_raw = GetRawData(sizeof(T), dst_pos);
  const void* src_raw = src->GetRawData(sizeof(T), src_pos);
  if (sizeof(T) == sizeof(uint8_t)) {
    memcpy(dst_raw, src_raw, count);
  } else if (sizeof(T) == sizeof(uint16_t)) {
    uint16_t* d = reinterpret_cast<uint16_t*>(dst_raw);
    const uint16_t* s = reinterpret_cast<const uint16_t*>(src_raw);
    ArrayForwardCopy<uint16_t>(d, s, count);
  } else if (sizeof(T) == sizeof(uint32_t)) {
    uint32_t* d = reinterpret_cast<uint32_t*>(dst_raw);
    const uint32_t* s = reinterpret_cast<const uint32_t*>(src_raw);
    ArrayForwardCopy<uint32_t>(d, s, count);
  } else {
    DCHECK_EQ(sizeof(T), sizeof(uint64_t));
    uint64_t* d = reinterpret_cast<uint64_t*>(dst_raw);
    const uint64_t* s = reinterpret_cast<const uint64_t*>(src_raw);
    ArrayForwardCopy<uint64_t>(d, s, count);
  }
}

}  // namespace mirror
}  // namespace art

#endif  // ART_RUNTIME_MIRROR_ARRAY_INL_H_