// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/arguments-inl.h"
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/conversions-inl.h"
#include "src/heap/factory.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/runtime/runtime-utils.h"

// Implement Atomic accesses to SharedArrayBuffers as defined in the
// SharedArrayBuffer draft spec, found here
// https://github.com/tc39/ecmascript_sharedmem

namespace v8 {
namespace internal {

namespace {

#if V8_CC_GNU

// GCC/Clang: each primitive maps directly onto a __atomic_* builtin with
// sequentially consistent ordering. Every helper returns the value that was
// stored at *p before the operation.

template <typename T>
inline T ExchangeSeqCst(T* p, T value) {
  return __atomic_exchange_n(p, value, __ATOMIC_SEQ_CST);
}

// On failure __atomic_compare_exchange_n writes the current value of *p back
// into |oldval|, so returning |oldval| yields the previous value in both the
// success and failure cases.
template <typename T>
inline T CompareExchangeSeqCst(T* p, T oldval, T newval) {
  (void)__atomic_compare_exchange_n(p, &oldval, newval, 0, __ATOMIC_SEQ_CST,
                                    __ATOMIC_SEQ_CST);
  return oldval;
}

template <typename T>
inline T AddSeqCst(T* p, T value) {
  return __atomic_fetch_add(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T SubSeqCst(T* p, T value) {
  return __atomic_fetch_sub(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T AndSeqCst(T* p, T value) {
  return __atomic_fetch_and(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T OrSeqCst(T* p, T value) {
  return __atomic_fetch_or(p, value, __ATOMIC_SEQ_CST);
}

template <typename T>
inline T XorSeqCst(T* p, T value) {
  return __atomic_fetch_xor(p, value, __ATOMIC_SEQ_CST);
}

#elif V8_CC_MSVC

// MSVC: the 32-bit Interlocked* intrinsics carry no size suffix, so alias
// them to suffixed names that ATOMIC_OPS below can token-paste uniformly.

#define InterlockedExchange32 _InterlockedExchange
#define InterlockedCompareExchange32 _InterlockedCompareExchange
#define InterlockedCompareExchange8 _InterlockedCompareExchange8
#define InterlockedExchangeAdd32 _InterlockedExchangeAdd
#define InterlockedExchangeAdd16 _InterlockedExchangeAdd16
#define InterlockedExchangeAdd8 _InterlockedExchangeAdd8
#define InterlockedAnd32 _InterlockedAnd
#define InterlockedOr32 _InterlockedOr
#define InterlockedXor32 _InterlockedXor

// Stamps out the full set of seq-cst helpers for one element |type|.
// |vctype| is the type the Interlocked intrinsic expects; values are
// bit_cast through it, and Sub is expressed as ExchangeAdd of the negation.
// Note the Interlocked compare-exchange argument order is
// (destination, exchange, comparand), hence newval before oldval below;
// the intrinsic returns the previous value of *p.
#define ATOMIC_OPS(type, suffix, vctype)                                    \
  inline type ExchangeSeqCst(type* p, type value) {                         \
    return InterlockedExchange##suffix(reinterpret_cast<vctype*>(p),        \
                                       bit_cast<vctype>(value));            \
  }                                                                         \
  inline type CompareExchangeSeqCst(type* p, type oldval, type newval) {    \
    return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \
                                              bit_cast<vctype>(newval),     \
                                              bit_cast<vctype>(oldval));    \
  }                                                                         \
  inline type AddSeqCst(type* p, type value) {                              \
    return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p),     \
                                          bit_cast<vctype>(value));         \
  }                                                                         \
  inline type SubSeqCst(type* p, type value) {                              \
    return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p),     \
                                          -bit_cast<vctype>(value));        \
  }                                                                         \
  inline type AndSeqCst(type* p, type value) {                              \
    return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p),             \
                                  bit_cast<vctype>(value));                 \
  }                                                                         \
  inline type OrSeqCst(type* p, type value) {                               \
    return InterlockedOr##suffix(reinterpret_cast<vctype*>(p),              \
                                 bit_cast<vctype>(value));                  \
  }                                                                         \
  inline type XorSeqCst(type* p, type value) {                              \
    return InterlockedXor##suffix(reinterpret_cast<vctype*>(p),             \
                                  bit_cast<vctype>(value));                 \
  }

ATOMIC_OPS(int8_t, 8, char)
ATOMIC_OPS(uint8_t, 8, char)
ATOMIC_OPS(int16_t, 16, short)  /* NOLINT(runtime/int) */
ATOMIC_OPS(uint16_t, 16, short) /* NOLINT(runtime/int) */
ATOMIC_OPS(int32_t, 32, long)   /* NOLINT(runtime/int) */
ATOMIC_OPS(uint32_t, 32, long)  /* NOLINT(runtime/int) */

#undef ATOMIC_OPS

#undef InterlockedExchange32
#undef InterlockedCompareExchange32
#undef InterlockedCompareExchange8
#undef InterlockedExchangeAdd32
#undef InterlockedExchangeAdd16
#undef InterlockedExchangeAdd8
#undef InterlockedAnd32 120 #undef InterlockedOr32 121 #undef InterlockedXor32 122 123 #else 124 125 #error Unsupported platform! 126 127 #endif 128 129 template <typename T> 130 T FromObject(Handle<Object> number); 131 132 template <> 133 inline uint8_t FromObject<uint8_t>(Handle<Object> number) { 134 return NumberToUint32(*number); 135 } 136 137 template <> 138 inline int8_t FromObject<int8_t>(Handle<Object> number) { 139 return NumberToInt32(*number); 140 } 141 142 template <> 143 inline uint16_t FromObject<uint16_t>(Handle<Object> number) { 144 return NumberToUint32(*number); 145 } 146 147 template <> 148 inline int16_t FromObject<int16_t>(Handle<Object> number) { 149 return NumberToInt32(*number); 150 } 151 152 template <> 153 inline uint32_t FromObject<uint32_t>(Handle<Object> number) { 154 return NumberToUint32(*number); 155 } 156 157 template <> 158 inline int32_t FromObject<int32_t>(Handle<Object> number) { 159 return NumberToInt32(*number); 160 } 161 162 163 inline Object* ToObject(Isolate* isolate, int8_t t) { return Smi::FromInt(t); } 164 165 inline Object* ToObject(Isolate* isolate, uint8_t t) { return Smi::FromInt(t); } 166 167 inline Object* ToObject(Isolate* isolate, int16_t t) { return Smi::FromInt(t); } 168 169 inline Object* ToObject(Isolate* isolate, uint16_t t) { 170 return Smi::FromInt(t); 171 } 172 173 inline Object* ToObject(Isolate* isolate, int32_t t) { 174 return *isolate->factory()->NewNumber(t); 175 } 176 177 inline Object* ToObject(Isolate* isolate, uint32_t t) { 178 return *isolate->factory()->NewNumber(t); 179 } 180 181 template <typename T> 182 inline Object* DoExchange(Isolate* isolate, void* buffer, size_t index, 183 Handle<Object> obj) { 184 T value = FromObject<T>(obj); 185 T result = ExchangeSeqCst(static_cast<T*>(buffer) + index, value); 186 return ToObject(isolate, result); 187 } 188 189 template <typename T> 190 inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index, 191 Handle<Object> oldobj, 
Handle<Object> newobj) { 192 T oldval = FromObject<T>(oldobj); 193 T newval = FromObject<T>(newobj); 194 T result = 195 CompareExchangeSeqCst(static_cast<T*>(buffer) + index, oldval, newval); 196 return ToObject(isolate, result); 197 } 198 199 template <typename T> 200 inline Object* DoAdd(Isolate* isolate, void* buffer, size_t index, 201 Handle<Object> obj) { 202 T value = FromObject<T>(obj); 203 T result = AddSeqCst(static_cast<T*>(buffer) + index, value); 204 return ToObject(isolate, result); 205 } 206 207 template <typename T> 208 inline Object* DoSub(Isolate* isolate, void* buffer, size_t index, 209 Handle<Object> obj) { 210 T value = FromObject<T>(obj); 211 T result = SubSeqCst(static_cast<T*>(buffer) + index, value); 212 return ToObject(isolate, result); 213 } 214 215 template <typename T> 216 inline Object* DoAnd(Isolate* isolate, void* buffer, size_t index, 217 Handle<Object> obj) { 218 T value = FromObject<T>(obj); 219 T result = AndSeqCst(static_cast<T*>(buffer) + index, value); 220 return ToObject(isolate, result); 221 } 222 223 template <typename T> 224 inline Object* DoOr(Isolate* isolate, void* buffer, size_t index, 225 Handle<Object> obj) { 226 T value = FromObject<T>(obj); 227 T result = OrSeqCst(static_cast<T*>(buffer) + index, value); 228 return ToObject(isolate, result); 229 } 230 231 template <typename T> 232 inline Object* DoXor(Isolate* isolate, void* buffer, size_t index, 233 Handle<Object> obj) { 234 T value = FromObject<T>(obj); 235 T result = XorSeqCst(static_cast<T*>(buffer) + index, value); 236 return ToObject(isolate, result); 237 } 238 239 } // anonymous namespace 240 241 // Duplicated from objects.h 242 // V has parameters (Type, type, TYPE, C type) 243 #define INTEGER_TYPED_ARRAYS(V) \ 244 V(Uint8, uint8, UINT8, uint8_t) \ 245 V(Int8, int8, INT8, int8_t) \ 246 V(Uint16, uint16, UINT16, uint16_t) \ 247 V(Int16, int16, INT16, int16_t) \ 248 V(Uint32, uint32, UINT32, uint32_t) \ 249 V(Int32, int32, INT32, int32_t) 250 251 
// ES #sec-atomics.exchange
// Atomics.exchange( typedArray, index, value )
RUNTIME_FUNCTION(Runtime_AtomicsExchange) {
  HandleScope scope(isolate);
  DCHECK_EQ(3, args.length());
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  // The backing buffer must be a SharedArrayBuffer and the index in bounds;
  // the caller is expected to have validated both already.
  CHECK(sta->GetBuffer()->is_shared());
  CHECK_LT(index, NumberToSize(sta->length()));

  // Address of element 0 of the typed array within the shared backing store.
  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(sta->byte_offset());

  // Dispatch on element type to the correctly-typed helper.
  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype) \
  case kExternal##Type##Array:                        \
    return DoExchange<ctype>(isolate, source, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      break;
  }

  // Only the integer typed-array kinds above are valid for Atomics.
  UNREACHABLE();
}

// ES #sec-atomics.compareexchange
// Atomics.compareExchange( typedArray, index, expectedValue, replacementValue )
RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
  HandleScope scope(isolate);
  DCHECK_EQ(4, args.length());
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(oldobj, 2);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(newobj, 3);
  CHECK(sta->GetBuffer()->is_shared());
  CHECK_LT(index, NumberToSize(sta->length()));

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype) \
  case kExternal##Type##Array:                        \
    return DoCompareExchange<ctype>(isolate, source, index, oldobj, newobj);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      break;
  }

  UNREACHABLE();
}

// ES #sec-atomics.add
// Atomics.add( typedArray, index, value )
RUNTIME_FUNCTION(Runtime_AtomicsAdd) {
  HandleScope scope(isolate);
  DCHECK_EQ(3, args.length());
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  CHECK(sta->GetBuffer()->is_shared());
  CHECK_LT(index, NumberToSize(sta->length()));

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype) \
  case kExternal##Type##Array:                        \
    return DoAdd<ctype>(isolate, source, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      break;
  }

  UNREACHABLE();
}

// ES #sec-atomics.sub
// Atomics.sub( typedArray, index, value )
RUNTIME_FUNCTION(Runtime_AtomicsSub) {
  HandleScope scope(isolate);
  DCHECK_EQ(3, args.length());
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  CHECK(sta->GetBuffer()->is_shared());
  CHECK_LT(index, NumberToSize(sta->length()));

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype) \
  case kExternal##Type##Array:                        \
    return DoSub<ctype>(isolate, source, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      break;
  }

  UNREACHABLE();
}

// ES #sec-atomics.and
// Atomics.and( typedArray, index, value )
RUNTIME_FUNCTION(Runtime_AtomicsAnd) {
  HandleScope scope(isolate);
  DCHECK_EQ(3, args.length());
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  CHECK(sta->GetBuffer()->is_shared());
  CHECK_LT(index, NumberToSize(sta->length()));

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype) \
  case kExternal##Type##Array:                        \
    return DoAnd<ctype>(isolate, source, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      break;
  }

  UNREACHABLE();
}

// ES #sec-atomics.or
// Atomics.or( typedArray, index, value )
RUNTIME_FUNCTION(Runtime_AtomicsOr) {
  HandleScope scope(isolate);
  DCHECK_EQ(3, args.length());
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  CHECK(sta->GetBuffer()->is_shared());
  CHECK_LT(index, NumberToSize(sta->length()));

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype) \
  case kExternal##Type##Array:                        \
    return DoOr<ctype>(isolate, source, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      break;
  }

  UNREACHABLE();
}

// ES #sec-atomics.xor
// Atomics.xor( typedArray, index, value )
RUNTIME_FUNCTION(Runtime_AtomicsXor) {
  HandleScope scope(isolate);
  DCHECK_EQ(3, args.length());
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  CHECK(sta->GetBuffer()->is_shared());
  CHECK_LT(index, NumberToSize(sta->length()));

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype) \
  case kExternal##Type##Array:                        \
    return DoXor<ctype>(isolate, source, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    default:
      break;
  }

  UNREACHABLE();
}

#undef INTEGER_TYPED_ARRAYS

}  // namespace internal
}  // namespace v8