// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/runtime/runtime-utils.h"

#include "src/arguments.h"
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/conversions-inl.h"
#include "src/factory.h"

// Implement Atomic accesses to SharedArrayBuffers as defined in the
// SharedArrayBuffer draft spec, found here
// https://github.com/lars-t-hansen/ecmascript_sharedmem

namespace v8 {
namespace internal {

namespace {

// Returns true if an atomic access of |size| bytes is lock-free on this
// platform. Only the 1-, 2-, and 4-byte element sizes used by the integer
// typed arrays handled below are reported lock-free.
inline bool AtomicIsLockFree(uint32_t size) {
  return size == 1 || size == 2 || size == 4;
}

#if V8_CC_GNU

// GCC/Clang implementations of the sequentially-consistent atomic
// primitives, built on the __atomic_* builtins. All fetch-and-op helpers
// return the value of *p observed BEFORE the operation, matching the
// Atomics.* semantics required by the spec.

// Atomically compares *p with oldval and, if equal, stores newval.
// Returns the value observed in *p: on success that is oldval; on failure
// __atomic_compare_exchange_n writes the conflicting value back into
// oldval, so returning oldval covers both cases.
template <typename T>
inline T CompareExchangeSeqCst(T* p, T oldval, T newval) {
  (void)__atomic_compare_exchange_n(p, &oldval, newval, 0, __ATOMIC_SEQ_CST,
                                    __ATOMIC_SEQ_CST);
  return oldval;
}

// Sequentially-consistent load of *p.
template <typename T>
inline T LoadSeqCst(T* p) {
  T result;
  __atomic_load(p, &result, __ATOMIC_SEQ_CST);
  return result;
}

// Sequentially-consistent store of value into *p.
template <typename T>
inline void StoreSeqCst(T* p, T value) {
  __atomic_store_n(p, value, __ATOMIC_SEQ_CST);
}

// Atomic *p += value; returns the previous value.
template <typename T>
inline T AddSeqCst(T* p, T value) {
  return __atomic_fetch_add(p, value, __ATOMIC_SEQ_CST);
}

// Atomic *p -= value; returns the previous value.
template <typename T>
inline T SubSeqCst(T* p, T value) {
  return __atomic_fetch_sub(p, value, __ATOMIC_SEQ_CST);
}

// Atomic *p &= value; returns the previous value.
template <typename T>
inline T AndSeqCst(T* p, T value) {
  return __atomic_fetch_and(p, value, __ATOMIC_SEQ_CST);
}

// Atomic *p |= value; returns the previous value.
template <typename T>
inline T OrSeqCst(T* p, T value) {
  return __atomic_fetch_or(p, value, __ATOMIC_SEQ_CST);
}

// Atomic *p ^= value; returns the previous value.
template <typename T>
inline T XorSeqCst(T* p, T value) {
  return __atomic_fetch_xor(p, value, __ATOMIC_SEQ_CST);
}

// Atomically replaces *p with value; returns the previous value.
template <typename T>
inline T ExchangeSeqCst(T* p, T value) {
  return __atomic_exchange_n(p, value, __ATOMIC_SEQ_CST);
}

#elif V8_CC_MSVC

// MSVC implementations, built on the _Interlocked* intrinsics. The 32-bit
// intrinsics have no size suffix, so the aliases below give every width a
// uniform "##suffix" name that ATOMIC_OPS can token-paste.

#define InterlockedCompareExchange32 _InterlockedCompareExchange
#define InterlockedExchange32 _InterlockedExchange
#define InterlockedExchangeAdd32 _InterlockedExchangeAdd
#define InterlockedAnd32 _InterlockedAnd
#define InterlockedOr32 _InterlockedOr
#define InterlockedXor32 _InterlockedXor
#define InterlockedExchangeAdd16 _InterlockedExchangeAdd16
#define InterlockedCompareExchange8 _InterlockedCompareExchange8
#define InterlockedExchangeAdd8 _InterlockedExchangeAdd8

// Expands to the full family of *SeqCst helpers for one element |type|.
// |vctype| is the (signed) type the Interlocked##suffix intrinsics take;
// bit_cast converts to/from it without changing the bit pattern. Notes:
// - The Interlocked intrinsics return the PREVIOUS value, matching the
//   GNU branch above.
// - SubSeqCst is implemented as ExchangeAdd of the negated operand
//   (two's-complement negation, so correct for all inputs).
// - CompareExchangeSeqCst: the intrinsic takes (dest, exchange,
//   comparand), hence newval before oldval in the call.
// - LoadSeqCst is a plain load. NOTE(review): presumably adequate on the
//   x86/x64 targets MSVC builds for, where Interlocked ops are full
//   barriers and aligned loads are atomic -- confirm for other targets.
#define ATOMIC_OPS(type, suffix, vctype)                                    \
  inline type AddSeqCst(type* p, type value) {                              \
    return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p),     \
                                          bit_cast<vctype>(value));         \
  }                                                                         \
  inline type SubSeqCst(type* p, type value) {                              \
    return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p),     \
                                          -bit_cast<vctype>(value));        \
  }                                                                         \
  inline type AndSeqCst(type* p, type value) {                              \
    return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p),             \
                                  bit_cast<vctype>(value));                 \
  }                                                                         \
  inline type OrSeqCst(type* p, type value) {                               \
    return InterlockedOr##suffix(reinterpret_cast<vctype*>(p),              \
                                 bit_cast<vctype>(value));                  \
  }                                                                         \
  inline type XorSeqCst(type* p, type value) {                              \
    return InterlockedXor##suffix(reinterpret_cast<vctype*>(p),             \
                                  bit_cast<vctype>(value));                 \
  }                                                                         \
  inline type ExchangeSeqCst(type* p, type value) {                         \
    return InterlockedExchange##suffix(reinterpret_cast<vctype*>(p),        \
                                       bit_cast<vctype>(value));            \
  }                                                                         \
                                                                            \
  inline type CompareExchangeSeqCst(type* p, type oldval, type newval) {    \
    return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \
                                              bit_cast<vctype>(newval),     \
                                              bit_cast<vctype>(oldval));    \
  }                                                                         \
  inline type LoadSeqCst(type* p) { return *p; }                            \
  inline void StoreSeqCst(type* p, type value) {                            \
    InterlockedExchange##suffix(reinterpret_cast<vctype*>(p),               \
                                bit_cast<vctype>(value));                   \
  }

// Instantiate the helpers for every integer element width. The Interlocked
// intrinsics operate on signed char/short/long, hence the vctype choices;
// NOLINT suppresses the "use int16/int32" lint for the required types.
ATOMIC_OPS(int8_t, 8, char)
ATOMIC_OPS(uint8_t, 8, char)
ATOMIC_OPS(int16_t, 16, short)  /* NOLINT(runtime/int) */
ATOMIC_OPS(uint16_t, 16, short) /* NOLINT(runtime/int) */
ATOMIC_OPS(int32_t, 32, long)   /* NOLINT(runtime/int) */
ATOMIC_OPS(uint32_t, 32, long)  /* NOLINT(runtime/int) */

// NOTE(review): ATOMIC_OPS_INTEGER is never defined above -- this #undef
// looks like a leftover from an earlier macro split and is a no-op.
#undef ATOMIC_OPS_INTEGER
#undef ATOMIC_OPS

#undef InterlockedCompareExchange32
#undef InterlockedExchange32
#undef InterlockedExchangeAdd32
#undef InterlockedAnd32
#undef InterlockedOr32
#undef InterlockedXor32
#undef InterlockedExchangeAdd16
#undef InterlockedCompareExchange8
#undef InterlockedExchangeAdd8

#else

#error Unsupported platform!

#endif

// Converts a JS number (Handle<Object>) to the requested element type
// using the ECMAScript ToInt32/ToUint32 truncating conversions; the
// 8/16-bit specializations additionally truncate via the return type.
template <typename T>
T FromObject(Handle<Object> number);

template <>
inline uint8_t FromObject<uint8_t>(Handle<Object> number) {
  return NumberToUint32(*number);
}

template <>
inline int8_t FromObject<int8_t>(Handle<Object> number) {
  return NumberToInt32(*number);
}

template <>
inline uint16_t FromObject<uint16_t>(Handle<Object> number) {
  return NumberToUint32(*number);
}

template <>
inline int16_t FromObject<int16_t>(Handle<Object> number) {
  return NumberToInt32(*number);
}

template <>
inline uint32_t FromObject<uint32_t>(Handle<Object> number) {
  return NumberToUint32(*number);
}

template <>
inline int32_t FromObject<int32_t>(Handle<Object> number) {
  return NumberToInt32(*number);
}


// Converts an element value back to a JS number. 8/16-bit values always
// fit in a Smi; 32-bit values go through NewNumber, which may allocate
// (hence the runtime functions below hold a HandleScope).
inline Object* ToObject(Isolate* isolate, int8_t t) { return Smi::FromInt(t); }

inline Object* ToObject(Isolate* isolate, uint8_t t) { return Smi::FromInt(t); }

inline Object* ToObject(Isolate* isolate, int16_t t) { return Smi::FromInt(t); }

inline Object* ToObject(Isolate* isolate, uint16_t t) {
  return Smi::FromInt(t);
}


inline Object* ToObject(Isolate* isolate, int32_t t) {
  return *isolate->factory()->NewNumber(t);
}


inline Object* ToObject(Isolate* isolate, uint32_t t) {
  return *isolate->factory()->NewNumber(t);
}


// The Do* templates below perform one typed atomic operation on element
// |index| of |buffer| (the typed array's backing store, already offset to
// the array's start). Each converts its operand(s), runs the *SeqCst
// primitive, and boxes the result (the OLD value, per Atomics semantics).

inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index,
                                 Handle<Object> oldobj, Handle<Object> newobj) {
  T oldval = FromObject<T>(oldobj);
  T newval = FromObject<T>(newobj);
  T result =
      CompareExchangeSeqCst(static_cast<T*>(buffer) + index, oldval, newval);
  return ToObject(isolate, result);
}


template <typename T>
inline Object* DoLoad(Isolate* isolate, void* buffer, size_t index) {
  T result = LoadSeqCst(static_cast<T*>(buffer) + index);
  return ToObject(isolate, result);
}


// Store returns the (unconverted) input object, not the stored value.
template <typename T>
inline Object* DoStore(Isolate* isolate, void* buffer, size_t index,
                       Handle<Object> obj) {
  T value = FromObject<T>(obj);
  StoreSeqCst(static_cast<T*>(buffer) + index, value);
  return *obj;
}


template <typename T>
inline Object* DoAdd(Isolate* isolate, void* buffer, size_t index,
                     Handle<Object> obj) {
  T value = FromObject<T>(obj);
  T result = AddSeqCst(static_cast<T*>(buffer) + index, value);
  return ToObject(isolate, result);
}


template <typename T>
inline Object* DoSub(Isolate* isolate, void* buffer, size_t index,
                     Handle<Object> obj) {
  T value = FromObject<T>(obj);
  T result = SubSeqCst(static_cast<T*>(buffer) + index, value);
  return ToObject(isolate, result);
}


template <typename T>
inline Object* DoAnd(Isolate* isolate, void* buffer, size_t index,
                     Handle<Object> obj) {
  T value = FromObject<T>(obj);
  T result = AndSeqCst(static_cast<T*>(buffer) + index, value);
  return ToObject(isolate, result);
}


template <typename T>
inline Object* DoOr(Isolate* isolate, void* buffer, size_t index,
                    Handle<Object> obj) {
  T value = FromObject<T>(obj);
  T result = OrSeqCst(static_cast<T*>(buffer) + index, value);
  return ToObject(isolate, result);
}


template <typename T>
inline Object* DoXor(Isolate* isolate, void* buffer, size_t index,
                     Handle<Object> obj) {
  T value = FromObject<T>(obj);
  T result = XorSeqCst(static_cast<T*>(buffer) + index, value);
  return ToObject(isolate, result);
}


template <typename T>
inline Object* DoExchange(Isolate* isolate, void* buffer, size_t index,
                          Handle<Object> obj) {
  T value = FromObject<T>(obj);
  T result = ExchangeSeqCst(static_cast<T*>(buffer) + index, value);
  return ToObject(isolate, result);
}


// Uint8Clamped functions
//
// Uint8Clamped arrays cannot reuse the plain integer paths: the operand is
// converted as int32 and the result clamped to [0, 255], so read-modify-
// write ops are emulated with a CAS retry loop instead of fetch-and-op.

// Saturates an int32 into the uint8 range [0, 255].
uint8_t ClampToUint8(int32_t value) {
  if (value < 0) return 0;
  if (value > 255) return 255;
  return value;
}


inline Object* DoCompareExchangeUint8Clamped(Isolate* isolate, void* buffer,
                                             size_t index,
                                             Handle<Object> oldobj,
                                             Handle<Object> newobj) {
  typedef int32_t convert_type;
  uint8_t oldval = ClampToUint8(FromObject<convert_type>(oldobj));
  uint8_t newval = ClampToUint8(FromObject<convert_type>(newobj));
  uint8_t result = CompareExchangeSeqCst(static_cast<uint8_t*>(buffer) + index,
                                         oldval, newval);
  return ToObject(isolate, result);
}


inline Object* DoStoreUint8Clamped(Isolate* isolate, void* buffer, size_t index,
                                   Handle<Object> obj) {
  typedef int32_t convert_type;
  uint8_t value = ClampToUint8(FromObject<convert_type>(obj));
  StoreSeqCst(static_cast<uint8_t*>(buffer) + index, value);
  return *obj;
}


// Defines Do<name>Uint8Clamped as a CAS retry loop: read the current
// element, compute the clamped result in int32, and CAS it in; retry if
// another thread changed the element in between. Returns the old value
// (|expected| from the successful iteration).
#define DO_UINT8_CLAMPED_OP(name, op)                                        \
  inline Object* Do##name##Uint8Clamped(Isolate* isolate, void* buffer,      \
                                        size_t index, Handle<Object> obj) {  \
    typedef int32_t convert_type;                                            \
    uint8_t* p = static_cast<uint8_t*>(buffer) + index;                      \
    convert_type operand = FromObject<convert_type>(obj);                    \
    uint8_t expected;                                                        \
    uint8_t result;                                                          \
    do {                                                                     \
      expected = *p;                                                         \
      result = ClampToUint8(static_cast<convert_type>(expected) op operand); \
    } while (CompareExchangeSeqCst(p, expected, result) != expected);        \
    return ToObject(isolate, expected);                                      \
  }

DO_UINT8_CLAMPED_OP(Add, +)
DO_UINT8_CLAMPED_OP(Sub, -)
DO_UINT8_CLAMPED_OP(And, &)
DO_UINT8_CLAMPED_OP(Or, | )
DO_UINT8_CLAMPED_OP(Xor, ^)

#undef DO_UINT8_CLAMPED_OP


// Exchange needs no arithmetic, but still loops: CAS is used so the
// returned old value is the one actually replaced.
inline Object* DoExchangeUint8Clamped(Isolate* isolate, void* buffer,
                                      size_t index, Handle<Object> obj) {
  typedef int32_t convert_type;
  uint8_t* p = static_cast<uint8_t*>(buffer) + index;
  uint8_t result = ClampToUint8(FromObject<convert_type>(obj));
  uint8_t expected;
  do {
    expected = *p;
  } while (CompareExchangeSeqCst(p, expected, result) != expected);
  return ToObject(isolate, expected);
}


}  // anonymous namespace

// Duplicated from objects.h
// V has parameters (Type, type, TYPE, C type, element_size)
#define INTEGER_TYPED_ARRAYS(V) \
  V(Uint8, uint8, UINT8, uint8_t, 1) \
  V(Int8, int8, INT8, int8_t, 1) \
  V(Uint16, uint16, UINT16, uint16_t, 2) \
  V(Int16, int16, INT16, int16_t, 2) \
  V(Uint32, uint32, UINT32, uint32_t, 4) \
  V(Int32, int32, INT32, int32_t, 4)


// Each runtime function below follows the same shape:
//   1. unpack and validate arguments (the typed array must be backed by a
//      shared buffer, and the index must be in bounds);
//   2. compute |source|, the array's start inside the backing store;
//   3. dispatch on the element type to the matching Do* helper, with a
//      dedicated branch for Uint8Clamped.
// Float32/Float64 arrays fall through to UNREACHABLE().

// Atomics.compareExchange(typedArray, index, expectedValue, replacementValue)
RUNTIME_FUNCTION(Runtime_AtomicsCompareExchange) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 4);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(oldobj, 2);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(newobj, 3);
  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(isolate, sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoCompareExchange<ctype>(isolate, source, index, oldobj, newobj);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalUint8ClampedArray:
      return DoCompareExchangeUint8Clamped(isolate, source, index, oldobj,
                                           newobj);

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


// Atomics.load(typedArray, index)
RUNTIME_FUNCTION(Runtime_AtomicsLoad) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 2);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(isolate, sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoLoad<ctype>(isolate, source, index);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalUint8ClampedArray:
      // A clamped load needs no clamping; plain uint8 load suffices.
      return DoLoad<uint8_t>(isolate, source, index);

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


// Atomics.store(typedArray, index, value) -- returns |value|.
RUNTIME_FUNCTION(Runtime_AtomicsStore) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(isolate, sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoStore<ctype>(isolate, source, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalUint8ClampedArray:
      return DoStoreUint8Clamped(isolate, source, index, value);

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


// Atomics.add(typedArray, index, value) -- returns the old value.
RUNTIME_FUNCTION(Runtime_AtomicsAdd) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(isolate, sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoAdd<ctype>(isolate, source, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalUint8ClampedArray:
      return DoAddUint8Clamped(isolate, source, index, value);

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


// Atomics.sub(typedArray, index, value) -- returns the old value.
RUNTIME_FUNCTION(Runtime_AtomicsSub) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(isolate, sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoSub<ctype>(isolate, source, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalUint8ClampedArray:
      return DoSubUint8Clamped(isolate, source, index, value);

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


// Atomics.and(typedArray, index, value) -- returns the old value.
RUNTIME_FUNCTION(Runtime_AtomicsAnd) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(isolate, sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoAnd<ctype>(isolate, source, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalUint8ClampedArray:
      return DoAndUint8Clamped(isolate, source, index, value);

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


// Atomics.or(typedArray, index, value) -- returns the old value.
RUNTIME_FUNCTION(Runtime_AtomicsOr) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(isolate, sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoOr<ctype>(isolate, source, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalUint8ClampedArray:
      return DoOrUint8Clamped(isolate, source, index, value);

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


// Atomics.xor(typedArray, index, value) -- returns the old value.
RUNTIME_FUNCTION(Runtime_AtomicsXor) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(isolate, sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoXor<ctype>(isolate, source, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalUint8ClampedArray:
      return DoXorUint8Clamped(isolate, source, index, value);

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


// Atomics.exchange(typedArray, index, value) -- returns the old value.
RUNTIME_FUNCTION(Runtime_AtomicsExchange) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 3);
  CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, sta, 0);
  CONVERT_SIZE_ARG_CHECKED(index, 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);
  RUNTIME_ASSERT(sta->GetBuffer()->is_shared());
  RUNTIME_ASSERT(index < NumberToSize(isolate, sta->length()));

  uint8_t* source = static_cast<uint8_t*>(sta->GetBuffer()->backing_store()) +
                    NumberToSize(isolate, sta->byte_offset());

  switch (sta->type()) {
#define TYPED_ARRAY_CASE(Type, typeName, TYPE, ctype, size) \
  case kExternal##Type##Array:                              \
    return DoExchange<ctype>(isolate, source, index, value);

    INTEGER_TYPED_ARRAYS(TYPED_ARRAY_CASE)
#undef TYPED_ARRAY_CASE

    case kExternalUint8ClampedArray:
      return DoExchangeUint8Clamped(isolate, source, index, value);

    default:
      break;
  }

  UNREACHABLE();
  return isolate->heap()->undefined_value();
}


// Atomics.isLockFree(size) -- true iff a |size|-byte atomic access is
// lock-free here (see AtomicIsLockFree above).
RUNTIME_FUNCTION(Runtime_AtomicsIsLockFree) {
  HandleScope scope(isolate);
  DCHECK(args.length() == 1);
  CONVERT_NUMBER_ARG_HANDLE_CHECKED(size, 0);
  uint32_t usize = NumberToUint32(*size);
  return isolate->heap()->ToBoolean(AtomicIsLockFree(usize));
}
}  // namespace internal
}  // namespace v8