Lines Matching refs:a3
93 __ pop(a3);
122 __ sw(a3, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
129 __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kCodeOffset));
130 __ Addu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag));
133 __ sw(a3, FieldMemOperand(v0, JSFunction::kCodeEntryOffset));
139 __ Push(cp, a3, t0);
158 __ lw(a3, MemOperand(sp, 0));
169 __ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
203 __ lw(a3, MemOperand(sp, 0));
219 __ JumpIfNotSmi(a3, &after_sentinel);
222 __ Assert(eq, message, a3, Operand(zero_reg));
224 __ lw(a3, GlobalObjectOperand());
225 __ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalContextOffset));
226 __ lw(a3, ContextOperand(a3, Context::CLOSURE_INDEX));
231 __ sw(a3, ContextOperand(v0, Context::CLOSURE_INDEX));
258 // a3: boilerplate literal array.
282 __ lw(a1, FieldMemOperand(a3, i));
290 __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
296 __ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize);
310 __ lw(a3, MemOperand(sp, 2 * kPointerSize));
312 __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
314 __ Addu(t0, a3, t0);
315 __ lw(a3, MemOperand(t0));
317 __ Branch(&slow_case, eq, a3, Operand(t1));
322 __ lw(v0, FieldMemOperand(a3, JSArray::kElementsOffset));
358 __ push(a3);
359 __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
360 __ lw(a3, FieldMemOperand(a3, HeapObject::kMapOffset));
362 __ Assert(eq, message, a3, Operand(at));
363 __ pop(a3);
384 // Load boilerplate object into a3 and check if we need to create a
387 __ lw(a3, MemOperand(sp, 3 * kPointerSize));
389 __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
391 __ Addu(a3, t0, a3);
392 __ lw(a3, MemOperand(a3));
394 __ Branch(&slow_case, eq, a3, Operand(t0));
399 __ lw(a0, FieldMemOperand(a3, HeapObject::kMapOffset));
407 __ lw(a1, FieldMemOperand(a3, i));
531 __ Move(a2, a3, f14);
536 // Write Smi from a0 to a3 and a2 in double format.
538 ConvertToDoubleStub stub1(a3, a2, scratch1, scratch2);
558 // Load right operand (a0) to f12 or a2/a3.
560 a0, f14, a2, a3, heap_number_map, scratch1, scratch2, slow);
988 // a3: Right value (sign, exponent, top of mantissa).
1000 // function call are prepared in a0-a3 registers, but the function we are
1003 // a0-a3 registers to f12/f14 register pairs.
1005 __ Move(f14, a2, a3);
1034 sign_.is(a3)) {
1039 scratch_.is(a3) &&
1051 WriteInt32ToHeapNumberStub stub1(a1, v0, a2, a3);
1052 WriteInt32ToHeapNumberStub stub2(a2, v0, a3, a0);
1250 // Load lhs to a double in a2, a3.
1251 __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
1291 ConvertToDoubleStub stub2(a3, a2, t6, t5);
1320 __ mov(t3, a3); // a3 has MS 32 bits of lhs.
1387 __ mov(t3, a3); // a3 has MS 32 bits of lhs.
1418 // function call are prepared in a0-a3 registers, but the function we are
1423 // a0-a3 registers to f12/f14 register pairs.
1425 __ Move(f14, a2, a3);
1478 __ GetObjectType(rhs, a3, a3);
1479 __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
1482 __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
1488 __ And(t2, a2, Operand(a3));
1500 __ GetObjectType(lhs, a3, a2);
1504 __ Branch(slow, ne, a3, Operand(a2));
1514 __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
1544 __ GetObjectType(rhs, a3, a3);
1545 __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
1546 __ And(at, a3, Operand(kIsSymbolMask));
1556 __ GetObjectType(rhs, a2, a3);
1557 __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
1562 __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
1564 __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
1565 __ and_(a0, a2, a3);
1674 GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, false, &runtime);
1728 // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
1734 // and the right hand side if we have FPU. Otherwise a2, a3 represent
1816 __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, a2, a3, &slow);
1818 __ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3);
1824 a3,
1831 a3,
1964 __ Move(a3, tos_);
1967 __ Push(a3, a2, a1);
2156 __ AllocateHeapNumber(a1, a2, a3, t2, &slow_allocate_heapnumber);
2169 __ lw(a3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
2171 __ sw(a3, FieldMemOperand(a1, HeapNumber::kMantissaOffset));
2187 __ ConvertToInt32(a0, a1, a2, a3, f0, slow);
2204 __ AllocateHeapNumber(a2, a3, t0, t2, &slow_allocate_heapnumber);
2220 __ ConvertToInt32(v0, a1, a3, t0, f0, &impossible);
2238 WriteInt32ToHeapNumberStub stub(a1, v0, a2, a3);
2556 // Load left and right operands into f12 and f14 or a0/a1 and a2/a3
2629 __ SmiUntag(a3, left);
2632 // Convert operands to 32-bit integers. Right in a2 and left in a3.
2635 a3,
2655 __ Or(a2, a3, Operand(a2));
2658 __ Xor(a2, a3, Operand(a2));
2661 __ And(a2, a3, Operand(a2));
2666 __ srav(a2, a3, a2);
2671 __ srlv(a2, a3, a2);
2685 __ sllv(a2, a3, a2);
2691 __ Addu(a3, a2, Operand(0x40000000));
2692 __ Branch(&result_not_a_smi, lt, a3, Operand(zero_reg));
2731 // a3 and a0 as scratch. v0 is preserved and returned.
2732 WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
2886 a3,
3025 // Convert operands to 32-bit integers. Right in a2 and left in a3. The
3030 a3,
3051 __ Or(a2, a3, Operand(a2));
3054 __ Xor(a2, a3, Operand(a2));
3057 __ And(a2, a3, Operand(a2));
3061 __ srav(a2, a3, a2);
3065 __ srlv(a2, a3, a2);
3090 __ sllv(a2, a3, a2);
3132 // a3 and a0 as scratch. v0 is preserved and returned.
3134 WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
3355 // of the double into a2, a3.
3359 __ Move(a2, a3, f4);
3370 // low and high words into a2, a3.
3372 __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4));
3375 __ Move(a2, a3, f4);
3379 // a3 = high 32 bits of double value.
3382 __ Xor(a1, a2, a3);
3391 // a3 = high 32 bits of double value.
3429 __ Branch(&calculate, ne, a3, Operand(t1));
3466 __ Push(cache_entry, a2, a3);
3472 __ Pop(cache_entry, a2, a3);
3478 __ sw(a3, MemOperand(cache_entry, 1 * kPointerSize));
3911 // It's okay to clobber a2 and a3 here. v0 & v1 contain result.
3913 __ lw(a3, MemOperand(a2));
3914 __ Subu(a3, a3, Operand(1));
3915 __ sw(a3, MemOperand(a2));
3948 // If we throw the OOM exception, the value of a3 doesn't matter.
3952 __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
3956 __ sw(a3, MemOperand(t0));
4064 // a3: argc
4107 // a3: argc
4174 // a3: argc
4251 Register map = a3; // Map of the object.
4309 // Register mapping: a3 is object map and t0 is function prototype.
4432 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
4435 a3,
4444 __ subu(a3, a0, a1);
4445 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
4446 __ Addu(a3, fp, Operand(t3));
4447 __ lw(v0, MemOperand(a3, kDisplacement));
4458 __ subu(a3, a0, a1);
4459 __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
4460 __ Addu(a3, a2, Operand(t3));
4461 __ lw(v0, MemOperand(a3, kDisplacement));
4478 __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4479 __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
4486 __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
4489 __ Addu(a3, a3, Operand(t3));
4490 __ addiu(a3, a3, StandardFrameConstants::kCallerSPOffset);
4491 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
4513 __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
4514 __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
4527 __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
4529 __ Addu(a3, a3, Operand(t6));
4530 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
4531 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
4565 __ AllocateInNewSpace(t5, v0, a3, t0, &runtime, TAG_OBJECT);
4592 __ lw(a3, FieldMemOperand(t0, i));
4593 __ sw(a3, FieldMemOperand(v0, i));
4598 __ lw(a3, MemOperand(sp, 2 * kPointerSize));
4601 __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
4623 // Move backing store address to a3, because it is
4625 __ mov(a3, t0);
4655 __ Addu(a3, t0, Operand(t6));
4656 __ Addu(a3, a3, Operand(kParameterMapHeaderSize));
4660 // a3 = address of backing store (tagged)
4673 __ Addu(t6, a3, t1);
4681 // a3 = address of backing store (tagged)
4685 __ sw(t1, FieldMemOperand(a3, FixedArray::kMapOffset));
4686 __ sw(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));
4699 __ Addu(t1, a3, Operand(t6));
4724 __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
4727 a3,
4739 __ Addu(a3, a2, Operand(at));
4741 __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
4742 __ sw(a3, MemOperand(sp, 1 * kPointerSize));
4759 a3,
4771 __ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize);
4789 __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
4790 __ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
4803 __ lw(a3, MemOperand(a2));
4805 __ sw(a3, MemOperand(t0));
4913 __ lw(a3, FieldMemOperand(subject, String::kLengthOffset));
4916 // a3: Length of subject string as a smi
4923 __ Branch(&runtime, ls, a3, Operand(a0));
5018 __ sra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below).
5027 // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
5037 // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
5088 __ Xor(a3, a3, Operand(1)); // 1 for 2-byte str, 0 for 1-byte.
5095 // Argument 4, a3: End of string data
5098 __ sllv(t1, t0, a3);
5100 __ sllv(t1, a1, a3);
5105 __ sllv(t1, t2, a3);
5106 __ addu(a3, t0, t1);
5213 __ lw(a3, MemOperand(a2, 0));
5216 __ sll(a3, a3, kSmiTagSize); // Convert to Smi.
5217 __ sw(a3, MemOperand(a0, 0));
5279 a3, // Scratch register.
5292 __ Addu(a3, v0, Operand(JSRegExpResult::kSize));
5295 __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
5310 // a3: FixedArray, tagged.
5315 __ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
5318 __ sw(t2, FieldMemOperand(a3, FixedArray::kLengthOffset));
5321 __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
5325 // a3: Start of elements in FixedArray.
5329 __ addu(t1, t1, a3); // Point past last element to store.
5331 __ Branch(&done, ge, a3, Operand(t1)); // Break when a3 is past end of elements.
5332 __ sw(a2, MemOperand(a3));
5334 __ addiu(a3, a3, kPointerSize); // In branch delay slot.
5357 // Load the cache state into a3.
5358 __ lw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
5362 __ Branch(&done, eq, a3, Operand(a1));
5364 __ Branch(&done, eq, a3, Operand(at));
5370 __ Branch(USE_DELAY_SLOT, &done, eq, a3, Operand(at));
5444 __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
5458 __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
5474 __ GetObjectType(a1, a3, a3);
5475 __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
5489 // a3: object type
5492 __ Branch(&non_function_call, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
5493 __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
5497 __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
6065 __ lw(a3, MemOperand(sp, kFromOffset));
6073 __ UntagAndJumpIfNotSmi(a3, a3, &runtime);
6074 // Both a2 and a3 are untagged integers.
6076 __ Branch(&runtime, lt, a3, Operand(zero_reg)); // From < 0.
6078 __ Branch(&runtime, gt, a3, Operand(a2)); // Fail if from > to.
6079 __ Subu(a2, a2, a3);
6109 __ Addu(v0, v0, Operand(a3));
6110 __ lbu(a3, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
6116 masm, a3, t0, a1, t1, t2, t3, t4, &make_two_character_string);
6120 // a3: two characters combined into halfword in little endian byte order.
6123 __ sh(a3, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
6133 // a3: from index (untagged)
6158 __ Addu(a3, a3, t0);
6175 // a3: adjusted start index (untagged)
6193 __ sll(a3, a3, 1);
6195 __ sw(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
6204 // a3: adjusted start index (untagged)
6235 __ Addu(t1, t1, a3);
6246 masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
6255 __ sll(t0, a3, 1);
6266 masm, a1, t1, a2, a3, t0, t2, t3, t4, DEST_ALWAYS_ALIGNED);
6270 __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
6418 __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime);
6421 __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
6423 GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1);
6463 masm, 1 * kPointerSize, a0, a2, a3, t0, t1, &call_builtin);
6468 masm, 0 * kPointerSize, a1, a2, a3, t0, t1, &call_builtin);
6486 __ lw(a3, FieldMemOperand(a1, String::kLengthOffset));
6490 __ slt(t5, zero_reg, a3); // if (a3 > 0) t5 = 1.
6494 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6502 __ sra(a3, a3, kSmiTagSize);
6508 // a3: length of second string
6515 __ Addu(t2, a2, Operand(a3));
6532 __ lbu(a3, FieldMemOperand(a1, SeqAsciiString::kHeaderSize));
6538 masm, a2, a3, t2, t3, t0, t1, t5, &make_two_character_string);
6539 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6551 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6585 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6614 // a3: length of second string
6666 // a3: length of second string
6679 // a3: length of second string.
6684 StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, true);
6685 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6695 // a3: length of second string.
6699 StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, false);
6701 __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
6863 Register tmp2 = a3;
6903 Register tmp2 = a3;
7003 __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
7005 __ Branch(&miss, ne, a3, Operand(known_map_));
7159 (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
7233 a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
7272 Register mask = a3;
7366 { REG(a3), REG(t0), REG(t1), EMIT_REMEMBERED_SET },
7370 { REG(a1), REG(a2), REG(a3), EMIT_REMEMBERED_SET },
7371 { REG(a3), REG(a2), REG(a1), EMIT_REMEMBERED_SET },
7373 { REG(a2), REG(a1), REG(a3), EMIT_REMEMBERED_SET },
7374 { REG(a3), REG(a1), REG(a2), EMIT_REMEMBERED_SET },
7376 { REG(a3), REG(a2), REG(t0), EMIT_REMEMBERED_SET },
7377 { REG(a2), REG(a3), REG(t0), EMIT_REMEMBERED_SET },
7381 { REG(a2), REG(a3), REG(t5), EMIT_REMEMBERED_SET },
7382 { REG(a2), REG(a3), REG(t5), OMIT_REMEMBERED_SET },
7634 // -- a3 : element index as smi
7653 __ Push(a1, a3, a0);
7662 __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
7676 __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
7685 __ StoreNumberToDoubleElements(a0, a3, a1, t1, t2, t3, t5, a2,