Lines Matching refs:HeapNumber
546 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
547 __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
559 HeapNumber::kExponentBias << HeapNumber::kExponentShift;
569 // divide the constant 31 + HeapNumber::kExponentBias, 0x41e, into two parts
572 __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge));
576 Operand(mantissa, LSL, HeapNumber::kExponentShift));
582 __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
586 Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
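
The matches at 546-586 look like the hand-rolled int32-to-double conversion path: the sign is masked out with kSignMask, the biased exponent is built from 31 minus the count of leading zeros (hence the 31 + HeapNumber::kExponentBias constant), and the magnitude is shifted so that kMantissaBitsInTopWord bits land in the high word. A minimal C++ sketch of that encoding, assuming the conventional values (bias 1023 per the STATIC_ASSERT at 638, exponent shift 20, 20 mantissa bits in the top word); the helper and its name are illustrative, not part of the stub:

    #include <cstdint>

    // Sketch: encode an int32 as the high/low words of an IEEE-754 double,
    // the way the ARM stub assembles them by hand. Constant values assumed.
    void EncodeInt32AsDoubleWords(int32_t value, uint32_t* hi, uint32_t* lo) {
      const uint32_t kSignMask = 0x80000000u;  // matches the STATIC_ASSERT at 546
      const int kExponentBias = 1023;
      const int kExponentShift = 20;
      const int kMantissaBitsInTopWord = 20;

      uint32_t sign = static_cast<uint32_t>(value) & kSignMask;
      uint32_t magnitude = sign ? 0u - static_cast<uint32_t>(value)
                                : static_cast<uint32_t>(value);
      if (magnitude == 0) { *hi = sign; *lo = 0; return; }  // +/-0.0

      int zeros = __builtin_clz(magnitude);     // the stub keeps this in zeros_
      uint32_t exponent = 31 - zeros + kExponentBias;
      magnitude <<= zeros;                      // normalize: top bit to position 31
      magnitude <<= 1;                          // drop the implicit leading 1
      *hi = sign | (exponent << kExponentShift)
                 | (magnitude >> (32 - kMantissaBitsInTopWord));
      *lo = magnitude << kMantissaBitsInTopWord;
    }
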
635 HeapNumber::kExponentShift, HeapNumber::kExponentBits);
638 STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
639 __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
662 0, HeapNumber::kMantissaBitsInTopWord);
665 Operand(1 << HeapNumber::kMantissaBitsInTopWord));
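
Lines 635-665 appear to be the reverse, integer-unit path that picks a double apart: Sbfx pulls the exponent field out of the high word, kExponentBias + 1 is subtracted from it, and the top-word mantissa bits are isolated with the implicit 1 restored at bit kMantissaBitsInTopWord. A rough C++ equivalent of that field extraction (the struct and names are mine; the sketch removes only the plain bias and leaves the stub's extra +1 adjustment aside):

    #include <cstdint>

    // Sketch: extract sign, unbiased exponent and top-word mantissa (with the
    // implicit 1 restored) from the high word of a normalized, finite double.
    // Assumed layout: bit 31 sign, bits 20..30 exponent, bits 0..19 mantissa.
    struct TopWordFields {
      bool negative;
      int exponent;        // unbiased
      uint32_t mantissa;   // 20 mantissa bits plus the implicit 1 at bit 20
    };

    TopWordFields ExtractTopWordFields(uint32_t hi) {
      const int kExponentShift = 20;
      const int kExponentBits = 11;
      const int kExponentBias = 1023;
      const int kMantissaBitsInTopWord = 20;

      TopWordFields f;
      f.negative = (hi & 0x80000000u) != 0;
      uint32_t biased = (hi >> kExponentShift) & ((1u << kExponentBits) - 1u);
      f.exponent = static_cast<int>(biased) - kExponentBias;
      f.mantissa = (hi & ((1u << kMantissaBitsInTopWord) - 1u))
                 | (1u << kMantissaBitsInTopWord);  // restore the implicit 1
      return f;
    }
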
711 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
717 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
720 __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
727 ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
728 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
731 HeapNumber::kExponentOffset));
734 HeapNumber::kMantissaOffset));
742 non_smi_exponent += 1 << HeapNumber::kExponentShift;
743 __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
744 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
746 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
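
The block at 711-746 stores a converted value straight into a heap number's two word-sized fields: an exponent/sign word assembled from kSignMask and (kExponentBias + 30) << kExponentShift is written at kExponentOffset, and the shifted payload goes to kMantissaOffset. For orientation, a small sketch of how a double splits into that little-endian word pair; the byte offsets named in the comment are assumptions about the 32-bit object layout, not something this listing states:

    #include <cstdint>
    #include <cstring>

    // Sketch: split a double into the two 32-bit words the stub stores at
    // HeapNumber::kMantissaOffset / HeapNumber::kExponentOffset. On a
    // little-endian 32-bit heap the mantissa (low) word is assumed to sit
    // right after the map word (byte offset 4) and the exponent (high) word
    // at byte offset 8.
    void SplitDoubleIntoWords(double value,
                              uint32_t* mantissa_word,
                              uint32_t* exponent_word) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      *mantissa_word = static_cast<uint32_t>(bits);        // low 32 bits
      *exponent_word = static_cast<uint32_t>(bits >> 32);  // sign | exponent | top mantissa
    }
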
818 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
820 __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
826 __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
828 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
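
Lines 818-828 are the integer-unit NaN test: load the exponent word, extract the exponent field with Sbfx, shift the non-mantissa bits out of the top word, and OR in the mantissa word; the value is NaN exactly when the exponent is all ones and the combined mantissa bits are non-zero. The same check in portable C++ (the helper is illustrative, assuming the usual 11-bit exponent and 12 non-mantissa bits in the top word):

    #include <cstdint>
    #include <cstring>

    // Sketch: a double is NaN iff its exponent bits are all ones and at least
    // one mantissa bit is set. Mirrors the shift-and-or trick in the listing.
    bool IsNaNBitwise(double value) {
      const int kExponentShift = 20;
      const int kExponentBits = 11;
      const int kNonMantissaBitsInTopWord = 12;  // sign + exponent

      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      uint32_t hi = static_cast<uint32_t>(bits >> 32);
      uint32_t lo = static_cast<uint32_t>(bits);

      uint32_t exponent = (hi >> kExponentShift) & ((1u << kExponentBits) - 1u);
      if (exponent != (1u << kExponentBits) - 1u) return false;  // not all ones
      // Any mantissa bit set? Shift sign and exponent out of the top word first.
      return ((hi << kNonMantissaBitsInTopWord) | lo) != 0;
    }
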
883 // Load the double from rhs, tagged HeapNumber r0, to d6.
884 __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
908 // Load the double from lhs, tagged HeapNumber r1, to d7.
909 __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
977 __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
978 __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
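
The vldr matches between 883 and 978 load the raw 8-byte payload of a tagged heap number into a VFP double register; the kValueOffset - kHeapObjectTag operand is needed because a tagged pointer is the object's real address plus the heap-object tag. A C++ sketch of that address arithmetic, with kHeapObjectTag == 1 and kValueOffset == 4 taken as assumptions about the 32-bit layout:

    #include <cstdint>
    #include <cstring>

    // Sketch: read the double payload through a tagged pointer by undoing the
    // tag before adding the field offset, which is what the
    // "HeapNumber::kValueOffset - kHeapObjectTag" operands above express.
    double LoadHeapNumberValue(uintptr_t tagged_pointer) {
      const uintptr_t kHeapObjectTag = 1;  // assumed tag in the low bit
      const uintptr_t kValueOffset = 4;    // assumed: payload follows the map word
      const void* field = reinterpret_cast<const void*>(
          tagged_pointer - kHeapObjectTag + kValueOffset);
      double result;
      std::memcpy(&result, field, sizeof(result));
      return result;
    }
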
1077 // be strictly equal if the other is a HeapNumber.
1261 // Check if input is a HeapNumber.
1267 // Input is a HeapNumber. Load it to a double register and store the
1269 __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset));
1338 __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
1367 __ vstr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
1376 __ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
1382 __ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
1397 // Allocate an aligned object larger than a HeapNumber.
1398 ASSERT(4 * kPointerSize >= HeapNumber::kSize);
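
The assert at 1398 is plain size arithmetic: on a 32-bit heap a heap number is one map word plus the 8-byte double payload, so reserving 4 * kPointerSize bytes (16) covers HeapNumber::kSize (12) with room to spare. A tiny restatement of that check, with both sizes written out as assumed values:

    #include <cstddef>

    // Sketch of the size check behind the ASSERT above (32-bit values assumed):
    // heap number = 4-byte map word + 8-byte double = 12 bytes <= 4 * 4 bytes.
    const std::size_t kPointerSize = 4;
    const std::size_t kAssumedHeapNumberSize = kPointerSize + sizeof(double);
    static_assert(4 * kPointerSize >= kAssumedHeapNumberSize,
                  "four pointer-sized slots cover a heap number");
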
1463 const Register heapnumber = r0;
1488 __ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
1502 FieldMemOperand(exponent, HeapNumber::kValueOffset));
1508 FieldMemOperand(exponent, HeapNumber::kValueOffset));
1631 heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
1633 FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
1634 ASSERT(heapnumber.is(r0));
4784 __ vldr(d1, r2, HeapNumber::kValueOffset);
4794 __ vldr(d0, r2, HeapNumber::kValueOffset);
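
Finally, the constants this whole listing leans on describe the IEEE-754 layout of the heap number's 8-byte payload and its field offsets. The values below are the conventional ones for a 32-bit little-endian heap; apart from kSignMask and kExponentBias, which the STATIC_ASSERTs at 546 and 638 pin down, they are assumptions rather than values read out of this listing:

    #include <cstdint>

    // Assumed values of the HeapNumber constants referenced above (32-bit sketch).
    const uint32_t kSignMask = 0x80000000u;    // bit 31 of the exponent word
    const int kExponentBias = 1023;            // IEEE-754 double bias
    const int kExponentShift = 20;             // exponent field starts at bit 20
    const int kExponentBits = 11;
    const int kMantissaBitsInTopWord = 20;     // 52 mantissa bits - 32 in the low word
    const int kNonMantissaBitsInTopWord = 12;  // sign bit + 11 exponent bits
    const int kValueOffset = 4;                // payload follows the map word
    const int kMantissaOffset = 4;             // low word first (little-endian)
    const int kExponentOffset = 8;             // high word
    const int kSize = 12;                      // map word + 8-byte payload
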