
Lines matching refs: And

142 // Push and pop all registers that can hold pointers.
206 // Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved)
236 And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
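
The (1 << kPointerSizeLog2) - 1 masks here (and again at lines 304, 2788 and 4618 below) are the standard power-of-two alignment test: ANDing an address with size - 1 keeps only the low bits, which are all zero exactly when the address is size-aligned. A minimal C++ sketch of the idiom, assuming kPointerSizeLog2 == 3 for a 64-bit target:

    #include <cassert>
    #include <cstdint>

    // Assumed value: log2 of the pointer size on a 64-bit target.
    constexpr unsigned kPointerSizeLog2 = 3;

    bool IsPointerAligned(uintptr_t addr) {
      // addr & (2^k - 1) keeps only the low k bits; they are all zero
      // exactly when addr is a multiple of 2^k.
      return (addr & ((uintptr_t{1} << kPointerSizeLog2) - 1)) == 0;
    }

    int main() {
      assert(IsPointerAligned(0x1000));
      assert(!IsPointerAligned(0x1005));
    }
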
262 // Clobbers object, dst, map, and ra, if (ra_status == kRAHasBeenSaved)
292 // only set during incremental collection, and then it's also guaranteed that
304 And(at, dst, Operand((1 << kPointerSizeLog2) - 1));
336 // Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved)
363 // catch stores of smis and stores into the young generation.
433 // catch stores of Smis and stores into young gen.
444 // Save caller-saved registers. js_function and code_entry are in the
489 // Store pointer to buffer and increment buffer top.
496 And(t8, scratch, Operand(StoreBuffer::kStoreBufferMask));
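
The mask test at line 496 is the write barrier's overflow check: the store buffer is a power-of-two-sized, size-aligned region, so after the incremented top is ANDed with StoreBuffer::kStoreBufferMask, a zero result means the top just crossed a buffer boundary. A hedged sketch of the bump-and-check pattern; the size constant is illustrative, not V8's real value:

    #include <cstdint>

    // Illustrative size; V8 derives the real mask from its buffer size.
    constexpr uintptr_t kStoreBufferSize = uintptr_t{1} << 16;
    constexpr uintptr_t kStoreBufferMask = kStoreBufferSize - 1;

    // Store a slot address and bump the top; report when the buffer
    // boundary is crossed so the caller can drain it.
    bool RecordSlot(uintptr_t*& top, uintptr_t slot) {
      *top++ = slot;  // store pointer to buffer and increment buffer top
      // The buffer is size-aligned, so the masked top is zero exactly
      // at a buffer boundary.
      return (reinterpret_cast<uintptr_t>(top) & kStoreBufferMask) == 0;
    }
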
553 // Read the first word and compare to the native_context_map.
598 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
634 And(reg0, reg0, Operand(0x3fffffff));
661 // reg0 - holds the untagged key on entry and holds the hash once computed.
678 // Use reg2 for index calculations and keep the hash intact in reg0.
708 And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
711 // Get the value at the masked, scaled index and return.
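
Lines 598-711 sketch the dictionary probe loop: the computed hash is ANDed with 0x3fffffff so it stays within Smi range, and each probe index is formed by masking (hash + offset) with capacity - 1 rather than taking a modulo. A simplified C++ rendition, with a placeholder hash mix and linear probing standing in for V8's actual probe sequence:

    #include <cstdint>
    #include <vector>

    // Open-addressed lookup with a masked, scaled index. The hash mix is
    // a placeholder and linear probing stands in for V8's probe sequence.
    int FindEntry(const std::vector<uint32_t>& keys, uint32_t key) {
      uint32_t hash = key * 2654435761u;  // placeholder integer hash
      hash &= 0x3fffffff;                 // keep the hash Smi-sized (30 bits)
      uint32_t mask = static_cast<uint32_t>(keys.size()) - 1;  // capacity: power of two
      for (uint32_t probe = 0; probe <= mask; ++probe) {
        uint32_t index = (hash + probe) & mask;  // masked index, no modulo
        if (keys[index] == key) return static_cast<int>(index);
      }
      return -1;  // not found
    }
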
1053 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
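
Line 1053 is the macro-instruction itself: unlike the raw and_ encoding it takes an Operand, and the macro assembler can pick a cheaper encoding when the operand is a small immediate. A simplified sketch of that dispatch, with printing stand-ins for the real code emitters; only the immediate path is shown, and the register-operand path is omitted:

    #include <cstdint>
    #include <cstdio>

    // Printing stand-ins; the real emitters append machine code.
    void and_(int rd, int rs, int rt) { std::printf("and  r%d, r%d, r%d\n", rd, rs, rt); }
    void andi(int rd, int rs, uint16_t imm) { std::printf("andi r%d, r%d, %u\n", rd, rs, imm); }
    void li(int rd, uint64_t imm) {
      std::printf("li   r%d, 0x%llx\n", rd, static_cast<unsigned long long>(imm));
    }

    constexpr int at = 1;  // assembler scratch register

    // Macro And: a single andi when the immediate fits in 16 unsigned
    // bits, otherwise materialize the constant in the scratch register.
    void And(int rd, int rs, uint64_t imm) {
      if (imm <= 0xFFFF) {
        andi(rd, rs, static_cast<uint16_t>(imm));
      } else {
        li(at, imm);
        and_(rd, rs, at);
      }
    }
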
1669 And(shift, shift, 0x3F);
1738 And(shift, shift, 0x3F);
1808 And(shift, shift, 0x3F);
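
Lines 1669, 1738 and 1808 mask a variable shift amount with 0x3F because 64-bit shifts consume only the low six bits of the count; masking makes the modulo-64 behavior explicit in multi-instruction shift sequences. The same point in C++:

    #include <cstdint>

    // 64-bit shifts use only the low six bits of the count; masking with
    // 0x3F makes that explicit and, in C++, avoids the undefined
    // behavior of shifting by 64 or more.
    uint64_t ShiftLeft64(uint64_t value, uint64_t shift) {
      shift &= 0x3F;
      return value << shift;
    }
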
1883 // Move rs to rt and shift it left then right to get the
1884 // desired bitfield on the right side and zeroes on the left.
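
Lines 1883-1884 describe the shift-left-then-right idiom for extracting a bitfield without a dedicated extract instruction: the left shift discards everything above the field, and the logical right shift discards everything below it while zero-filling from the left. A C++ sketch:

    #include <cstdint>

    // Extract `size` bits of `value` starting at bit `pos`; assumes
    // 0 < size and pos + size <= 64.
    uint64_t ExtractBits(uint64_t value, unsigned pos, unsigned size) {
      uint64_t left = value << (64 - pos - size);  // drop bits above the field
      return left >> (64 - size);                  // right-align, zero-fill
    }
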
2030 // and add 2^31 to rs.
2060 // and add 2^31 to rs.
2377 // Tests an FP condition code and then conditionally move rs to rd.
2383 // For testing purposes we need to fetch content of the FCSR register and
2387 // For the MIPS I, II and III architectures, the contents of scratch are
2403 // Tests an FP condition code and then conditionally move rs to rd.
2409 // For testing purposes we need to fetch content of the FCSR register and
2413 // For the MIPS I, II and III architectures, the contents of scratch are
2446 // Left- and right-hand sides are equal; check for -0 vs. +0.
2476 // Left- and right-hand sides are equal; check for -0 vs. +0.
2686 And(except_flag, except_flag, Operand(except_mask));
2699 // Clear cumulative exception flags and save the FCSR.
2705 // Retrieve and restore the FCSR.
2708 // Check for overflow and NaNs.
2709 And(scratch,
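
Lines 2686-2709 follow the FPU conversion protocol: clear the cumulative exception flags, perform the conversion, re-read the FCSR, and AND it against a mask to detect overflow and invalid-operation (NaN) exceptions. Portable C++ exposes the same clear-then-test pattern through <cfenv>:

    #include <cfenv>
    #include <cstdio>

    int main() {
      std::feclearexcept(FE_ALL_EXCEPT);  // clear cumulative exception flags
      volatile double big = 1e308;        // volatile defeats constant folding
      double d = big * 10.0;              // raises FE_OVERFLOW
      // Check for overflow and invalid-operation (NaN-producing) flags,
      // mirroring the FCSR mask test above.
      if (std::fetestexcept(FE_OVERFLOW | FE_INVALID)) {
        std::printf("conversion overflowed (d = %g)\n", d);
      }
    }
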
2788 And(dst, src, Operand((1 << num_least_bits) - 1));
3651 // overflow cases, so we keep slt and add an intermediate third instruction.
3982 // Buffer growth (and relocation) must be blocked for internal
3983 // references until associated instructions are emitted and
3993 // Buffer growth (and relocation) must be blocked for internal
3995 // until associated instructions are emitted and available to be
4023 // Buffer growth (and relocation) must be blocked for internal
4024 // references until associated instructions are emitted and
4034 // Buffer growth (and relocation) must be blocked for internal
4036 // until associated instructions are emitted and available to be
4061 // Both Drop and Ret need to be conditional.
4189 // Check relative positions of allocation top and limit addresses.
4201 // Set up allocation top address and allocation limit registers.
4209 // Load allocation top into result and allocation limit into alloc_limit.
4226 And(result_end, result, Operand(kDoubleAlignmentMask));
4238 // Calculate new top and bail out if new space is exhausted. Use result
4268 // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
4274 // Check relative positions of allocation top and limit addresses.
4285 // Set up allocation top address and allocation limit registers.
4292 // Load allocation top into result and allocation limit into alloc_limit.
4309 And(result_end, result, Operand(kDoubleAlignmentMask));
4321 // Calculate new top and bail out if new space is exhausted. Use result
4334 And(alloc_limit, result_end, Operand(kObjectAlignmentMask));
4362 // Set up allocation top address and allocation limit registers.
4373 And(result_end, result, Operand(kDoubleAlignmentMask));
4393 // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
4402 // Set up allocation top address and allocation limit registers.
4412 And(result_end, result, Operand(kDoubleAlignmentMask));
4421 // Calculate new top and bail out if new space is exhausted. Use result
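
The allocation sequences at lines 4189-4421 all share one shape: load the allocation top, pad it up to double alignment when the And against kDoubleAlignmentMask finds a misaligned top, add the object size, and bail out if the new top exceeds the limit. A condensed bump-pointer sketch; the alignment constant and filler handling are stand-ins:

    #include <cstdint>

    constexpr uintptr_t kDoubleAlignment = 8;  // assumed
    constexpr uintptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;

    // Bump-pointer allocation: align the top for doubles, compute the
    // new top, and bail out when the space is exhausted.
    void* Allocate(uintptr_t& top, uintptr_t limit, uintptr_t size) {
      if (top & kDoubleAlignmentMask) {
        top += kDoubleAlignment - (top & kDoubleAlignmentMask);  // skip a filler word
      }
      uintptr_t result = top;
      uintptr_t new_top = result + size;    // calculate new top
      if (new_top > limit) return nullptr;  // new space is exhausted
      top = new_top;
      return reinterpret_cast<void*>(result);
    }
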
4448 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
4454 // Set the map, length and hash field.
4472 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
4478 // Set the map, length and hash field.
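
Lines 4448-4478 size a string allocation by adding the header plus alignment slop and then ANDing with ~kObjectAlignmentMask; that is the usual (x + mask) & ~mask round-up-to-power-of-two formula. In C++, with an assumed 8-byte object alignment:

    #include <cstddef>

    constexpr size_t kObjectAlignment = 8;  // assumed
    constexpr size_t kObjectAlignmentMask = kObjectAlignment - 1;

    // Adding the mask and then clearing the low bits rounds up,
    // not down, to the next alignment boundary.
    constexpr size_t ObjectAlignedSize(size_t size) {
      return (size + kObjectAlignmentMask) & ~kObjectAlignmentMask;
    }

    static_assert(ObjectAlignedSize(13) == 16, "rounds up");
    static_assert(ObjectAlignedSize(16) == 16, "already aligned");
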
4544 And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
4552 // Allocates a heap number or jumps to the label if the young space is full and
4560 // Allocate an object in the heap for the heap number and tag it as a heap
4618 And(scratch, src, kPointerSize - 1);
4630 And(scratch, src, kPointerSize - 1);
4797 And(scratch1, scratch1,
4800 And(scratch2, scratch2, Operand(kSingleNaNMask));
4832 And(scratch1, scratch1,
4835 And(scratch2, scratch2, Operand(kDoubleNaNMask));
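
The masks at lines 4797-4835 test for NaN at the bit level: ignore the sign, require all-ones exponent bits and a nonzero mantissa. The same test in portable C++ for IEEE 754 binary64:

    #include <cstdint>
    #include <cstring>

    // A binary64 value is NaN iff its exponent bits are all ones and its
    // mantissa is nonzero; the sign bit is ignored.
    bool IsNaNBits(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);  // well-defined type pun
      uint64_t exponent = bits & 0x7FF0000000000000ull;
      uint64_t mantissa = bits & 0x000FFFFFFFFFFFFFull;
      return exponent == 0x7FF0000000000000ull && mantissa != 0;
    }
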
5035 // Restore caller's frame pointer and return address now as they will be
5041 // callee arguments corruption (source and destination areas could overlap).
5043 // Both src_reg and dst_reg are pointing to the word after the one to copy,
5070 // Check whether the expected and actual arguments count match. If not,
5077 // up actual and expected registers according to the contract if values are
5092 // like we have a match between expected and actual number of
5250 // Get the function and setup the context.
5274 And(scratch, scratch, Operand(kIsNotStringMask));
5312 // If the prototype or initial map is the hole, don't return it and
5371 // cached in the hash field and the number of bits reserved for it does not
5390 // Remove smi tag and convert to double.
5397 // Check for heap number and load double value from it.
5408 And(exponent, exponent, mask_reg);
5628 // should remove this need and make the runtime routine entry code
5854 // The following three instructions must remain together and unmodified
5860 // Load the stub address to t9 and call it,
5941 // Save registers and reserve room for saved entry sp and code object.
5958 // Save the frame pointer and the context in top.
5970 And(sp, sp, Operand(-frame_alignment)); // Align stack.
5981 // Reserve place for the return address, stack space and an optional slot
5983 // returned) and align the frame preparing for calling the runtime function.
5988 And(sp, sp, Operand(-frame_alignment)); // Align stack.
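
Lines 5970 and 5988 (and 6438 below) align the stack by ANDing sp with -frame_alignment: for a power-of-two alignment a, the two's-complement encoding of -a is a mask whose low log2(a) bits are clear, so the AND rounds sp down. In C++:

    #include <cstdint>

    // For a power-of-two alignment a, -a == ~(a - 1) in two's
    // complement, so the AND clears the low bits and rounds down.
    constexpr uintptr_t AlignDown(uintptr_t sp, uintptr_t alignment) {
      return sp & ~(alignment - 1);  // same result as sp & (0 - alignment)
    }

    static_assert(AlignDown(0x1007, 16) == 0x1000, "rounds down to the boundary");
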
6015 // Restore current context from top and clear it in debug mode.
6025 // Pop the arguments, restore registers, and return.
6315 // Test that both first and second are sequential one-byte strings.
6334 And(scratch1, first, Operand(second));
6363 And(scratch, type, Operand(kFlatOneByteStringMask));
6433 // Make stack end at alignment and make room for num_arguments - 4 words
6434 // and the original value of sp.
6438 And(sp, sp, Operand(-frame_alignment));
6496 And(at, sp, Operand(frame_alignment_mask));
6537 And(scratch, object, Operand(~Page::kPageAlignmentMask));
6539 And(scratch, scratch, Operand(mask));
6566 And(t8, t9, Operand(mask_scratch));
6571 And(t8, t9, Operand(mask_scratch));
6577 And(t9, t9, Operand(1));
6587 And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
6609 // Since both black and grey have a 1 in the first position and white does
6612 And(t8, mask_scratch, load_scratch);
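
Lines 6537-6612 walk the marking bitmap: clearing an address's low Page::kPageAlignmentMask bits recovers the size-aligned page header, and ANDing the loaded bitmap cell with a per-object mask reads the mark bits, where black and grey both set the first bit and white does not. A sketch of the page-base computation, with an illustrative page size:

    #include <cstdint>

    constexpr uintptr_t kPageSize = uintptr_t{1} << 19;  // illustrative: 512 KiB pages
    constexpr uintptr_t kPageAlignmentMask = kPageSize - 1;

    // Pages are size-aligned, so clearing the low bits of any interior
    // address yields the page header that holds the marking bitmap.
    constexpr uintptr_t PageFromAddress(uintptr_t addr) {
      return addr & ~kPageAlignmentMask;
    }
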
6632 And(dst, dst, Operand(Map::EnumLengthBits::kMask));
6728 // In 0-255 range, round and truncate.
6753 And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
6756 // object sits on the page boundary as no memento can follow and we cannot
6760 And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));