
Lines Matching full:x21

387   __ Mov(x21, 0x1234000000000000);
417 ASSERT_EQUAL_64(0x1234000000000000, x21);
503 ASSERT_EQUAL_64(0x7ff80000, x21);
823 // Use x20 to preserve sp. We check for the result via x21 because the
828 __ Mov(x21, sp);
845 ASSERT_EQUAL_64(0x543210, x21);
1098 __ Mneg(x21, x26, x18);
1121 ASSERT_EQUAL_64(0xffffffff00000001, x21);
1179 __ Madd(x21, x17, x17, x18);
1209 ASSERT_EQUAL_64(0x0000000100000000, x21);
1248 __ Msub(x21, x17, x17, x18);
1278 ASSERT_EQUAL_64(0x00000000fffffffe, x21);
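
The Mneg/Madd/Msub results above wrap modulo 2^64. A minimal standalone C++ model of that behaviour (the operand values below are illustrative assumptions, since the registers are loaded on lines not shown in this listing):

    #include <cassert>
    #include <cstdint>

    // X-register MADD/MSUB/MNEG: multiply-accumulate modulo 2^64.
    uint64_t madd(uint64_t n, uint64_t m, uint64_t a) { return a + n * m; }
    uint64_t msub(uint64_t n, uint64_t m, uint64_t a) { return a - n * m; }
    uint64_t mneg(uint64_t n, uint64_t m) { return uint64_t{0} - n * m; }

    int main() {
      // Negating a small product wraps: -(0xffffffff * 1) == 0xffffffff00000001,
      // the same pattern asserted for Mneg above.
      assert(mneg(0xffffffff, 1) == 0xffffffff00000001);
      // Subtracting past zero wraps too: 0 - 2 * 1 == 0xfffffffffffffffe.
      assert(msub(2, 1, 0) == 0xfffffffffffffffe);
      // Accumulation is plain modular addition.
      assert(madd(0x100000000, 1, 0) == 0x0000000100000000);
      return 0;
    }
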
1295 __ Mov(x21, 1);
1306 __ Smulh(x1, x21, x24);
1343 __ Mov(x21, 1);
1354 __ Umulh(x1, x21, x24);
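
Smulh/Umulh return the upper 64 bits of the full 128-bit product. A small model using the GCC/Clang __int128 extension (operand values chosen only for illustration):

    #include <cassert>
    #include <cstdint>

    // High half of the signed/unsigned 64 x 64 -> 128-bit product.
    int64_t smulh(int64_t n, int64_t m) {
      return static_cast<int64_t>((static_cast<__int128>(n) * m) >> 64);
    }
    uint64_t umulh(uint64_t n, uint64_t m) {
      return static_cast<uint64_t>((static_cast<unsigned __int128>(n) * m) >> 64);
    }

    int main() {
      assert(smulh(-1, 1) == -1);           // sign bits fill the high half
      assert(umulh(~uint64_t{0}, 2) == 1);  // (2^64 - 1) * 2 == 1 * 2^64 + (2^64 - 2)
      assert(smulh(1, 1) == 0 && umulh(1, 1) == 0);
      return 0;
    }
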
1394 __ Mov(x21, 0x0000000200000000);
1399 __ Smaddl(x12, w19, w19, x21);
1403 __ Umaddl(x22, w19, w19, x21);
1433 __ Mov(x21, 0x0000000200000000);
1438 __ Smsubl(x12, w19, w19, x21);
1442 __ Umsubl(x22, w19, w19, x21);
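
The widening forms Smaddl/Umaddl/Smsubl/Umsubl extend their 32-bit W operands before accumulating into the 64-bit X accumulator (x21 = 0x0000000200000000 above). A sketch of that widening; the w19 value used below is an assumption, not taken from the test:

    #include <cassert>
    #include <cstdint>

    // SMADDL/UMADDL/SMSUBL/UMSUBL: widen the 32-bit operands, then accumulate in 64 bits.
    uint64_t smaddl(int32_t wn, int32_t wm, uint64_t xa) {
      return xa + static_cast<uint64_t>(static_cast<int64_t>(wn) * wm);
    }
    uint64_t umaddl(uint32_t wn, uint32_t wm, uint64_t xa) {
      return xa + static_cast<uint64_t>(wn) * wm;
    }
    uint64_t smsubl(int32_t wn, int32_t wm, uint64_t xa) {
      return xa - static_cast<uint64_t>(static_cast<int64_t>(wn) * wm);
    }
    uint64_t umsubl(uint32_t wn, uint32_t wm, uint64_t xa) {
      return xa - static_cast<uint64_t>(wn) * wm;
    }

    int main() {
      const uint64_t acc = 0x0000000200000000;  // the accumulator loaded into x21 above
      // Assume w19 == 0xffffffff: signed it reads as -1, unsigned as 2^32 - 1.
      assert(smaddl(-1, -1, acc) == 0x0000000200000001);                  // acc + 1
      assert(umaddl(0xffffffff, 0xffffffff, acc) == 0x0000000000000001);  // wraps mod 2^64
      assert(smsubl(-1, -1, acc) == 0x00000001ffffffff);                  // acc - 1
      return 0;
    }
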
1469 __ Mov(x21, 2);
1485 __ Udiv(x12, x19, x21);
1486 __ Sdiv(x13, x19, x21);
1487 __ Udiv(x14, x20, x21);
1488 __ Sdiv(x15, x20, x21);
1495 __ Udiv(x26, x16, x21);
1496 __ Sdiv(x27, x16, x21);
1497 __ Udiv(x28, x18, x21);
1498 __ Sdiv(x29, x18, x21);
1504 __ Sdiv(x21, x16, x17);
1536 ASSERT_EQUAL_64(0, x21);
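
AArch64 integer division never traps: dividing by zero produces 0 and INT64_MIN / -1 wraps back to INT64_MIN, which is why a plain 0 can show up in the checks above. A standalone model (the inputs below are examples, not the test's actual register contents):

    #include <cassert>
    #include <cstdint>

    // UDIV/SDIV: truncate toward zero, never trap.
    uint64_t udiv(uint64_t n, uint64_t m) { return m == 0 ? 0 : n / m; }
    int64_t sdiv(int64_t n, int64_t m) {
      if (m == 0) return 0;                             // divide by zero gives 0
      if (n == INT64_MIN && m == -1) return INT64_MIN;  // avoids C++ UB, matches hardware
      return n / m;                                     // C++ division truncates like SDIV
    }

    int main() {
      assert(udiv(7, 2) == 3);
      assert(sdiv(-7, 2) == -3);                 // truncation toward zero
      assert(sdiv(42, 0) == 0);                  // no exception on a zero divisor
      assert(sdiv(INT64_MIN, -1) == INT64_MIN);  // the one signed overflow case
      return 0;
    }
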
2371 __ Mov(x21, src_base + 16);
2381 __ Ldr(w2, MemOperand(x21, -4, PreIndex));
2405 ASSERT_EQUAL_64(src_base + 12, x21);
2429 __ Mov(x21, src_base + 8);
2439 __ Ldr(x2, MemOperand(x21, -8, PostIndex));
2463 ASSERT_EQUAL_64(src_base, x21);
2495 __ Mov(x21, drifted_addr);
2496 __ Str(x0, MemOperand(x21, largeoffset + 8, PreIndex));
2512 ASSERT_EQUAL_64(base_addr + 8, x21);
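
The x21 asserts in this block check the writeback side of the addressing modes: PreIndex adds the offset to the base before the access and writes the updated base back, while PostIndex accesses at the original base and then writes back base + offset. A small model of the base-register update (names and types are illustrative):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Pre-indexed load: the base is updated first, then used as the address.
    uint32_t ldr_w_preindex(const uint8_t* mem, uint64_t* base, int64_t offset) {
      *base += offset;
      uint32_t value;
      std::memcpy(&value, mem + *base, sizeof(value));
      return value;
    }

    // Post-indexed load: the original base is the address, then the base is updated.
    uint32_t ldr_w_postindex(const uint8_t* mem, uint64_t* base, int64_t offset) {
      uint32_t value;
      std::memcpy(&value, mem + *base, sizeof(value));
      *base += offset;
      return value;
    }

    int main() {
      uint8_t src[32] = {};
      uint64_t base = 16;
      // Like "Ldr(w2, MemOperand(x21, -4, PreIndex))" above: the base ends at offset 12.
      (void)ldr_w_preindex(src, &base, -4);
      assert(base == 12);
      base = 8;
      // Like "Ldr(x2, MemOperand(x21, -8, PostIndex))": the base ends back at offset 0.
      (void)ldr_w_postindex(src, &base, -8);
      assert(base == 0);
      return 0;
    }
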
2616 __ Mov(x21, src_base);
2622 __ Ldr(s2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
2638 ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
2658 __ Mov(x21, src_base);
2664 __ Ldr(d2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
2680 ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
2700 __ Mov(x21, src_base);
2706 __ Ldr(b2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
2722 ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
2742 __ Mov(x21, src_base);
2748 __ Ldr(h2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
2764 ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
2776 0x21, 0x43, 0x65, 0x87, 0xa9, 0xcb, 0xed, 0x0f,
2790 __ Mov(x21, src_base);
2796 __ Ldr(q2, MemOperand(x21, 32, PreIndex));
2815 ASSERT_EQUAL_64(src_base + 32, x21);
2971 __ Mov(x21, src_base + 4);
2981 MemOperand(x21, 32, PostIndex));
3010 ASSERT_EQUAL_64(src_base + 4 + 32, x21);
3074 __ Mov(x21, src_base + 4);
3082 MemOperand(x21, 64, PostIndex));
3105 ASSERT_EQUAL_64(src_base + 4 + 64, x21);
3222 __ Mov(x21, src_base + 4);
3228 __ Ld2(v31.V2S(), v0.V2S(), MemOperand(x21, 16, PostIndex));
3247 ASSERT_EQUAL_64(src_base + 4 + 16, x21);
3306 __ Mov(x21, src_base + 4);
3312 __ Ld2(v31.V2D(), v0.V2D(), MemOperand(x21, 32, PostIndex));
3334 ASSERT_EQUAL_64(src_base + 4 + 32, x21);
3434 __ Mov(x21, src_base);
3458 __ Mov(x4, x21);
3461 __ Ld2(v8.B(), v9.B(), 4, MemOperand(x21, x25, PostIndex));
3509 ASSERT_EQUAL_64(src_base + 1, x21);
3662 __ Mov(x21, src_base + 4);
3668 __ Ld3(v31.V2S(), v0.V2S(), v1.V2S(), MemOperand(x21, 24, PostIndex));
3693 ASSERT_EQUAL_64(src_base + 4 + 24, x21);
3757 __ Mov(x21, src_base + 4);
3764 __ Ld3(v31.V2D(), v0.V2D(), v1.V2D(), MemOperand(x21, 48, PostIndex));
3789 ASSERT_EQUAL_64(src_base + 4 + 48, x21);
3897 __ Mov(x21, src_base);
3920 __ Mov(x4, x21);
3924 __ Ld3(v12.B(), v13.B(), v14.B(), 4, MemOperand(x21, x25, PostIndex));
3980 ASSERT_EQUAL_64(src_base + 1, x21);
4152 __ Mov(x21, src_base + 4);
4163 MemOperand(x21, 32, PostIndex));
4194 ASSERT_EQUAL_64(src_base + 4 + 32, x21);
4261 __ Mov(x21, src_base + 4);
4273 MemOperand(x21, 64, PostIndex));
4305 ASSERT_EQUAL_64(src_base + 4 + 64, x21);
4457 __ Mov(x21, src_base);
4462 __ Mov(x4, x21);
4468 MemOperand(x21, x25, PostIndex));
4538 ASSERT_EQUAL_64(src_base + 1, x21);
5017 __ Mov(x21, src_base);
5041 __ Ldr(q4, MemOperand(x21));
5042 __ Ld1(v4.B(), 4, MemOperand(x21, x25, PostIndex));
5072 ASSERT_EQUAL_64(src_base + 1, x21);
5278 __ Mov(x21, -32);
5298 __ Ldr(q21, MemOperand(x17, x21));
5303 __ Ldr(q23, MemOperand(x17, x21));
5389 __ Mov(x21, -64);
5409 __ Ldr(q22, MemOperand(x17, x21));
6123 __ Mov(x21, dst_base - base_offset);
6131 __ Stp(w0, w1, MemOperand(x21, base_offset));
6132 __ Stp(w2, w3, MemOperand(x21, base_offset + 8));
6133 __ Stp(x4, x5, MemOperand(x21, base_offset + 16));
6158 ASSERT_EQUAL_64(dst_base - base_offset, x21);
6358 __ Mov(x21, x16);
6385 ASSERT_EQUAL_64(src_base + 8, x21);
6418 __ Mov(x21, x24);
6447 ASSERT_EQUAL_64(src_base + 8, x21);
6474 __ Mov(x21, x16);
6501 ASSERT_EQUAL_64(src_base + 8, x21);
6534 __ Mov(x21, x24);
6563 ASSERT_EQUAL_64(src_base + base_offset + 8, x21);
6603 __ Mov(x21, dst_base + 40);
6611 __ Strb(w3, MemOperand(x21, -1));
7321 __ Sub(x21, x1, Operand(0x111));
7344 ASSERT_EQUAL_64(0x1000, x21);
7415 __ Sub(x21, x3, Operand(x1, LSL, 8));
7436 ASSERT_EQUAL_64(0xdcba9876543210ff, x21);
7469 __ Add(x21, x0, Operand(x2, SXTW, 3));
7499 ASSERT_EQUAL_64(0x3b2a19080, x21);
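
Operand(x1, LSL, 8) is a shifted register and Operand(x2, SXTW, 3) is an extended register: the second source is shifted, or sign-extended from its W form and then shifted, before the add or subtract. A model of both forms; the input values are assumptions chosen to match the result asserted above:

    #include <cassert>
    #include <cstdint>

    // Shifted-register operand: shift the second source before the ALU operation.
    uint64_t sub_lsl(uint64_t xn, uint64_t xm, unsigned shift) {
      return xn - (xm << shift);
    }

    // Extended-register operand (SXTW): sign-extend the low 32 bits, then shift left.
    uint64_t add_sxtw(uint64_t xn, uint32_t wm, unsigned shift) {
      const int64_t extended = static_cast<int32_t>(wm);
      return xn + (static_cast<uint64_t>(extended) << shift);
    }

    int main() {
      // Assuming w2 == 0x76543210 and a zero base: SXTW then LSL #3 gives the value
      // asserted above for "Add(x21, x0, Operand(x2, SXTW, 3))".
      assert(add_sxtw(0, 0x76543210, 3) == 0x3b2a19080);
      // A negative W value is sign-extended before the shift: (-1) << 1.
      assert(add_sxtw(0, 0xffffffff, 1) == 0xfffffffffffffffe);
      // LSL applied to the second operand of a subtract.
      assert(sub_lsl(0x1000, 0x1, 8) == 0xf00);
      return 0;
    }
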
8072 __ Adc(x21, x2, Operand(x3, ASR, 4));
8099 ASSERT_EQUAL_64(0x0111111111111110 + 1, x21);
8136 __ Adc(x21, x1, Operand(x2, SXTH, 2));
8157 ASSERT_EQUAL_64(0xffffffffffff37bd + 1, x21);
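
Adc adds its two operands plus the current carry flag; the "+ 1" in the expected values records a carry set by an earlier flag-setting instruction not shown in this listing. A minimal model:

    #include <cassert>
    #include <cstdint>

    // ADC: destination = first source + (possibly shifted/extended) second source + C.
    uint64_t adc(uint64_t xn, uint64_t operand, bool carry) {
      return xn + operand + (carry ? 1 : 0);
    }

    int main() {
      // With the carry set the result is one higher than a plain ADD, which is
      // exactly what the "... + 1" expected values above encode.
      assert(adc(0x0111111111111100, 0x10, true) == 0x0111111111111111);
      assert(adc(0x0111111111111100, 0x10, false) == 0x0111111111111110);
      return 0;
    }
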
8244 ASSERT_EQUAL_64(1, x21);
8407 __ Mov(x21, 0x7800000078000000);
8436 __ Cmp(x20, Operand(x21, ROR, 31));
8758 __ Lsl(x21, x0, x6);
8776 ASSERT_EQUAL_64(value << (shift[5] & 63), x21);
8812 __ Lsr(x21, x0, x6);
8830 ASSERT_EQUAL_64(value >> (shift[5] & 63), x21);
8868 __ Asr(x21, x0, x6);
8886 ASSERT_EQUAL_64(value >> (shift[5] & 63), x21);
8924 __ Ror(x21, x0, x6);
8942 ASSERT_EQUAL_64(0x789abcdef0123456, x21);
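
For the register-controlled shifts (Lsl/Lsr/Asr/Ror by x6), only the low six bits of the shift register are used, which is why the expected values mask the shift amount with "& 63". A standalone model; the rotate check assumes a test value of 0x0123456789abcdef, which reproduces the result asserted above:

    #include <cassert>
    #include <cstdint>

    // Variable shifts use the shift amount modulo the register width (64 here).
    uint64_t lslv(uint64_t v, uint64_t s) { return v << (s & 63); }
    uint64_t lsrv(uint64_t v, uint64_t s) { return v >> (s & 63); }
    uint64_t asrv(uint64_t v, uint64_t s) {
      return static_cast<uint64_t>(static_cast<int64_t>(v) >> (s & 63));
    }
    uint64_t rorv(uint64_t v, uint64_t s) {
      const unsigned r = s & 63;
      return r == 0 ? v : (v >> r) | (v << (64 - r));
    }

    int main() {
      const uint64_t value = 0x0123456789abcdef;
      assert(lslv(value, 68) == lslv(value, 4));  // a shift of 64 + 4 acts like 4
      assert(lsrv(value, 68) == value >> 4);
      assert(asrv(0x8000000000000000, 63) == 0xffffffffffffffff);  // ASR keeps the sign
      assert(rorv(value, 36) == 0x789abcdef0123456);               // the rotate checked above
      return 0;
    }
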
9019 __ Sbfiz(x21, x2, 8, 16);
9046 ASSERT_EQUAL_64(0x0000000000321000, x21);
9089 __ Uxth(x21, x1);
9111 ASSERT_EQUAL_64(0x000000000000cdef, x21);
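
Sbfiz(x21, x2, 8, 16) takes the 16-bit field at bit 0 of x2, sign-extends it, and places it at bit 8; Uxth zero-extends the low halfword. A model of both, with assumed source values that reproduce the asserted results:

    #include <cassert>
    #include <cstdint>

    // SBFIZ: sign-extend the low <width> bits of the source, then shift up to <lsb>.
    uint64_t sbfiz(uint64_t src, unsigned lsb, unsigned width) {
      const int64_t field = static_cast<int64_t>(src << (64 - width)) >> (64 - width);
      return static_cast<uint64_t>(field) << lsb;
    }

    // UXTH: zero-extend the low 16 bits.
    uint64_t uxth(uint64_t src) { return src & 0xffff; }

    int main() {
      // Assuming the low 16 bits of x2 are 0x3210: placing them at bit 8 gives 0x321000.
      assert(sbfiz(0x89ab3210, 8, 16) == 0x0000000000321000);
      // A field with its top bit set is sign-extended before being shifted into place.
      assert(sbfiz(0x8000, 8, 16) == 0xffffffffff800000);
      // Assuming the low 16 bits of x1 are 0xcdef, Uxth keeps just those bits.
      assert(uxth(0x0123456789abcdef) == 0x000000000000cdef);
      return 0;
    }
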
10000 __ Mov(x21, 0x7ff0000000000001); // Double precision NaN.
10001 __ Fmov(d21, x21);
10118 __ Mov(x21, 0x7ff0000000000001); // Double precision NaN.
10119 __ Fmov(d21, x21);
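
Fmov(d21, x21) copies the raw bit pattern into the FP register, so 0x7ff0000000000001 becomes a double-precision NaN (all-ones exponent, non-zero mantissa; with the quiet bit clear it is the signalling kind). A quick standalone check of that encoding:

    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <cstring>

    int main() {
      // The same raw pattern the test moves into d21 with Fmov.
      const uint64_t bits = 0x7ff0000000000001;
      double d;
      std::memcpy(&d, &bits, sizeof(d));  // bit-for-bit move, like Fmov(d, x)
      assert(std::isnan(d));
      assert(((bits >> 52) & 0x7ff) == 0x7ff);   // exponent field all ones
      assert((bits & 0x000fffffffffffff) != 0);  // non-zero mantissa -> NaN, not infinity
      assert(((bits >> 51) & 1) == 0);           // quiet bit clear: signalling NaN
      return 0;
    }
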
11257 __ Fcvtas(x21, s21);
11291 ASSERT_EQUAL_64(0x8000000000000000, x21);
11359 __ Fcvtau(x21, s21);
11391 ASSERT_EQUAL_64(0, x21);
11460 x21, s21);
11494 ASSERT_EQUAL_64(0x8000000000000000, x21);
11563 __ Fcvtmu(x21, s21);
11596 ASSERT_EQUAL_64(0, x21);
11666 __ Fcvtns(x21, s21);
11700 ASSERT_EQUAL_64(0x8000000000000000, x21);
11768 __ Fcvtnu(x21, s21);
11800 ASSERT_EQUAL_64(0, x21);
11869 __ Fcvtzs(x21, s21);
11903 ASSERT_EQUAL_64(0x8000000000000000, x21);
11971 __ Fcvtzu(x21, s21);
12004 ASSERT_EQUAL_64(0, x21);
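
The Fcvt* checks on x21 exercise the saturating corners of float-to-integer conversion: inputs below the signed range convert to INT64_MIN (the 0x8000000000000000 expected above), negative inputs convert to 0 in the unsigned forms, and NaN converts to 0 in every form; the variants differ only in rounding (ties-away, toward minus infinity, to nearest even, toward zero). A model of the round-toward-zero pair, Fcvtzs/Fcvtzu; the other variants change only the rounding step:

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    // FCVTZS: saturating float -> signed 64-bit, rounding toward zero.
    int64_t fcvtzs(double d) {
      if (std::isnan(d)) return 0;
      if (d >= 9223372036854775808.0) return INT64_MAX;  // at or above 2^63
      if (d < -9223372036854775808.0) return INT64_MIN;  // below -2^63
      return static_cast<int64_t>(std::trunc(d));
    }

    // FCVTZU: saturating float -> unsigned 64-bit, rounding toward zero.
    uint64_t fcvtzu(double d) {
      if (std::isnan(d) || d <= -1.0) return 0;
      if (d >= 18446744073709551616.0) return UINT64_MAX;  // at or above 2^64
      return static_cast<uint64_t>(std::trunc(d));
    }

    int main() {
      // Large negative inputs saturate to INT64_MIN, the 0x8000000000000000 seen above.
      assert(static_cast<uint64_t>(fcvtzs(-1e40)) == 0x8000000000000000);
      // Negative inputs to the unsigned form give 0, as the unsigned checks expect.
      assert(fcvtzu(-2.5) == 0);
      assert(fcvtzs(2.5) == 2 && fcvtzu(2.5) == 2);  // truncation toward zero
      return 0;
    }
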
13148 Register reg_index = x21;
14229 __ Mov(x21, x0);
14301 ASSERT_EQUAL_64(13, x21);
17588 __ Mov(x21, x20);
17589 __ Dc(CVAU, x21);
17590 __ Mov(x22, x21);