Lines Matching defs:shift
250 static void gen_ubfx(TCGv var, int shift, uint32_t mask)
252 if (shift)
253 tcg_gen_shri_i32(var, var, shift);
258 static void gen_sbfx(TCGv var, int shift, int width)
262 if (shift)
263 tcg_gen_sari_i32(var, var, shift);
264 if (shift + width < 32) {
273 static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
276 tcg_gen_shli_i32(val, val, shift);
277 tcg_gen_andi_i32(base, base, ~(mask << shift));
422 static void shifter_out_im(TCGv var, int shift)
425 if (shift == 0) {
428 tcg_gen_shri_i32(tmp, var, shift);
429 if (shift != 31)
436 /* Shift by immediate. Includes special handling for shift == 0. */
437 static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
441 if (shift != 0) {
443 shifter_out_im(var, 32 - shift);
444 tcg_gen_shli_i32(var, var, shift);
448 if (shift == 0) {
456 shifter_out_im(var, shift - 1);
457 tcg_gen_shri_i32(var, var, shift);
461 if (shift == 0)
462 shift = 32;
464 shifter_out_im(var, shift - 1);
465 if (shift == 32)
466 shift = 31;
467 tcg_gen_sari_i32(var, var, shift);
470 if (shift != 0) {
472 shifter_out_im(var, shift - 1);
473 tcg_gen_rotri_i32(var, var, shift); break;
487 TCGv shift, int flags)
491 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
492 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
493 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
494 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
498 case 0: gen_helper_shl(var, var, shift); break;
499 case 1: gen_helper_shr(var, var, shift); break;
500 case 2: gen_helper_sar(var, var, shift); break;
501 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
502 tcg_gen_rotr_i32(var, var, shift); break;
505 tcg_temp_free_i32(shift);
847 int val, rm, shift, shiftop;
858 /* shift/register */
860 shift = (insn >> 7) & 0x1f;
863 gen_arm_shift_im(offset, shiftop, shift, 0);
1036 static inline void gen_vfp_##name(int dp, int shift, int neon) \
1038 TCGv tmp_shift = tcg_const_i32(shift); \
2696 static void gen_neon_dup_u8(TCGv var, int shift)
2699 if (shift)
2700 tcg_gen_shri_i32(var, var, shift);
3915 int shift;
4013 shift = ((insn >> 5) & 3) * 8;
4017 shift = ((insn >> 6) & 1) * 16;
4021 shift = 0;
4080 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
4086 if (shift)
4087 tcg_gen_shri_i32(tmp, tmp, shift);
4172 static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
4178 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
4179 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
4184 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
4185 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
4192 case 1: gen_helper_neon_shl_u16(var, var, shift); break;
4193 case 2: gen_helper_neon_shl_u32(var, var, shift); break;
4198 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
4199 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
4498 int shift;
4606 /* Shift instruction operands are reversed. */
4953 /* Two registers and shift. */
4956 /* 64-bit shift. */
4966 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4967 /* To avoid excessive duplication of ops we implement shift
4968 by immediate using the variable shift operations. */
4970 /* Shift by immediate:
4978 /* Right shifts are encoded as N - shift, where N is the
4981 shift = shift - (1 << (size + 3));
4989 imm = (uint8_t) shift;
4994 imm = (uint16_t) shift;
4999 imm = shift;
5050 if (shift < -63 || shift > 63) {
5054 mask = 0xffffffffffffffffull >> -shift;
5056 mask = 0xffffffffffffffffull << shift;
5119 mask = 0xff >> -shift;
5121 mask = (uint8_t)(0xff << shift);
5127 mask = 0xffff >> -shift;
5129 mask = (uint16_t)(0xffff << shift);
5133 if (shift < -31 || shift > 31) {
5137 mask = 0xffffffffu >> -shift;
5139 mask = 0xffffffffu << shift;
5155 /* Shift by immediate and narrow:
5161 shift = shift - (1 << (size + 3));
5164 tmp64 = tcg_const_i64(shift);
5194 imm = (uint16_t)shift;
5198 imm = (uint32_t)shift;
5240 if (shift != 0) {
5241 /* The shift is less than the width of the source
5242 type, so we can just shift the whole register. */
5243 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
5244 /* Widen the result of shift: we need to clear
5252 imm = (0xffu >> (8 - shift));
5255 imm = 0xffff >> (16 - shift);
5258 imm = 0xffffffff >> (32 - shift);
5276 * hence this 32-shift where the ARM ARM has 64-imm6.
5278 shift = 32 - shift;
5283 gen_vfp_ulto(0, shift, 1);
5285 gen_vfp_slto(0, shift, 1);
5288 gen_vfp_toul(0, shift, 1);
5290 gen_vfp_tosl(0, shift, 1);
6575 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
6839 shift = ((insn >> 8) & 0xf) * 2;
6840 if (shift)
6841 val = (val >> shift) | (val << (32 - shift));
7010 shift = ((insn >> 8) & 0xf) * 2;
7011 if (shift) {
7012 val = (val >> shift) | (val << (32 - shift));
7016 if (logic_cc && shift) {
7025 shift = (insn >> 7) & 0x1f;
7026 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
7419 shift = (insn >> 7) & 0x1f;
7422 if (shift == 0)
7423 shift = 31;
7424 tcg_gen_sari_i32(tmp2, tmp2, shift);
7429 if (shift)
7430 tcg_gen_shli_i32(tmp2, tmp2, shift);
7440 shift = (insn >> 7) & 0x1f;
7442 if (shift == 0)
7443 shift = 31;
7444 tcg_gen_sari_i32(tmp, tmp, shift);
7446 tcg_gen_shli_i32(tmp, tmp, shift);
7479 shift = (insn >> 10) & 3;
7481 rotate, a shift is sufficient. */
7482 if (shift != 0)
7483 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
7603 shift = (insn >> 7) & 0x1f;
7605 i = i + 1 - shift;
7614 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
7623 shift = (insn >> 7) & 0x1f;
7625 if (shift + i > 32)
7629 gen_ubfx(tmp, shift, (1u << i) - 1);
7631 gen_sbfx(tmp, shift, i);
7931 uint32_t insn, imm, shift, offset;
8212 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
8215 if (shift == 0)
8216 shift = 31;
8217 tcg_gen_sari_i32(tmp2, tmp2, shift);
8222 if (shift)
8223 tcg_gen_shli_i32(tmp2, tmp2, shift);
8231 /* Data processing register constant shift. */
8241 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8244 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
8260 case 0: /* Register controlled shift. */
8274 shift = (insn >> 4) & 3;
8276 rotate, a shift is sufficient. */
8277 if (shift != 0)
8278 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
8302 shift = (insn >> 4) & 7;
8303 if ((op & 3) == 3 || (shift & 3) == 3)
8307 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
8703 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
8713 if (shift + imm > 32)
8716 gen_sbfx(tmp, shift, imm);
8720 if (shift + imm > 32)
8723 gen_ubfx(tmp, shift, (1u << imm) - 1);
8726 if (imm < shift)
8728 imm = imm + 1 - shift;
8731 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
8738 if (shift) {
8740 tcg_gen_sari_i32(tmp, tmp, shift);
8742 tcg_gen_shli_i32(tmp, tmp, shift);
8747 if ((op & 1) && shift == 0)
8753 if ((op & 1) && shift == 0)
8801 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
8803 switch (shift) {
8819 shift = (shift << 1) | (imm >> 7);
8821 imm = imm << (32 - shift);
8915 shift = (insn >> 4) & 0xf;
8916 if (shift > 3) {
8921 if (shift)
8922 tcg_gen_shli_i32(tmp, tmp, shift);
9001 uint32_t val, insn, op, rm, rn, rd, shift, cond;
9053 /* shift immediate */
9055 shift = (insn >> 6) & 0x1f;
9057 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
9160 /* the shift/rotate ops want the operands backwards */
9583 shift = CPSR_A | CPSR_I | CPSR_F;
9585 shift = 0;
9586 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);