Lines Matching refs:CurOp
736 unsigned CurOp = X86II::getOperandBias(Desc);
758 CurOp += X86::AddrNumOperands;
761 EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
764 VEX_4V = getVEXRegisterEncoding(MI, CurOp);
765 if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
767 CurOp++;
770 const MCOperand &MO = MI.getOperand(CurOp);
789 if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
791 if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
793 CurOp++;
796 EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
799 VEX_4V = getVEXRegisterEncoding(MI, CurOp);
800 if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
802 CurOp++;
818 // CurOp points to start of the MemoryOperand,
820 // CurOp + X86::AddrNumOperands will point to src3.
821 VEX_4V = getVEXRegisterEncoding(MI, CurOp+X86::AddrNumOperands);
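The comment hits at 818-820 describe the operand layout this line relies on: for these forms the register that lands in VEX.vvvv (src3) sits immediately after the five sub-operands of the memory reference. Below is a minimal standalone sketch of that layout, not LLVM code; it assumes the real X86::AddrNumOperands == 5 (base, scale, index, displacement, segment), and Operand plus the names dst/src1/src3 are illustrative only.

#include <cassert>
#include <cstdio>
#include <vector>

// Mirrors X86::AddrNumOperands: base, scale, index, disp, segment.
enum { AddrNumOperands = 5 };

struct Operand { const char *Name; };

int main() {
  // Hypothetical operand order for a form where "CurOp points to start
  // of the MemoryOperand": dst, src1, 5-part memory reference, src3.
  std::vector<Operand> Ops = {{"dst"},   {"src1"}, {"base"},    {"scale"},
                              {"index"}, {"disp"}, {"segment"}, {"src3"}};
  unsigned CurOp = 2; // start of the memory operand
  unsigned Src3 = CurOp + AddrNumOperands;
  assert(Src3 < Ops.size());
  std::printf("VEX.vvvv encodes %s\n", Ops[Src3].Name); // -> src3
  return 0;
}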
831 VEX_4V = getVEXRegisterEncoding(MI, CurOp);
832 if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
834 CurOp++;
838 EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
857 if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
859 if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
861 CurOp++;
864 EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
867 VEX_4V = getVEXRegisterEncoding(MI, CurOp);
868 if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
870 CurOp++;
874 CurOp++;
876 if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
878 if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
880 CurOp++;
882 VEX_4V = getVEXRegisterEncoding(MI, CurOp++);
886 assert(RcOperand >= CurOp);
897 if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
899 if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
901 CurOp++;
904 EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
907 VEX_4V = getVEXRegisterEncoding(MI, CurOp);
908 if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
910 CurOp++;
913 if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
915 if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
927 VEX_4V = getVEXRegisterEncoding(MI, CurOp);
928 if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
930 CurOp++;
933 EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
935 if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
937 if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
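The matches above (736-937, the prefix-emission forms) all repeat one idiom: test the register at CurOp against the REX-extended range (isX86_64ExtendedReg) and the EVEX-only upper range (is32ExtendedReg), clear the corresponding inverted prefix bit, then advance CurOp; the write-mask operand, when present, is consumed with getWriteMaskRegisterEncoding(MI, CurOp++). The standalone sketch below shows that walk under stated assumptions: the predicates are hypothetical stand-ins (encodings 8-15 for the classic extension bit, 16-31 for the EVEX-only one), and it collapses the per-position bits of the real emitter (R/X/B/V') into a single pair.

#include <cstdio>
#include <vector>

struct Reg { unsigned Num; };

// Hypothetical stand-ins for the X86II predicates.
static bool isX86_64ExtendedReg(const Reg &R) { return R.Num >= 8 && R.Num <= 31; }
static bool is32ExtendedReg(const Reg &R) { return R.Num >= 16 && R.Num <= 31; }

struct Prefix {
  // Stored inverted, as in the VEX/EVEX encodings (1 = not extended).
  unsigned R = 1, R2 = 1;
};

// The repeated idiom: fold one register operand into the prefix bits and
// return the advanced operand index (the `CurOp++` in the matches above).
static unsigned encodeRegOperand(const std::vector<Reg> &Ops, unsigned CurOp,
                                 Prefix &P) {
  if (isX86_64ExtendedReg(Ops[CurOp]))
    P.R = 0;
  if (is32ExtendedReg(Ops[CurOp]))
    P.R2 = 0;
  return CurOp + 1;
}

int main() {
  std::vector<Reg> Ops = {{3}, {12}, {24}};
  Prefix P;
  unsigned CurOp = 0;
  while (CurOp != Ops.size())
    CurOp = encodeRegOperand(Ops, CurOp, P);
  std::printf("R=%u R2=%u\n", P.R, P.R2); // reg 12 clears R, reg 24 clears both
  return 0;
}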
1187 unsigned CurOp = X86II::getOperandBias(Desc);
1208 if (MemoryOperand != -1) MemoryOperand += CurOp;
1282 CurOp += 3; // Consume operands.
1295 CurOp += 2; // Consume operands.
1305 ++CurOp; // Consume operand.
1316 EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
1319 ++CurOp; // skip segment operand
1323 EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
1326 EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 1, FK_Data_1, CurByte,
1331 EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
1334 EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(), 2, FK_Data_2, CurByte,
1339 CurOp++)), CurByte, OS);
1344 SrcRegNum = CurOp + 1;
1352 EmitRegModRMByte(MI.getOperand(CurOp),
1354 CurOp = SrcRegNum + 1;
1359 SrcRegNum = CurOp + X86::AddrNumOperands;
1367 EmitMemModRMByte(MI, CurOp,
1370 CurOp = SrcRegNum + 1;
1375 SrcRegNum = CurOp + 1;
1387 GetX86RegNum(MI.getOperand(CurOp)), CurByte, OS);
1390 CurOp = HasMemOp4 ? SrcRegNum : SrcRegNum + 1;
1392 ++CurOp;
1400 unsigned FirstMemOp = CurOp+1;
1416 EmitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)),
1418 CurOp += AddrOperands + 1;
1420 ++CurOp;
1430 ++CurOp;
1432 ++CurOp;
1435 EmitRegModRMByte(MI.getOperand(CurOp++),
1447 ++CurOp;
1449 ++CurOp;
1452 EmitMemModRMByte(MI, CurOp, (Form == X86II::MRMXm) ? 0 : Form-X86II::MRM0m,
1454 CurOp += X86::AddrNumOperands;
1538 while (CurOp != NumOps && NumOps - CurOp <= 2) {
1543 : CurOp);
1544 ++CurOp;
1550 if (CurOp != NumOps) {
1551 const MCOperand &MIMM = MI.getOperand(CurOp++);
1561 EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
1572 if (/*!Desc.isVariadic() &&*/ CurOp != NumOps) {
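The last matches (1538-1572) show the drain-and-verify tail of encodeInstruction: once the ModRM byte is out, any remaining operands are emitted as immediates via EmitImmediate(MI.getOperand(CurOp++), ...), and the final hit at 1572 is the debug-build check that CurOp reached NumOps, i.e. that every operand was consumed. A minimal sketch of that pattern follows; Operand and emitImm are illustrative stand-ins, not the LLVM API.

#include <cassert>
#include <cstdio>
#include <vector>

struct Operand { int Imm; };

// Stand-in for EmitImmediate: just prints the value at the given width.
static void emitImm(const Operand &Op, unsigned Size) {
  std::printf("imm%u: %d\n", Size * 8, Op.Imm);
}

int main() {
  std::vector<Operand> Ops = {{0x12}, {0x34}};
  unsigned CurOp = 0, NumOps = Ops.size();
  // Drain trailing immediates, mirroring EmitImmediate(MI.getOperand(CurOp++), ...).
  while (CurOp != NumOps)
    emitImm(Ops[CurOp++], /*Size=*/1);
  // Mirrors the final `if (CurOp != NumOps)` sanity check at 1572.
  assert(CurOp == NumOps && "operands left over after encoding");
  return 0;
}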