
Lines Matching refs:op

299    AMD64RMI* op       = LibVEX_Alloc(sizeof(AMD64RMI));
300 op->tag = Armi_Imm;
301 op->Armi.Imm.imm32 = imm32;
302 return op;
305 AMD64RMI* op = LibVEX_Alloc(sizeof(AMD64RMI));
306 op->tag = Armi_Reg;
307 op->Armi.Reg.reg = reg;
308 return op;
311 AMD64RMI* op = LibVEX_Alloc(sizeof(AMD64RMI));
312 op->tag = Armi_Mem;
313 op->Armi.Mem.am = am;
314 return op;
317 static void ppAMD64RMI_wrk ( AMD64RMI* op, Bool lo32 ) {
318 switch (op->tag) {
320 vex_printf("$0x%x", op->Armi.Imm.imm32);
324 ppHRegAMD64_lo32(op->Armi.Reg.reg);
326 ppHRegAMD64(op->Armi.Reg.reg);
329 ppAMD64AMode(op->Armi.Mem.am);
335 void ppAMD64RMI ( AMD64RMI* op ) {
336 ppAMD64RMI_wrk(op, False/*!lo32*/);
338 void ppAMD64RMI_lo32 ( AMD64RMI* op ) {
339 ppAMD64RMI_wrk(op, True/*lo32*/);
345 static void addRegUsage_AMD64RMI ( HRegUsage* u, AMD64RMI* op ) {
346 switch (op->tag) {
350 addHRegUse(u, HRmRead, op->Armi.Reg.reg);
353 addRegUsage_AMD64AMode(u, op->Armi.Mem.am);
360 static void mapRegs_AMD64RMI ( HRegRemap* m, AMD64RMI* op ) {
361 switch (op->tag) {
365 op->Armi.Reg.reg = lookupHRegRemap(m, op->Armi.Reg.reg);
368 mapRegs_AMD64AMode(m, op->Armi.Mem.am);
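
The AMD64RMI matches above all touch one tagged union: a constructor per arm, the printers, and the register-usage/remap walkers that switch on the tag. A minimal sketch of the shape those accesses imply follows; the AMD64RMITag name and the member types (UInt, HReg, AMD64AMode*) are assumptions beyond what the matched lines confirm, and the authoritative declaration lives in the accompanying header.

typedef
   struct {
      AMD64RMITag tag;                  /* Armi_Imm, Armi_Reg or Armi_Mem */
      union {
         struct { UInt imm32; }     Imm;   /* 32-bit immediate */
         struct { HReg reg; }       Reg;   /* register operand */
         struct { AMD64AMode* am; } Mem;   /* memory operand   */
      } Armi;
   }
   AMD64RMI;
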
379 AMD64RI* op = LibVEX_Alloc(sizeof(AMD64RI));
380 op->tag = Ari_Imm;
381 op->Ari.Imm.imm32 = imm32;
382 return op;
385 AMD64RI* op = LibVEX_Alloc(sizeof(AMD64RI));
386 op->tag = Ari_Reg;
387 op->Ari.Reg.reg = reg;
388 return op;
391 void ppAMD64RI ( AMD64RI* op ) {
392 switch (op->tag) {
394 vex_printf("$0x%x", op->Ari.Imm.imm32);
397 ppHRegAMD64(op->Ari.Reg.reg);
407 static void addRegUsage_AMD64RI ( HRegUsage* u, AMD64RI* op ) {
408 switch (op->tag) {
412 addHRegUse(u, HRmRead, op->Ari.Reg.reg);
419 static void mapRegs_AMD64RI ( HRegRemap* m, AMD64RI* op ) {
420 switch (op->tag) {
424 op->Ari.Reg.reg = lookupHRegRemap(m, op->Ari.Reg.reg);
435 AMD64RM* op = LibVEX_Alloc(sizeof(AMD64RM));
436 op->tag = Arm_Reg;
437 op->Arm.Reg.reg = reg;
438 return op;
441 AMD64RM* op = LibVEX_Alloc(sizeof(AMD64RM));
442 op->tag = Arm_Mem;
443 op->Arm.Mem.am = am;
444 return op;
447 void ppAMD64RM ( AMD64RM* op ) {
448 switch (op->tag) {
450 ppAMD64AMode(op->Arm.Mem.am);
453 ppHRegAMD64(op->Arm.Reg.reg);
463 static void addRegUsage_AMD64RM ( HRegUsage* u, AMD64RM* op, HRegMode mode ) {
464 switch (op->tag) {
468 addRegUsage_AMD64AMode(u, op->Arm.Mem.am);
473 addHRegUse(u, mode, op->Arm.Reg.reg);
480 static void mapRegs_AMD64RM ( HRegRemap* m, AMD64RM* op )
482 switch (op->tag) {
484 mapRegs_AMD64AMode(m, op->Arm.Mem.am);
487 op->Arm.Reg.reg = lookupHRegRemap(m, op->Arm.Reg.reg);
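
The AMD64RI (register-or-immediate) and AMD64RM (register-or-memory) operands matched above follow the same tagged-union pattern with one arm fewer each; a sketch under the same assumptions:

typedef
   struct {
      AMD64RITag tag;                   /* Ari_Imm or Ari_Reg */
      union {
         struct { UInt imm32; } Imm;
         struct { HReg reg; }   Reg;
      } Ari;
   }
   AMD64RI;

typedef
   struct {
      AMD64RMTag tag;                   /* Arm_Reg or Arm_Mem */
      union {
         struct { HReg reg; }       Reg;
         struct { AMD64AMode* am; } Mem;
      } Arm;
   }
   AMD64RM;
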
506 HChar* showAMD64UnaryOp ( AMD64UnaryOp op ) {
507 switch (op) {
514 HChar* showAMD64AluOp ( AMD64AluOp op ) {
515 switch (op) {
530 HChar* showAMD64ShiftOp ( AMD64ShiftOp op ) {
531 switch (op) {
539 HChar* showA87FpOp ( A87FpOp op ) {
540 switch (op) {
564 HChar* showAMD64SseOp ( AMD64SseOp op ) {
565 switch (op) {
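
Only the switch headers of the show* helpers match refs:op, but their shared job is to map an op enumerator to its bare mnemonic; the instruction printer further down then appends a width suffix ("%sq" for the 64-bit ALU forms, "%sl" for the 32-bit ones). A sketch of one such helper, assuming the enumerator spellings and mnemonics beyond the Aalu_MOV, Aalu_CMP and Aalu_MUL values that do appear in the listing:

HChar* showAMD64AluOp ( AMD64AluOp op ) {
   switch (op) {
      case Aalu_MOV: return "mov";
      case Aalu_CMP: return "cmp";
      case Aalu_ADD: return "add";
      case Aalu_SUB: return "sub";
      /* ... remaining AMD64AluOp cases ... */
      default: vpanic("showAMD64AluOp");
   }
}
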
645 AMD64Instr* AMD64Instr_Alu64R ( AMD64AluOp op, AMD64RMI* src, HReg dst ) {
648 i->Ain.Alu64R.op = op;
653 AMD64Instr* AMD64Instr_Alu64M ( AMD64AluOp op, AMD64RI* src, AMD64AMode* dst ) {
656 i->Ain.Alu64M.op = op;
659 vassert(op != Aalu_MUL);
662 AMD64Instr* AMD64Instr_Sh64 ( AMD64ShiftOp op, UInt src, HReg dst ) {
665 i->Ain.Sh64.op = op;
677 AMD64Instr* AMD64Instr_Unary64 ( AMD64UnaryOp op, HReg dst ) {
680 i->Ain.Unary64.op = op;
691 AMD64Instr* AMD64Instr_Alu32R ( AMD64AluOp op, AMD64RMI* src, HReg dst ) {
694 i->Ain.Alu32R.op = op;
697 switch (op) {
720 //.. AMD64Instr* AMD64Instr_Sh3232 ( AMD64ShiftOp op, UInt amt, HReg src, HReg dst ) {
723 //.. i->Xin.Sh3232.op = op;
727 //.. vassert(op == Xsh_SHL || op == Xsh_SHR);
845 AMD64Instr* AMD64Instr_A87FpOp ( A87FpOp op )
849 i->Ain.A87FpOp.op = op;
867 //.. AMD64Instr* AMD64Instr_FpUnary ( AMD64FpOp op, HReg src, HReg dst ) {
870 //.. i->Xin.FpUnary.op = op;
875 //.. AMD64Instr* AMD64Instr_FpBinary ( AMD64FpOp op, HReg srcL, HReg srcR, HReg dst ) {
878 //.. i->Xin.FpBinary.op = op;
1003 AMD64Instr* AMD64Instr_Sse32Fx4 ( AMD64SseOp op, HReg src, HReg dst ) {
1006 i->Ain.Sse32Fx4.op = op;
1009 vassert(op != Asse_MOV);
1012 AMD64Instr* AMD64Instr_Sse32FLo ( AMD64SseOp op, HReg src, HReg dst ) {
1015 i->Ain.Sse32FLo.op = op;
1018 vassert(op != Asse_MOV);
1021 AMD64Instr* AMD64Instr_Sse64Fx2 ( AMD64SseOp op, HReg src, HReg dst ) {
1024 i->Ain.Sse64Fx2.op = op;
1027 vassert(op != Asse_MOV);
1030 AMD64Instr* AMD64Instr_Sse64FLo ( AMD64SseOp op, HReg src, HReg dst ) {
1033 i->Ain.Sse64FLo.op = op;
1036 vassert(op != Asse_MOV);
1039 AMD64Instr* AMD64Instr_SseReRg ( AMD64SseOp op, HReg re, HReg rg ) {
1042 i->Ain.SseReRg.op = op;
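
The AMD64Instr_* constructors matched above share one pattern: allocate an instruction node, set its tag, fill the corresponding arm of the Ain union (which is where the .op assignments come from), and vassert away op values the back end cannot encode. A sketch of the Alu64R case; the Ain_Alu64R tag constant and the src/dst field names are assumptions beyond the matched lines:

AMD64Instr* AMD64Instr_Alu64R ( AMD64AluOp op, AMD64RMI* src, HReg dst ) {
   AMD64Instr* i     = LibVEX_Alloc(sizeof(AMD64Instr));
   i->tag            = Ain_Alu64R;
   i->Ain.Alu64R.op  = op;
   i->Ain.Alu64R.src = src;
   i->Ain.Alu64R.dst = dst;
   return i;
}
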
1075 vex_printf("%sq ", showAMD64AluOp(i->Ain.Alu64R.op));
1081 vex_printf("%sq ", showAMD64AluOp(i->Ain.Alu64M.op));
1087 vex_printf("%sq ", showAMD64ShiftOp(i->Ain.Sh64.op));
1099 vex_printf("%sq ", showAMD64UnaryOp(i->Ain.Unary64.op));
1109 vex_printf("%sl ", showAMD64AluOp(i->Ain.Alu32R.op));
1125 //.. vex_printf("%sdl ", showAMD64ShiftOp(i->Xin.Sh3232.op));
1234 vex_printf("f%s", showA87FpOp(i->Ain.A87FpOp.op));
1245 //.. vex_printf("g%sD ", showAMD64FpOp(i->Xin.FpUnary.op));
1251 //.. vex_printf("g%sD ", showAMD64FpOp(i->Xin.FpBinary.op));
1365 vex_printf("%sps ", showAMD64SseOp(i->Ain.Sse32Fx4.op));
1371 vex_printf("%sss ", showAMD64SseOp(i->Ain.Sse32FLo.op));
1377 vex_printf("%spd ", showAMD64SseOp(i->Ain.Sse64Fx2.op));
1383 vex_printf("%ssd ", showAMD64SseOp(i->Ain.Sse64FLo.op));
1389 vex_printf("%s ", showAMD64SseOp(i->Ain.SseReRg.op));
1425 if (i->Ain.Alu64R.op == Aalu_MOV) {
1429 if (i->Ain.Alu64R.op == Aalu_CMP) {
1455 vassert(i->Ain.Alu32R.op != Aalu_MOV);
1457 if (i->Ain.Alu32R.op == Aalu_CMP) {
1660 vassert(i->Ain.Sse32Fx4.op != Asse_MOV);
1661 unary = toBool( i->Ain.Sse32Fx4.op == Asse_RCPF
1662 || i->Ain.Sse32Fx4.op == Asse_RSQRTF
1663 || i->Ain.Sse32Fx4.op == Asse_SQRTF );
1669 vassert(i->Ain.Sse32FLo.op != Asse_MOV);
1670 unary = toBool( i->Ain.Sse32FLo.op == Asse_RCPF
1671 || i->Ain.Sse32FLo.op == Asse_RSQRTF
1672 || i->Ain.Sse32FLo.op == Asse_SQRTF );
1678 vassert(i->Ain.Sse64Fx2.op != Asse_MOV);
1679 unary = toBool( i->Ain.Sse64Fx2.op == Asse_RCPF
1680 || i->Ain.Sse64Fx2.op == Asse_RSQRTF
1681 || i->Ain.Sse64Fx2.op == Asse_SQRTF );
1687 vassert(i->Ain.Sse64FLo.op != Asse_MOV);
1688 unary = toBool( i->Ain.Sse64FLo.op == Asse_RCPF
1689 || i->Ain.Sse64FLo.op == Asse_RSQRTF
1690 || i->Ain.Sse64FLo.op == Asse_SQRTF );
1696 if ( (i->Ain.SseReRg.op == Asse_XOR
1697 || i->Ain.SseReRg.op == Asse_CMPEQ32)
1706 addHRegUse(u, i->Ain.SseReRg.op == Asse_MOV
1925 if (i->Ain.Alu64R.op != Aalu_MOV)
1935 if (i->Ain.SseReRg.op != Asse_MOV)
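
The register-usage matches above include one case worth spelling out: an SSE xor or cmpeq32 of a register with itself produces a result independent of the register's old contents, so the allocator may treat the destination as write-only; otherwise the re operand is read and rg is written (for a plain move) or modified. A sketch of the surrounding case, assuming the re/rg field names and that HReg values compare directly in this version of the code:

      case Ain_SseReRg:
         if ( (i->Ain.SseReRg.op == Asse_XOR
               || i->Ain.SseReRg.op == Asse_CMPEQ32)
              && i->Ain.SseReRg.re == i->Ain.SseReRg.rg ) {
            /* old value of rg is irrelevant: record a pure write */
            addHRegUse(u, HRmWrite, i->Ain.SseReRg.rg);
         } else {
            addHRegUse(u, HRmRead, i->Ain.SseReRg.re);
            addHRegUse(u, i->Ain.SseReRg.op == Asse_MOV
                             ? HRmWrite : HRmModify,
                          i->Ain.SseReRg.rg);
         }
         return;
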
2273 //.. /* Emit f<op> %st(0) */
2274 //.. static UChar* do_fop1_st ( UChar* p, AMD64FpOp op )
2276 //.. switch (op) {
2289 //.. default: vpanic("do_fop1_st: unknown op");
2294 //.. /* Emit f<op> %st(i), 1 <= i <= 5 */
2295 //.. static UChar* do_fop2_st ( UChar* p, AMD64FpOp op, Int i )
2299 //.. switch (op) {
2304 //.. default: vpanic("do_fop2_st: unknown op");
2389 if (i->Ain.Alu64R.op == Aalu_MOV) {
2432 if (i->Ain.Alu64R.op == Aalu_MUL) {
2469 switch (i->Ain.Alu64R.op) {
2529 if (i->Ain.Alu64M.op == Aalu_MOV) {
2551 //.. switch (i->Xin.Alu32M.op) {
2581 switch (i->Ain.Sh64.op) {
2610 if (i->Ain.Unary64.op == Aun_NOT) {
2616 if (i->Ain.Unary64.op == Aun_NEG) {
2633 switch (i->Ain.Alu32R.op) {
2760 //.. vassert(i->Xin.Sh3232.op == Xsh_SHL || i->Xin.Sh3232.op == Xsh_SHR);
2764 //.. if (i->Xin.Sh3232.op == Xsh_SHL) {
3110 switch (i->Ain.A87FpOp.op) {
3176 //.. p = do_fop1_st(p, i->Xin.FpUnary.op);
3181 //.. if (i->Xin.FpBinary.op == Xfp_YL2X
3182 //.. || i->Xin.FpBinary.op == Xfp_YL2XP1) {
3191 //.. *p++ = i->Xin.FpBinary.op==Xfp_YL2X ? 0xF1 : 0xF9;
3195 //.. if (i->Xin.FpBinary.op == Xfp_ATAN) {
3207 //.. if (i->Xin.FpBinary.op == Xfp_PREM
3208 //.. || i->Xin.FpBinary.op == Xfp_PREM1
3209 //.. || i->Xin.FpBinary.op == Xfp_SCALE) {
3219 //.. switch (i->Xin.FpBinary.op) {
3236 //.. p = do_fop2_st(p, i->Xin.FpBinary.op,
3476 switch (i->Ain.Sse32Fx4.op) {
3505 switch (i->Ain.Sse64Fx2.op) {
3534 switch (i->Ain.Sse32FLo.op) {
3563 switch (i->Ain.Sse64FLo.op) {
3592 switch (i->Ain.SseReRg.op) {