%default { "naninst":"li rTEMP, -1" }
%verify "executed"
%verify "basic lt, gt, eq"
%verify "left arg NaN"
%verify "right arg NaN"
/*
 * Compare two floating-point values. Puts 0, 1, or -1 into the
 * destination register based on the results of the comparison.
 *
 * Provide a "naninst" instruction that puts 1 or -1 into rTEMP depending
 * on what value we'd like to return when one of the operands is NaN.
 *
 * See OP_CMPL_FLOAT for an explanation.
 *
 * For: cmpl-double, cmpg-double
 */
/* op vAA, vBB, vCC */

    FETCH(a0, 1)                           # a0 <- CCBB
    and     rOBJ, a0, 255                  # rOBJ <- BB
    srl     rBIX, a0, 8                    # rBIX <- CC
    EAS2(rOBJ, rFP, rOBJ)                  # rOBJ <- &fp[BB]
    EAS2(rBIX, rFP, rBIX)                  # rBIX <- &fp[CC]
#ifdef SOFT_FLOAT
    LOAD64(rARG0, rARG1, rOBJ)             # a0/a1 <- vBB/vBB+1
    LOAD64(rARG2, rARG3, rBIX)             # a2/a3 <- vCC/vCC+1
    JAL(__eqdf2)                           # v0 <- 0 if vBB == vCC (ordered)
    li      rTEMP, 0
    beqz    v0, ${opcode}_finish           # equal: result is 0

    LOAD64(rARG0, rARG1, rOBJ)             # a0/a1 <- vBB/vBB+1 (not preserved across call)
    LOAD64(rARG2, rARG3, rBIX)             # a2/a3 <- vCC/vCC+1
    JAL(__ltdf2)                           # v0 < 0 if vBB < vCC (ordered)
    li      rTEMP, -1
    bltz    v0, ${opcode}_finish           # less than: result is -1
    LOAD64(rARG0, rARG1, rOBJ)             # a0/a1 <- vBB/vBB+1 (reload for __gtdf2)
    b       ${opcode}_continue
#else
    LOAD64_F(ft0, ft0f, rOBJ)              # ft0 <- vBB/vBB+1
    LOAD64_F(ft1, ft1f, rBIX)              # ft1 <- vCC/vCC+1
    c.olt.d fcc0, ft0, ft1                 # ordered less-than; false if either is NaN
    li      rTEMP, -1
    bc1t    fcc0, ${opcode}_finish
    c.olt.d fcc0, ft1, ft0
    li      rTEMP, 1
    bc1t    fcc0, ${opcode}_finish
    c.eq.d  fcc0, ft0, ft1                 # ordered equal; false if either is NaN
    li      rTEMP, 0
    bc1t    fcc0, ${opcode}_finish
    b       ${opcode}_nan                  # all three compares false: NaN operand
#endif
%break

${opcode}_nan:
    $naninst
    b       ${opcode}_finish

#ifdef SOFT_FLOAT
${opcode}_continue:
    LOAD64(rARG2, rARG3, rBIX)             # a2/a3 <- vCC/vCC+1
    JAL(__gtdf2)                           # v0 > 0 if vBB > vCC (ordered)
    li      rTEMP, 1                       # rTEMP <- 1 if v0 > 0
    blez    v0, ${opcode}_nan              # not greater: unordered, NaN operand
#endif

${opcode}_finish:
    GET_OPA(rOBJ)                          # rOBJ <- AA
    FETCH_ADVANCE_INST(2)                  # advance rPC, load rINST
    GET_INST_OPCODE(t0)                    # extract opcode from rINST
    SET_VREG_GOTO(rTEMP, rOBJ, t0)         # vAA <- rTEMP
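
/*
 * Editorial sketch (not part of the handler): Dalvik's cmp{l,g}-double
 * reduces to the C below. The helper name and its nan_result parameter
 * are hypothetical, added only to illustrate the NaN bias that the
 * naninst instruction supplies: -1 for cmpl-double (this file's default),
 * +1 for cmpg-double. The three ordered tests mirror the hard-float path
 * above, where each c.olt.d/c.eq.d compare is false when an operand is NaN.
 *
 *     // nan_result: -1 for cmpl-double, +1 for cmpg-double
 *     static int cmp_double(double a, double b, int nan_result) {
 *         if (a < b)  return -1;     // ordered less-than
 *         if (a > b)  return  1;     // ordered greater-than
 *         if (a == b) return  0;     // ordered equal
 *         return nan_result;         // unordered: at least one NaN
 *     }
 */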
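
/*
 * Editorial sketch (not part of the handler): the SOFT_FLOAT path calls
 * libgcc's soft-float comparison helpers. Assuming their documented
 * contracts (__eqdf2 returns zero only when neither operand is NaN and
 * a == b; __ltdf2 returns a negative value only when a < b; __gtdf2
 * returns a positive value only when a > b), the control flow above is
 * equivalent to this hypothetical C helper:
 *
 *     extern int __eqdf2(double a, double b);
 *     extern int __ltdf2(double a, double b);
 *     extern int __gtdf2(double a, double b);
 *
 *     static int cmp_double_soft(double a, double b, int nan_result) {
 *         if (__eqdf2(a, b) == 0) return  0;   // _finish with rTEMP = 0
 *         if (__ltdf2(a, b) <  0) return -1;   // _finish with rTEMP = -1
 *         if (__gtdf2(a, b) >  0) return  1;   // _finish with rTEMP = 1
 *         return nan_result;                   // _nan: run naninst
 *     }
 */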