%default { "naninst":"li rTEMP, -1" }
%verify "executed"
%verify "basic lt, gt, eq"
%verify "left arg NaN"
%verify "right arg NaN"
/*
 * Compare two double-precision floating-point values.  Puts 0, 1, or -1
 * into the destination register based on the results of the comparison.
 *
 * Provide a "naninst" instruction that puts 1 or -1 into rTEMP depending
 * on what value we'd like to return when one of the operands is NaN.
 *
 * The operation we're implementing is:
 *   if (x == y)
 *     return 0;
 *   else if (x < y)
 *     return -1;
 *   else if (x > y)
 *     return 1;
 *   else
 *     return {-1,1};  // one or both operands was NaN
 *
 * On entry:
 *    a0 = &op1 [vBB]
 *    a1 = &op2 [vCC]
 *
 * for: cmpl-double, cmpg-double
 */
/* op vAA, vBB, vCC */

/* "classic" form */
#ifdef SOFT_FLOAT
    move rOBJ, a0                       # save a0
    move rBIX, a1                       # save a1
    LOAD64(rARG0, rARG1, rOBJ)          # a0/a1 <- vBB/vBB+1
    LOAD64(rARG2, rARG3, rBIX)          # a2/a3 <- vCC/vCC+1
    JAL(__eqdf2)                        # v0 <- 0 if vBB == vCC
    li rTEMP, 0                         # vAA <- 0
    beqz v0, ${opcode}_finish
    LOAD64(rARG0, rARG1, rOBJ)          # a0/a1 <- vBB/vBB+1
    LOAD64(rARG2, rARG3, rBIX)          # a2/a3 <- vCC/vCC+1
    JAL(__ltdf2)                        # v0 < 0 if vBB < vCC
    li rTEMP, -1                        # vAA <- -1
    bltz v0, ${opcode}_finish
    LOAD64(rARG0, rARG1, rOBJ)          # a0/a1 <- vBB/vBB+1
    LOAD64(rARG2, rARG3, rBIX)          # a2/a3 <- vCC/vCC+1
    JAL(__gtdf2)                        # v0 > 0 if vBB > vCC
    li rTEMP, 1                         # vAA <- 1
    bgtz v0, ${opcode}_finish
#else
    LOAD64_F(fs0, fs0f, a0)             # fs0 <- vBB
    LOAD64_F(fs1, fs1f, a1)             # fs1 <- vCC
    c.olt.d fcc0, fs0, fs1              # is vBB < vCC? (ordered: false on NaN)
    li rTEMP, -1
    bc1t fcc0, ${opcode}_finish
    c.olt.d fcc0, fs1, fs0              # is vCC < vBB?
    li rTEMP, 1
    bc1t fcc0, ${opcode}_finish
    c.eq.d fcc0, fs0, fs1               # is vBB == vCC?
    li rTEMP, 0
    bc1t fcc0, ${opcode}_finish
#endif

    $naninst                            # no ordering holds: an operand is NaN

${opcode}_finish:
    move v0, rTEMP                      # v0 <- vAA
    RETURN
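
/*
 * Note on the SOFT_FLOAT path (descriptive only, not part of the handler):
 * __eqdf2, __ltdf2, and __gtdf2 are the standard libgcc soft-float
 * comparison helpers.  __eqdf2 returns 0 only when neither operand is NaN
 * and the values compare equal; __ltdf2 returns a negative value only for
 * a strict ordered less-than; __gtdf2 returns a positive value only for a
 * strict ordered greater-than.  A NaN operand therefore fails all three
 * branch tests above, and control falls through to $naninst -- mirroring
 * the ordered c.olt.d/c.eq.d compares used on the hard-float path.
 */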
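
/*
 * Usage sketch: this template is the shared body for both opcodes named
 * in the header.  cmpl-double takes the default "naninst" above (NaN
 * yields -1), while the cmpg-double source would override it so that NaN
 * yields 1.  The include line below illustrates the usual mterp override
 * convention; it is an assumption about the sibling file, not a verbatim
 * quote of it:
 *
 *   %include "mips/OP_CMPL_DOUBLE.S" { "naninst":"li rTEMP, 1" }
 */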