
Lines Matching refs:aSig

253 | by the denormalized significand `aSig'.  The normalized exponent and
259 normalizeFloat32Subnormal( uint32_t aSig, int16 *zExpPtr, uint32_t *zSigPtr )
263 shiftCount = countLeadingZeros32( aSig ) - 8;
264 *zSigPtr = aSig<<shiftCount;
435 | by the denormalized significand `aSig'. The normalized exponent and
441 normalizeFloat64Subnormal( uint64_t aSig, int16 *zExpPtr, uint64_t *zSigPtr )
445 shiftCount = countLeadingZeros64( aSig ) - 11;
446 *zSigPtr = aSig<<shiftCount;
607 | represented by the denormalized significand `aSig'. The normalized exponent
613 normalizeFloatx80Subnormal( uint64_t aSig, int32 *zExpPtr, uint64_t *zSigPtr )
617 shiftCount = countLeadingZeros64( aSig );
618 *zSigPtr = aSig<<shiftCount;
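
The three normalize*Subnormal helpers above (259-264, 441-446, 613-618) share one technique: count the leading zeros of the nonzero denormalized significand, shift its leading 1 up to the hidden-bit position, and derive the normalized exponent from the shift count. Below is a minimal self-contained sketch of the float32 case; the helper names are illustrative, and the exponent assignment is taken from the rest of the routine, which is not among the matching lines.

    #include <stdint.h>

    /* Illustrative stand-in for countLeadingZeros32(); not the softfloat helper. */
    static int clz32(uint32_t x)
    {
        int n = 0;
        if (x == 0) return 32;
        while ((x & 0x80000000u) == 0) { x <<= 1; n++; }
        return n;
    }

    /* Sketch of the idea behind normalizeFloat32Subnormal(): shift the leading 1
     * of a nonzero subnormal fraction up to bit 23 (the hidden-bit position) and
     * compensate in the exponent.  A subnormal's stored exponent is 0 and its
     * true exponent is 1 - bias, so the normalized exponent is 1 - shiftCount. */
    static void normalize_f32_subnormal(uint32_t aSig, int16_t *zExpPtr, uint32_t *zSigPtr)
    {
        int shiftCount = clz32(aSig) - 8;   /* bit 23 sits 8 positions below bit 31 */
        *zSigPtr = aSig << shiftCount;
        *zExpPtr = 1 - shiftCount;          /* assumption: mirrors the full routine */
    }
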
1370 uint32_t aSig;
1374 aSig = extractFloat32Frac( a );
1377 if ( ( aExp == 0xFF ) && aSig ) aSign = 0;
1378 if ( aExp ) aSig |= 0x00800000;
1380 aSig64 = aSig;
1401 uint32_t aSig;
1405 aSig = extractFloat32Frac( a );
1412 if ( ! aSign || ( ( aExp == 0xFF ) && aSig ) ) return 0x7FFFFFFF;
1417 if ( aExp | aSig ) STATUS(float_exception_flags) |= float_flag_inexact;
1420 aSig = ( aSig | 0x00800000 )<<8;
1421 z = aSig>>( - shiftCount );
1422 if ( (uint32_t) ( aSig<<( shiftCount & 31 ) ) ) {
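
Lines 1420-1422 show the truncating-conversion pattern used throughout these routines: make the hidden bit explicit, left-justify the significand, shift right by the magnitude of the (negative) shiftCount, and detect inexactness by testing whether any shifted-out bits were nonzero. A hedged sketch of that path, ignoring the NaN/overflow and tiny-value branches; the constant 0x9E (the biased exponent of 2^31) comes from the surrounding routine rather than the lines above, and the flag plumbing is simplified.

    #include <stdint.h>
    #include <stdbool.h>

    /* Sketch of the truncating float32 -> int32 path; field extraction and the
     * inexact flag are simplified stand-ins for the softfloat helpers. */
    static int32_t f32_to_i32_rtz_sketch(uint32_t bits, bool *inexact /* sticky */)
    {
        uint32_t aSig  = bits & 0x007FFFFF;
        int      aExp  = (int)((bits >> 23) & 0xFF);
        bool     aSign = (bits >> 31) != 0;
        int shiftCount = aExp - 0x9E;        /* 0x9E: assumed, biased exponent of 2^31 */

        /* Overflow/NaN handling omitted; assume aExp > 0x7E and |value| < 2^31. */
        aSig = (aSig | 0x00800000) << 8;     /* explicit integer bit, left-justified */
        int32_t z = (int32_t)(aSig >> (-shiftCount));
        if ((uint32_t)(aSig << (shiftCount & 31))) {
            *inexact = true;                  /* some fraction bits were discarded */
        }
        return aSign ? -z : z;
    }
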
1444 uint32_t aSig;
1447 aSig = extractFloat32Frac( a );
1454 if ( ! aSign || ( ( aExp == 0xFF ) && aSig ) ) {
1461 if ( aExp | aSig ) {
1467 aSig = ( aSig | 0x00800000 )<<8;
1468 z = aSig>>( - shiftCount );
1469 if ( (uint32_t) ( aSig<<( shiftCount & 31 ) ) ) {
1493 uint32_t aSig;
1497 aSig = extractFloat32Frac( a );
1503 if ( ! aSign || ( ( aExp == 0xFF ) && aSig ) ) {
1508 if ( aExp ) aSig |= 0x00800000;
1509 aSig64 = aSig;
1530 uint32_t aSig;
1535 aSig = extractFloat32Frac( a );
1542 if ( ! aSign || ( ( aExp == 0xFF ) && aSig ) ) {
1549 if ( aExp | aSig ) STATUS(float_exception_flags) |= float_flag_inexact;
1552 aSig64 = aSig | 0x00800000;
1574 uint32_t aSig;
1577 aSig = extractFloat32Frac( a );
1581 if ( aSig ) return commonNaNToFloat64( float32ToCommonNaN( a STATUS_VAR ) STATUS_VAR );
1585 if ( aSig == 0 ) return packFloat64( aSign, 0, 0 );
1586 normalizeFloat32Subnormal( aSig, &aExp, &aSig );
1589 return packFloat64( aSign, aExp + 0x380, ( (uint64_t) aSig )<<29 );
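
Line 1589 shows the exact float32 to float64 widening: rebias the exponent by 0x380 (1023 - 127) and move the 23-bit fraction up to the 52-bit field with a shift of 29. A minimal sketch for normal inputs; the bit packing below is illustrative, not the softfloat packFloat64().

    #include <stdint.h>

    /* Sketch of the float32 -> float64 widening; NaN and subnormal inputs are
     * ignored to keep the sketch short (assume 0 < aExp < 0xFF). */
    static uint64_t f32_to_f64_sketch(uint32_t a)
    {
        uint64_t aSign = a >> 31;
        uint64_t aExp  = (a >> 23) & 0xFF;
        uint64_t aSig  = a & 0x007FFFFF;

        return (aSign << 63) | ((aExp + 0x380) << 52) | (aSig << 29);
    }
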
1606 uint32_t aSig;
1609 aSig = extractFloat32Frac( a );
1613 if ( aSig ) return commonNaNToFloatx80( float32ToCommonNaN( a STATUS_VAR ) STATUS_VAR );
1617 if ( aSig == 0 ) return packFloatx80( aSign, 0, 0 );
1618 normalizeFloat32Subnormal( aSig, &aExp, &aSig );
1620 aSig |= 0x00800000;
1621 return packFloatx80( aSign, aExp + 0x3F80, ( (uint64_t) aSig )<<40 );
1640 uint32_t aSig;
1643 aSig = extractFloat32Frac( a );
1647 if ( aSig ) return commonNaNToFloat128( float32ToCommonNaN( a STATUS_VAR ) STATUS_VAR );
1651 if ( aSig == 0 ) return packFloat128( aSign, 0, 0, 0 );
1652 normalizeFloat32Subnormal( aSig, &aExp, &aSig );
1655 return packFloat128( aSign, aExp + 0x3F80, ( (uint64_t) aSig )<<25, 0 );
1732 uint32_t aSig, bSig, zSig;
1735 aSig = extractFloat32Frac( a );
1740 aSig <<= 6;
1744 if ( aSig ) return propagateFloat32NaN( a, b STATUS_VAR );
1765 aSig |= 0x20000000;
1767 shift32RightJamming( aSig, - expDiff, &aSig );
1772 if ( aSig | bSig ) return propagateFloat32NaN( a, b STATUS_VAR );
1777 if (aSig | bSig) {
1782 return packFloat32( zSign, 0, ( aSig + bSig )>>6 );
1784 zSig = 0x40000000 + aSig + bSig;
1788 aSig |= 0x20000000;
1789 zSig = ( aSig + bSig )<<1;
1792 zSig = aSig + bSig;
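
The aligned-add path (1765-1767) and many of the conversions rely on a "jamming" shift: the operand with the smaller exponent is shifted right, and any bits shifted off the end are ORed into bit 0 so the later rounding step still sees them as a sticky bit. The sketch below restates the idea behind the shift32RightJamming() helper called at 1767 and 1846; it is a restatement of the technique, not necessarily the exact library source.

    #include <stdint.h>

    /* Jamming right shift used to align significands before an add: shift `a`
     * right by `count`, but if any nonzero bits fall off the end, set bit 0 of
     * the result so rounding later still detects inexactness. */
    static void shift32_right_jamming_sketch(uint32_t a, int count, uint32_t *zPtr)
    {
        uint32_t z;
        if (count == 0) {
            z = a;
        } else if (count < 32) {
            z = (a >> count) | ((a << ((-count) & 31)) != 0);
        } else {
            z = (a != 0);
        }
        *zPtr = z;
    }
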
1811 uint32_t aSig, bSig, zSig;
1814 aSig = extractFloat32Frac( a );
1819 aSig <<= 7;
1824 if ( aSig | bSig ) return propagateFloat32NaN( a, b STATUS_VAR );
1832 if ( bSig < aSig ) goto aBigger;
1833 if ( aSig < bSig ) goto bBigger;
1844 aSig |= 0x40000000;
1846 shift32RightJamming( aSig, - expDiff, &aSig );
1849 zSig = bSig - aSig;
1855 if ( aSig ) return propagateFloat32NaN( a, b STATUS_VAR );
1865 aSig |= 0x40000000;
1867 zSig = aSig - bSig;
1931 uint32_t aSig, bSig;
1938 aSig = extractFloat32Frac( a );
1946 if ( aSig || ( ( bExp == 0xFF ) && bSig ) ) {
1957 if ( ( aExp | aSig ) == 0 ) {
1964 if ( aSig == 0 ) return packFloat32( zSign, 0, 0 );
1965 normalizeFloat32Subnormal( aSig, &aExp, &aSig );
1972 aSig = ( aSig | 0x00800000 )<<7;
1974 shift64RightJamming( ( (uint64_t) aSig ) * bSig, 32, &zSig64 );
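
Lines 1972-1974 show the float32 multiply core: with the hidden bits made explicit and the operands pre-shifted, the 64-bit product is collapsed back to 32 bits with its low half jammed into a sticky bit. A hedged sketch of just that step; the bSig preparation (a shift by 8, symmetric to line 1972) is assumed from the full routine, and all special cases are omitted.

    #include <stdint.h>

    /* Multiply two 24-bit significands (hidden bits made explicit) and fold the
     * low 32 bits of the 64-bit product into a sticky bit, as at 1974.  The
     * pre-shifts put the product's leading bit at position 61 or 62, so a
     * 32-bit rounding step can finish the job. */
    static uint32_t mul_sig_sketch(uint32_t aFrac, uint32_t bFrac)
    {
        uint32_t aSig = (aFrac | 0x00800000) << 7;   /* as at line 1972 */
        uint32_t bSig = (bFrac | 0x00800000) << 8;   /* assumed from the full routine */
        uint64_t prod = (uint64_t)aSig * bSig;

        /* Keep the top 32 bits; OR any nonzero low bits into bit 0 (sticky). */
        return (uint32_t)(prod >> 32) | ((uint32_t)prod != 0);
    }
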
1994 uint32_t aSig, bSig, zSig;
1998 aSig = extractFloat32Frac( a );
2006 if ( aSig ) return propagateFloat32NaN( a, b STATUS_VAR );
2020 if ( ( aExp | aSig ) == 0 ) {
2030 if ( aSig == 0 ) return packFloat32( zSign, 0, 0 );
2031 normalizeFloat32Subnormal( aSig, &aExp, &aSig );
2034 aSig = ( aSig | 0x00800000 )<<7;
2036 if ( bSig <= ( aSig + aSig ) ) {
2037 aSig >>= 1;
2040 zSig = ( ( (uint64_t) aSig )<<32 ) / bSig;
2042 zSig |= ( (uint64_t) bSig * zSig != ( (uint64_t) aSig )<<32 );
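
Lines 2036-2042 show the float32 divide core: pre-scale so the 64-by-32 quotient lands in a fixed range, then recover the exactness information lost to truncation by checking whether quotient times divisor reproduces the dividend. A sketch of that step; the bSig preparation and the guard on the low quotient bits mirror the full routine and are assumptions here, and the exponent and special-case handling is omitted.

    #include <stdint.h>

    /* Divide the scaled significands; if the quotient's low bits give no hint of
     * inexactness, verify exactness explicitly and set a sticky bit if needed. */
    static uint32_t div_sig_sketch(uint32_t aFrac, uint32_t bFrac, int *zExpAdjust)
    {
        uint32_t aSig = (aFrac | 0x00800000) << 7;
        uint32_t bSig = (bFrac | 0x00800000) << 8;   /* assumed from the full routine */
        uint32_t zSig;

        *zExpAdjust = 0;
        if (bSig <= aSig + aSig) {      /* dividend significand >= divisor's */
            aSig >>= 1;
            ++*zExpAdjust;
        }
        zSig = (uint32_t)((((uint64_t)aSig) << 32) / bSig);
        if ((zSig & 0x3F) == 0) {
            /* Low quotient bits all zero: check exactness the hard way. */
            zSig |= ((uint64_t)bSig * zSig != ((uint64_t)aSig) << 32);
        }
        return zSig;
    }
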
2058 uint32_t aSig, bSig;
2066 aSig = extractFloat32Frac( a );
2072 if ( aSig || ( ( bExp == 0xFF ) && bSig ) ) {
2090 if ( aSig == 0 ) return a;
2091 normalizeFloat32Subnormal( aSig, &aExp, &aSig );
2094 aSig |= 0x00800000;
2097 aSig <<= 8;
2101 aSig >>= 1;
2103 q = ( bSig <= aSig );
2104 if ( q ) aSig -= bSig;
2106 q = ( ( (uint64_t) aSig )<<32 ) / bSig;
2109 aSig = ( ( aSig>>1 )<<( expDiff - 1 ) ) - bSig * q;
2112 aSig >>= 2;
2117 if ( bSig <= aSig ) aSig -= bSig;
2118 aSig64 = ( (uint64_t) aSig )<<40;
2132 aSig = ( ( aSig64>>33 )<<( expDiff - 1 ) ) - bSig * q;
2135 alternateASig = aSig;
2137 aSig -= bSig;
2138 } while ( 0 <= (int32_t) aSig );
2139 sigMean = aSig + alternateASig;
2141 aSig = alternateASig;
2143 zSign = ( (int32_t) aSig < 0 );
2144 if ( zSign ) aSig = - aSig;
2145 return normalizeRoundAndPackFloat32( aSign ^ zSign, bExp, aSig STATUS_VAR );
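
The tail of float32_rem (2135-2145, with a float64 twin at 3490-3500) keeps subtracting the divisor from a scaled partial remainder until it goes negative, then picks whichever of the last two remainders is closer to zero, breaking ties toward an even quotient, which is IEEE 754's remainder definition. A sketch of that selection on plain integers; the tie-break condition is reconstructed from the surrounding routine and is an assumption here.

    #include <stdint.h>

    /* Given scaled integer significands, subtract bSig until the running value
     * goes negative, then choose between the last non-negative remainder and the
     * first negative one so the result has magnitude <= bSig/2 (ties go to the
     * even quotient).  The caller flips the sign if the result is negative. */
    static int32_t rem_select_sketch(uint32_t aSig, uint32_t bSig, uint32_t q)
    {
        uint32_t alternateASig;

        do {
            alternateASig = aSig;
            ++q;
            aSig -= bSig;
        } while (0 <= (int32_t)aSig);

        /* aSig is now negative and alternateASig = aSig + bSig is non-negative;
         * their sum tells which one is closer to zero. */
        int32_t sigMean = (int32_t)(aSig + alternateASig);
        if ((sigMean < 0) || ((sigMean == 0) && (q & 1))) {
            aSig = alternateASig;   /* the positive remainder wins (or breaks the tie) */
        }
        return (int32_t)aSig;
    }
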
2159 uint32_t aSig, zSig;
2163 aSig = extractFloat32Frac( a );
2167 if ( aSig ) return propagateFloat32NaN( a, float32_zero STATUS_VAR );
2173 if ( ( aExp | aSig ) == 0 ) return a;
2178 if ( aSig == 0 ) return float32_zero;
2179 normalizeFloat32Subnormal( aSig, &aExp, &aSig );
2182 aSig = ( aSig | 0x00800000 )<<8;
2183 zSig = estimateSqrt32( aExp, aSig ) + 2;
2189 aSig >>= aExp & 1;
2191 rem = ( ( (uint64_t) aSig )<<32 ) - term;
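
float32_sqrt (2182-2191) and float64_sqrt (3537-3544) both take a slightly-high estimate from estimateSqrt32(), square it, subtract from the scaled radicand, and walk the estimate down until the remainder is non-negative, folding a nonzero remainder into a sticky bit. The correction loop below sketches that idea; the loop body is reconstructed from the full routines rather than from the matching lines, so treat it as illustrative.

    #include <stdint.h>

    /* Correct a possibly slightly-high integer square-root estimate against the
     * radicand, then OR in a sticky bit when the remainder is nonzero. */
    static uint32_t sqrt_correct_sketch(uint64_t radicand, uint32_t estimate)
    {
        uint64_t term = (uint64_t)estimate * estimate;
        int64_t  rem  = (int64_t)(radicand - term);

        while (rem < 0) {
            --estimate;
            /* (e+1)^2 - e^2 = 2e + 1, so restore the remainder incrementally. */
            rem += (int64_t)(((uint64_t)estimate << 1) | 1);
        }
        return estimate | (uint32_t)(rem != 0);   /* sticky bit for later rounding */
    }
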
2245 uint32_t aSig;
2250 aSig = extractFloat32Frac( a );
2255 if ( aSig ) return propagateFloat32NaN( a, float32_zero STATUS_VAR );
2259 if (aSig == 0) return float32_one;
2293 uint32_t aSig, zSig, i;
2296 aSig = extractFloat32Frac( a );
2301 if ( aSig == 0 ) return packFloat32( 1, 0xFF, 0 );
2302 normalizeFloat32Subnormal( aSig, &aExp, &aSig );
2309 if ( aSig ) return propagateFloat32NaN( a, float32_zero STATUS_VAR );
2314 aSig |= 0x00800000;
2319 aSig = ( (uint64_t)aSig * aSig ) >> 23;
2320 if ( aSig & 0x01000000 ) {
2321 aSig >>= 1;
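
float32_log2 (2314-2321) extracts the fraction bits of log2 by repeated squaring: for a significand m in [1, 2) held in Q1.23 fixed point, squaring doubles the logarithm, and landing in [2, 4) means the current bit of log2(m) is 1, after which the value is halved back into range. A self-contained sketch of that loop; the loop bounds are taken from the surrounding routine and the names are illustrative.

    #include <stdint.h>

    /* Compute 23 fraction bits of log2(aSig / 2^23) for aSig in [1<<23, 1<<24). */
    static uint32_t log2_frac_bits_sketch(uint32_t aSig)
    {
        uint32_t zSig = 0;
        for (uint32_t i = 1u << 22; i > 0; i >>= 1) {
            aSig = (uint32_t)(((uint64_t)aSig * aSig) >> 23);   /* square, keep Q1.23 */
            if (aSig & 0x01000000) {   /* square landed in [2, 4): this log2 bit is 1 */
                aSig >>= 1;
                zSig |= i;
            }
        }
        return zSig;
    }
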
2558 uint64_t aSig;
2561 aSig = extractFloat64Frac( a );
2564 if ( ( aExp == 0x7FF ) && aSig ) aSign = 0;
2565 if ( aExp ) aSig |= LIT64( 0x0010000000000000 );
2567 if ( 0 < shiftCount ) shift64RightJamming( aSig, shiftCount, &aSig );
2568 return roundAndPackInt32( aSign, aSig STATUS_VAR );
2586 uint64_t aSig, savedASig;
2590 aSig = extractFloat64Frac( a );
2594 if ( ( aExp == 0x7FF ) && aSig ) aSign = 0;
2598 if ( aExp || aSig ) STATUS(float_exception_flags) |= float_flag_inexact;
2601 aSig |= LIT64( 0x0010000000000000 );
2603 savedASig = aSig;
2604 aSig >>= shiftCount;
2605 z = aSig;
2612 if ( ( aSig<<shiftCount ) != savedASig ) {
2633 uint64_t aSig, savedASig;
2636 aSig = extractFloat64Frac( a );
2640 if ( ( aExp == 0x7FF ) && aSig ) {
2646 if ( aExp || aSig ) {
2651 aSig |= LIT64( 0x0010000000000000 );
2653 savedASig = aSig;
2654 aSig >>= shiftCount;
2655 z = aSig;
2664 if ( ( aSig<<shiftCount ) != savedASig ) {
2684 uint64_t aSig, aSigExtra;
2687 aSig = extractFloat64Frac( a );
2690 if ( aExp ) aSig |= LIT64( 0x0010000000000000 );
2697 && ( aSig != LIT64( 0x0010000000000000 ) ) )
2704 aSig <<= - shiftCount;
2707 shift64ExtraRightJamming( aSig, 0, shiftCount, &aSig, &aSigExtra );
2709 return roundAndPackInt64( aSign, aSig, aSigExtra STATUS_VAR );
2727 uint64_t aSig;
2731 aSig = extractFloat64Frac( a );
2734 if ( aExp ) aSig |= LIT64( 0x0010000000000000 );
2742 && ( aSig != LIT64( 0x0010000000000000 ) ) )
2749 z = aSig<<shiftCount;
2753 if ( aExp | aSig ) STATUS(float_exception_flags) |= float_flag_inexact;
2756 z = aSig>>( - shiftCount );
2757 if ( (uint64_t) ( aSig<<( shiftCount & 63 ) ) ) {
2777 uint64_t aSig;
2781 aSig = extractFloat64Frac( a );
2785 if ( aSig ) return commonNaNToFloat32( float64ToCommonNaN( a STATUS_VAR ) STATUS_VAR );
2788 shift64RightJamming( aSig, 22, &aSig );
2789 zSig = aSig;
2822 uint32_t aSig;
2826 aSig = extractFloat16Frac(a);
2829 if (aSig) {
2832 return packFloat32(aSign, 0xff, aSig << 13);
2837 if (aSig == 0) {
2841 shiftCount = countLeadingZeros32( aSig ) - 21;
2842 aSig = aSig << shiftCount;
2845 return packFloat32( aSign, aExp + 0x70, aSig << 13);
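
Lines 2841-2845 show the float16 to float32 widening: the 10-bit fraction moves up by 13 to the 23-bit field and the exponent is rebiased by 0x70 (127 - 15); a subnormal half is first normalized with a leading-zero count. A hedged sketch follows; the packing-by-addition trick (which lets the normalized subnormal's leading bit carry into the exponent field) follows packFloat32()'s behaviour, and the quiet-NaN details are omitted.

    #include <stdint.h>

    /* Sketch of float16 -> float32 widening; packing is illustrative. */
    static uint32_t f16_to_f32_sketch(uint16_t h)
    {
        uint32_t aSign = h >> 15;
        int32_t  aExp  = (h >> 10) & 0x1F;
        uint32_t aSig  = h & 0x3FF;

        if (aExp == 0x1F) {
            /* Inf/NaN: exponent becomes all ones, fraction moves up by 13. */
            return (aSign << 31) | 0x7F800000u | (aSig << 13);
        }
        if (aExp == 0) {
            if (aSig == 0) {
                return aSign << 31;                  /* signed zero */
            }
            /* Subnormal half: shift the leading 1 up to bit 10 (compare 2841-2842). */
            int shiftCount = 0;
            while (!(aSig & 0x400)) { aSig <<= 1; shiftCount++; }
            aExp = -shiftCount;
        }
        /* Pack by addition so a set bit 23 in the shifted significand bumps the
         * exponent field; this is what makes the subnormal case come out right. */
        return (aSign << 31) + ((uint32_t)(aExp + 0x70) << 23) + (aSig << 13);
    }
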
2852 uint32_t aSig;
2858 aSig = extractFloat32Frac( a );
2862 if (aSig) {
2877 if (aExp == 0 && aSig == 0) {
2881 aSig |= 0x00800000;
2891 if (aSig & mask) {
2897 if ((aSig & mask) == increment) {
2898 increment = aSig & (increment << 1);
2911 aSig += increment;
2912 if (aSig >= 0x01000000) {
2913 aSig >>= 1;
2936 aSig >>= -14 - aExp;
2939 return packFloat16(aSign, aExp + 14, aSig >> 13);
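
The float32 to float16 path (2891-2918) rounds the 24-bit significand down to the 11 bits half precision keeps by adding half of the discarded range, with an exact halfway case resolved toward an even result. A sketch of just that round-to-nearest-even step, assuming the normal-result case where the 13 low bits are discarded; the mask value and the flag handling are simplified relative to the full routine.

    #include <stdint.h>
    #include <stdbool.h>

    /* Round a 24-bit significand (hidden bit at bit 23) to the 11 bits kept by
     * half precision, round-to-nearest-even.  Returns the rounded significand,
     * still positioned at bit 23; *overflowed mirrors the carry check at
     * 2912-2913, and *inexact is sticky like the status flags. */
    static uint32_t round_sig_to_f16_sketch(uint32_t aSig, bool *inexact, bool *overflowed)
    {
        const uint32_t mask = 0x00001FFF;        /* assumed: the 13 bits float16 discards */
        uint32_t increment = (mask + 1) >> 1;    /* half of the discarded range */

        if (aSig & mask) {
            *inexact = true;
        }
        if ((aSig & mask) == increment) {
            /* Exactly halfway: use a full-ULP increment when the lowest surviving
             * bit is 1 (round up to the even neighbour), zero when already even. */
            increment = aSig & (increment << 1);
        }
        aSig += increment;
        *overflowed = false;
        if (aSig >= 0x01000000) {                /* carry out of bit 23 */
            aSig >>= 1;
            *overflowed = true;
        }
        return aSig;
    }
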
2955 uint64_t aSig;
2958 aSig = extractFloat64Frac( a );
2962 if ( aSig ) return commonNaNToFloatx80( float64ToCommonNaN( a STATUS_VAR ) STATUS_VAR );
2966 if ( aSig == 0 ) return packFloatx80( aSign, 0, 0 );
2967 normalizeFloat64Subnormal( aSig, &aExp, &aSig );
2971 aSign, aExp + 0x3C00, ( aSig | LIT64( 0x0010000000000000 ) )<<11 );
2990 uint64_t aSig, zSig0, zSig1;
2993 aSig = extractFloat64Frac( a );
2997 if ( aSig ) return commonNaNToFloat128( float64ToCommonNaN( a STATUS_VAR ) STATUS_VAR );
3001 if ( aSig == 0 ) return packFloat128( aSign, 0, 0, 0 );
3002 normalizeFloat64Subnormal( aSig, &aExp, &aSig );
3005 shift128Right( aSig, 0, 4, &zSig0, &zSig1 );
3096 uint64_t aSig, bSig, zSig;
3099 aSig = extractFloat64Frac( a );
3104 aSig <<= 9;
3108 if ( aSig ) return propagateFloat64NaN( a, b STATUS_VAR );
3129 aSig |= LIT64( 0x2000000000000000 );
3131 shift64RightJamming( aSig, - expDiff, &aSig );
3136 if ( aSig | bSig ) return propagateFloat64NaN( a, b STATUS_VAR );
3141 if (aSig | bSig) {
3146 return packFloat64( zSign, 0, ( aSig + bSig )>>9 );
3148 zSig = LIT64( 0x4000000000000000 ) + aSig + bSig;
3152 aSig |= LIT64( 0x2000000000000000 );
3153 zSig = ( aSig + bSig )<<1;
3156 zSig = aSig + bSig;
3175 uint64_t aSig, bSig, zSig;
3178 aSig = extractFloat64Frac( a );
3183 aSig <<= 10;
3188 if ( aSig | bSig ) return propagateFloat64NaN( a, b STATUS_VAR );
3196 if ( bSig < aSig ) goto aBigger;
3197 if ( aSig < bSig ) goto bBigger;
3208 aSig |= LIT64( 0x4000000000000000 );
3210 shift64RightJamming( aSig, - expDiff, &aSig );
3213 zSig = bSig - aSig;
3219 if ( aSig ) return propagateFloat64NaN( a, b STATUS_VAR );
3229 aSig |= LIT64( 0x4000000000000000 );
3231 zSig = aSig - bSig;
3295 uint64_t aSig, bSig, zSig0, zSig1;
3300 aSig = extractFloat64Frac( a );
3308 if ( aSig || ( ( bExp == 0x7FF ) && bSig ) ) {
3319 if ( ( aExp | aSig ) == 0 ) {
3326 if ( aSig == 0 ) return packFloat64( zSign, 0, 0 );
3327 normalizeFloat64Subnormal( aSig, &aExp, &aSig );
3334 aSig = ( aSig | LIT64( 0x0010000000000000 ) )<<10;
3336 mul64To128( aSig, bSig, &zSig0, &zSig1 );
3356 uint64_t aSig, bSig, zSig;
3362 aSig = extractFloat64Frac( a );
3370 if ( aSig ) return propagateFloat64NaN( a, b STATUS_VAR );
3384 if ( ( aExp | aSig ) == 0 ) {
3394 if ( aSig == 0 ) return packFloat64( zSign, 0, 0 );
3395 normalizeFloat64Subnormal( aSig, &aExp, &aSig );
3398 aSig = ( aSig | LIT64( 0x0010000000000000 ) )<<10;
3400 if ( bSig <= ( aSig + aSig ) ) {
3401 aSig >>= 1;
3404 zSig = estimateDiv128To64( aSig, 0, bSig );
3407 sub128( aSig, 0, term0, term1, &rem0, &rem1 );
3428 uint64_t aSig, bSig;
3434 aSig = extractFloat64Frac( a );
3440 if ( aSig || ( ( bExp == 0x7FF ) && bSig ) ) {
3458 if ( aSig == 0 ) return a;
3459 normalizeFloat64Subnormal( aSig, &aExp, &aSig );
3462 aSig = ( aSig | LIT64( 0x0010000000000000 ) )<<11;
3466 aSig >>= 1;
3468 q = ( bSig <= aSig );
3469 if ( q ) aSig -= bSig;
3472 q = estimateDiv128To64( aSig, 0, bSig );
3474 aSig = - ( ( bSig>>2 ) * q );
3479 q = estimateDiv128To64( aSig, 0, bSig );
3483 aSig = ( ( aSig>>1 )<<( expDiff - 1 ) ) - bSig * q;
3486 aSig >>= 2;
3490 alternateASig = aSig;
3492 aSig -= bSig;
3493 } while ( 0 <= (int64_t) aSig );
3494 sigMean = aSig + alternateASig;
3496 aSig = alternateASig;
3498 zSign = ( (int64_t) aSig < 0 );
3499 if ( zSign ) aSig = - aSig;
3500 return normalizeRoundAndPackFloat64( aSign ^ zSign, bExp, aSig STATUS_VAR );
3514 uint64_t aSig, zSig, doubleZSig;
3518 aSig = extractFloat64Frac( a );
3522 if ( aSig ) return propagateFloat64NaN( a, a STATUS_VAR );
3528 if ( ( aExp | aSig ) == 0 ) return a;
3533 if ( aSig == 0 ) return float64_zero;
3534 normalizeFloat64Subnormal( aSig, &aExp, &aSig );
3537 aSig |= LIT64( 0x0010000000000000 );
3538 zSig = estimateSqrt32( aExp, aSig>>21 );
3539 aSig <<= 9 - ( aExp & 1 );
3540 zSig = estimateDiv128To64( aSig, 0, zSig<<32 ) + ( zSig<<30 );
3544 sub128( aSig, 0, term0, term1, &rem0, &rem1 );
3565 uint64_t aSig, aSig0, aSig1, zSig, i;
3568 aSig = extractFloat64Frac( a );
3573 if ( aSig == 0 ) return packFloat64( 1, 0x7FF, 0 );
3574 normalizeFloat64Subnormal( aSig, &aExp, &aSig );
3581 if ( aSig ) return propagateFloat64NaN( a, float64_zero STATUS_VAR );
3586 aSig |= LIT64( 0x0010000000000000 );
3590 mul64To128( aSig, aSig, &aSig0, &aSig1 );
3591 aSig = ( aSig0 << 12 ) | ( aSig1 >> 52 );
3592 if ( aSig & LIT64( 0x0020000000000000 ) ) {
3593 aSig >>= 1;
3835 uint64_t aSig;
3837 aSig = extractFloatx80Frac( a );
3840 if ( ( aExp == 0x7FFF ) && (uint64_t) ( aSig<<1 ) ) aSign = 0;
3843 shift64RightJamming( aSig, shiftCount, &aSig );
3844 return roundAndPackInt32( aSign, aSig STATUS_VAR );
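
A recurring idiom in the floatx80 fragments (3840, 3869, 3994, 4051, 4155, ...) is (uint64_t)(aSig<<1): because the extended-precision significand stores its integer bit explicitly in bit 63, shifting left by one discards that bit and tests only the fraction, which is how these routines tell an extended-precision infinity from a NaN. A one-line sketch of the idiom:

    #include <stdint.h>
    #include <stdbool.h>

    /* True when any fraction bit of a floatx80 significand is set (the explicit
     * integer bit in bit 63 is deliberately shifted out and ignored). */
    static bool floatx80_frac_nonzero_sketch(uint64_t aSig)
    {
        return (uint64_t)(aSig << 1) != 0;
    }
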
3862 uint64_t aSig, savedASig;
3865 aSig = extractFloatx80Frac( a );
3869 if ( ( aExp == 0x7FFF ) && (uint64_t) ( aSig<<1 ) ) aSign = 0;
3873 if ( aExp || aSig ) STATUS(float_exception_flags) |= float_flag_inexact;
3877 savedASig = aSig;
3878 aSig >>= shiftCount;
3879 z = aSig;
3886 if ( ( aSig<<shiftCount ) != savedASig ) {
3907 uint64_t aSig, aSigExtra;
3909 aSig = extractFloatx80Frac( a );
3918 && ( aSig != LIT64( 0x8000000000000000 ) ) )
3927 shift64ExtraRightJamming( aSig, 0, shiftCount, &aSig, &aSigExtra );
3929 return roundAndPackInt64( aSign, aSig, aSigExtra STATUS_VAR );
3947 uint64_t aSig;
3950 aSig = extractFloatx80Frac( a );
3955 aSig &= LIT64( 0x7FFFFFFFFFFFFFFF );
3956 if ( ( a.high != 0xC03E ) || aSig ) {
3958 if ( ! aSign || ( ( aExp == 0x7FFF ) && aSig ) ) {
3965 if ( aExp | aSig ) STATUS(float_exception_flags) |= float_flag_inexact;
3968 z = aSig>>( - shiftCount );
3969 if ( (uint64_t) ( aSig<<( shiftCount & 63 ) ) ) {
3988 uint64_t aSig;
3990 aSig = extractFloatx80Frac( a );
3994 if ( (uint64_t) ( aSig<<1 ) ) {
3999 shift64RightJamming( aSig, 33, &aSig );
4000 if ( aExp || aSig ) aExp -= 0x3F81;
4001 return roundAndPackFloat32( aSign, aExp, aSig STATUS_VAR );
4016 uint64_t aSig, zSig;
4018 aSig = extractFloatx80Frac( a );
4022 if ( (uint64_t) ( aSig<<1 ) ) {
4027 shift64RightJamming( aSig, 1, &zSig );
4028 if ( aExp || aSig ) aExp -= 0x3C01;
4046 uint64_t aSig, zSig0, zSig1;
4048 aSig = extractFloatx80Frac( a );
4051 if ( ( aExp == 0x7FFF ) && (uint64_t) ( aSig<<1 ) ) {
4054 shift128Right( aSig<<1, 0, 16, &zSig0, &zSig1 );
4145 uint64_t aSig, bSig, zSig0, zSig1;
4148 aSig = extractFloatx80Frac( a );
4155 if ( (uint64_t) ( aSig<<1 ) ) return propagateFloatx80NaN( a, b STATUS_VAR );
4168 shift64ExtraRightJamming( aSig, 0, - expDiff, &aSig, &zSig1 );
4173 if ( (uint64_t) ( ( aSig | bSig )<<1 ) ) {
4179 zSig0 = aSig + bSig;
4187 zSig0 = aSig + bSig;
4211 uint64_t aSig, bSig, zSig0, zSig1;
4215 aSig = extractFloatx80Frac( a );
4223 if ( (uint64_t) ( ( aSig | bSig )<<1 ) ) {
4236 if ( bSig < aSig ) goto aBigger;
4237 if ( aSig < bSig ) goto bBigger;
4245 shift128RightJamming( aSig, 0, - expDiff, &aSig, &zSig1 );
4247 sub128( bSig, 0, aSig, zSig1, &zSig0, &zSig1 );
4253 if ( (uint64_t) ( aSig<<1 ) ) return propagateFloatx80NaN( a, b STATUS_VAR );
4259 sub128( aSig, 0, bSig, zSig1, &zSig0, &zSig1 );
4320 uint64_t aSig, bSig, zSig0, zSig1;
4323 aSig = extractFloatx80Frac( a );
4331 if ( (uint64_t) ( aSig<<1 )
4340 if ( ( aExp | aSig ) == 0 ) {
4350 if ( aSig == 0 ) return packFloatx80( zSign, 0, 0 );
4351 normalizeFloatx80Subnormal( aSig, &aExp, &aSig );
4358 mul64To128( aSig, bSig, &zSig0, &zSig1 );
4379 uint64_t aSig, bSig, zSig0, zSig1;
4383 aSig = extractFloatx80Frac( a );
4391 if ( (uint64_t) ( aSig<<1 ) ) return propagateFloatx80NaN( a, b STATUS_VAR );
4404 if ( ( aExp | aSig ) == 0 ) {
4417 if ( aSig == 0 ) return packFloatx80( zSign, 0, 0 );
4418 normalizeFloatx80Subnormal( aSig, &aExp, &aSig );
4422 if ( bSig <= aSig ) {
4423 shift128Right( aSig, 0, 1, &aSig, &rem1 );
4426 zSig0 = estimateDiv128To64( aSig, rem1, bSig );
4428 sub128( aSig, rem1, term0, term1, &rem0, &rem1 );
6364 uint32_t aSig;
6367 aSig = extractFloat32Frac( a );
6372 if ( aSig ) {
6378 aSig |= 0x00800000;
6379 else if ( aSig == 0 )
6389 aSig <<= 7;
6390 return normalizeRoundAndPackFloat32( aSign, aExp, aSig STATUS_VAR );
6397 uint64_t aSig;
6400 aSig = extractFloat64Frac( a );
6405 if ( aSig ) {
6411 aSig |= LIT64( 0x0010000000000000 );
6412 else if ( aSig == 0 )
6422 aSig <<= 10;
6423 return normalizeRoundAndPackFloat64( aSign, aExp, aSig STATUS_VAR );
6431 uint64_t aSig;
6433 aSig = extractFloatx80Frac( a );
6438 if ( aSig<<1 ) {
6444 if (aExp == 0 && aSig == 0)
6455 aSign, aExp, aSig, 0 STATUS_VAR );