Lines Matching defs:ebx
45 push ebx //CPUID will trash these
90 pop ebx
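The push/pop pair above exists because CPUID overwrites EAX, EBX, ECX and EDX, and EBX is callee-saved under the common 32-bit calling conventions, so the detection code must restore it by hand. A minimal sketch of the same feature check in C with GCC-style inline assembly (the has_mmx name is illustrative, not from this file; declaring EBX as an output operand lets the compiler do the save/restore that push ebx / pop ebx does here):

    #include <stdint.h>

    /* Illustrative only: CPUID leaf 1 reports MMX support in EDX bit 23.
     * Listing ebx as an output makes the compiler preserve it, mirroring
     * the push ebx / pop ebx pair in the listing above. */
    static int has_mmx(void)
    {
        uint32_t eax, ebx, ecx, edx;
        __asm__ volatile ("cpuid"
                          : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
                          : "a"(1), "c"(0));
        return (edx >> 23) & 1;
    }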
190 mov ebx,dstptr //load dest
198 movq mm7,[ebx]
201 movq [ebx],mm4
207 movq mm6,[ebx+8]
210 movq [ebx+8],mm5
215 movq mm7,[ebx+16]
218 movq [ebx+16],mm6
221 add ebx,24
237 mov [ebx],ax
240 mov [ebx+2],al
243 add ebx,3
329 mov ebx,dstptr //load dest
338 movq mm7,[ebx]
341 movq [ebx],mm4
346 movq mm6,[ebx+8]
349 movq [ebx+8],mm5
354 movq mm7,[ebx+16]
357 movq [ebx+16],mm6
362 movq mm4,[ebx+24]
365 movq [ebx+24],mm7
368 add ebx,32
384 mov [ebx],eax
387 add ebx,4
461 mov ebx,dstptr //load dest
469 pandn mm6,[ebx]
471 movq [ebx],mm4
474 add ebx,8
491 mov [ebx],al
494 inc ebx
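The pandn loads in this group (and again at lines 877-912 below) are one half of a select-by-mask blend: the destination row is kept where the pixel mask is zero and replaced by source pixels where it is all-ones, with the pand/por half falling on lines that do not reference ebx and so not listed here. A byte-wise C sketch of the idiom, with dst, src and mask as illustrative names:

    #include <stddef.h>
    #include <stdint.h>

    /* Byte-wise equivalent of the pandn/pand/por select used above:
     * keep dst where mask is 0x00, take src where mask is 0xff. */
    static void masked_blend(uint8_t *dst, const uint8_t *src,
                             const uint8_t *mask, size_t n)
    {
        for (size_t i = 0; i < n; i++)
            dst[i] = (uint8_t)((dst[i] & (uint8_t)~mask[i]) | (src[i] & mask[i]));
    }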
735 mov ebx,dstptr //load dest
743 movq mm7,[ebx]
746 movq [ebx],mm4
751 movq mm6,[ebx+8]
754 movq [ebx+8],mm5
757 add ebx,16
773 mov [ebx],ax
776 add ebx,2
868 mov ebx,dstptr //load dest
877 pandn mm6,[ebx]
879 movq [ebx],mm7
884 pandn mm7,[ebx+8]
886 movq [ebx+8],mm6
891 pandn mm7,[ebx+16]
893 movq [ebx+16],mm6
898 pandn mm6,[ebx+24]
900 movq [ebx+24],mm7
905 pandn mm7,[ebx+32]
907 movq [ebx+32],mm6
912 pandn mm6,[ebx+40]
914 movq [ebx+40],mm7
917 add ebx,48
934 mov [ebx],eax
936 mov [ebx+4],ax // Glenn R-P
939 add ebx,6 // lines. Glenn R-P 20070717
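Each variant in this block moves the bulk of the row in 8-byte movq units and then finishes the remaining pixels with plain stores whose combined width equals the pixel size, advancing ebx by 3, 4, 1, 2 or 6 bytes accordingly (for 6-byte pixels, a 4-byte store at [ebx] plus a 2-byte store at [ebx+4]; for 3-byte pixels, a 2-byte store plus a 1-byte store). A generic C sketch of that bulk-then-tail indexing pattern, with illustrative names and memcpy standing in for the sized stores (the mask blend itself is omitted):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Illustrative only: process a row in 8-byte chunks, then finish the
     * leftover pixels one at a time with stores sized to the pixel
     * (pixel_bytes must be at least 1). */
    static void copy_bulk_then_tail(uint8_t *dst, const uint8_t *src,
                                    size_t len, size_t pixel_bytes)
    {
        size_t i = 0;
        for (; i + 8 <= len; i += 8)                     /* movq-sized chunks */
            memcpy(dst + i, src + i, 8);
        for (; i + pixel_bytes <= len; i += pixel_bytes) /* per-pixel tail    */
            memcpy(dst + i, src + i, pixel_bytes);
    }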
1953 xor ebx, ebx // ebx ==> x
1962 mov al, [esi + ebx] // Load al with Prior(x)
1963 inc ebx
1965 add al, [edi+ebx-1] // Add Avg(x); -1 to offset inc ebx
1966 cmp ebx, bpp
1967 mov [edi+ebx-1], al // Write back Raw(x);
1968 // mov does not affect flags; -1 to offset inc ebx
1972 add diff, ebx // add bpp
1975 sub diff, edi // subtract from start ==> value ebx at alignment
1983 mov cl, [esi + ebx] // load cl with Prior(x)
1984 mov al, [edx + ebx] // load al with Raw(x-bpp)
1986 inc ebx
1988 add al, [edi+ebx-1] // Add Avg(x); -1 to offset inc ebx
1989 cmp ebx, diff // Check if at alignment boundary
1990 mov [edi+ebx-1], al // Write back Raw(x);
1991 // mov does not affect flags; -1 to offset inc ebx
1996 sub eax, ebx // subtract alignment fix
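Lines 1953-1996 are the scalar prologue of the Average filter: the first bpp bytes have no Raw(x-bpp), so only Prior(x)/2 is added, and a second byte-by-byte loop then runs up to diff, the first 8-byte-aligned offset where the MMX loops below take over. The inc ebx before each store plus the [edi+ebx-1] addressing keeps the cmp result intact for the branch, since mov does not affect the flags. A scalar C sketch of the same filter step, with row, prev, bpp and len as illustrative names:

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch of the scalar Average-filter reconstruction driven by ebx as
     * the byte index x in the listing above.  The sum is formed in int,
     * so the 9-bit intermediate is not truncated before the shift. */
    static void avg_unfilter(uint8_t *row, const uint8_t *prev,
                             size_t bpp, size_t len)
    {
        size_t x = 0;
        for (; x < bpp && x < len; x++)     /* Raw(x-bpp) is 0 for x < bpp */
            row[x] = (uint8_t)(row[x] + (prev[x] >> 1));
        for (; x < len; x++)
            row[x] = (uint8_t)(row[x] + ((row[x - bpp] + prev[x]) >> 1));
    }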
2012 mov ebx, diff // ebx ==> x = offset to alignment boundary
2018 movq mm2, [edi + ebx - 8] // Load previous aligned 8 bytes
2021 movq mm0, [edi + ebx] // Load mm0 with Avg(x)
2025 movq mm1, [esi + ebx] // Load mm1 with Prior(x)
2069 add ebx, 8
2074 movq [edi + ebx - 8], mm0
2076 cmp ebx, MMXLength
2095 mov ebx, diff // ebx ==> x = offset to alignment boundary
2105 movq mm2, [edi + ebx - 8] // Load previous aligned 8 bytes
2108 movq mm0, [edi + ebx]
2110 movq mm1, [esi + ebx]
2130 add ebx, 8
2140 cmp ebx, MMXLength
2142 movq [edi + ebx - 8], mm0
2158 mov ebx, diff // ebx ==> x = offset to alignment boundary
2164 movq mm2, [edi + ebx - 8] // Load previous aligned 8 bytes
2167 movq mm0, [edi + ebx]
2169 movq mm1, [esi + ebx]
2220 add ebx, 8
2230 cmp ebx, MMXLength
2232 movq [edi + ebx - 8], mm0
2244 mov ebx, diff // ebx ==> x = offset to alignment boundary
2246 cmp ebx, FullLength // Test if offset at end of array
2256 mov cl, [esi + ebx] // load cl with Prior(x)
2257 mov al, [edx + ebx] // load al with Raw(x-bpp)
2259 inc ebx
2261 add al, [edi+ebx-1] // Add Avg(x); -1 to offset inc ebx
2262 cmp ebx, FullLength // Check if at end of array
2263 mov [edi+ebx-1], al // Write back Raw(x);
2264 // mov does not affect flags; -1 to offset inc ebx
2275 mov ebx, diff // ebx ==> x = offset to alignment boundary
2281 movq mm2, [edi + ebx - 8] // Load previous aligned 8 bytes
2284 movq mm0, [edi + ebx]
2286 movq mm1, [esi + ebx]
2287 add ebx, 8
2298 cmp ebx, MMXLength
2299 movq [edi + ebx - 8], mm0
2310 mov ebx, diff // ebx ==> x = offset to alignment boundary
2317 movq mm0, [edi + ebx]
2319 movq mm1, [esi + ebx]
2321 movq mm2, [edx + ebx]
2330 add ebx, 8
2332 cmp ebx, MMXLength
2333 movq [edi + ebx - 8], mm0
2343 mov ebx, MMXLength // ebx ==> x = offset bytes remaining after MMX
2345 cmp ebx, FullLength // Test if offset at end of array
2355 mov cl, [esi + ebx] // load cl with Prior(x)
2356 mov al, [edx + ebx] // load al with Raw(x-bpp)
2358 inc ebx
2360 add al, [edi+ebx-1] // Add Avg(x); -1 to offset inc ebx
2361 cmp ebx, FullLength // Check if at end of array
2362 mov [edi+ebx-1], al // Write back Raw(x);
2363 // mov does not affect flags; -1 to offset inc ebx
2391 xor ebx, ebx // ebx ==> x offset
2401 mov al, [edi + ebx]
2402 add al, [esi + ebx]
2403 inc ebx
2404 cmp ebx, bpp
2405 mov [edi + ebx - 1], al
2409 add diff, ebx // add bpp
2413 sub diff, edi // subtract from start ==> value ebx at alignment
2419 mov al, [esi + ebx] // load Prior(x) into al
2460 mov cl, [esi + ebx] // load Prior(x) into cl
2473 inc ebx
2476 add [edi + ebx - 1], cl
2477 cmp ebx, diff
2482 sub eax, ebx // subtract alignment fix
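Lines 2391-2482 are the matching scalar prologue for the Paeth filter: the first bpp bytes fall back to adding Prior(x) only (a and c do not exist yet), and the remaining bytes up to the alignment boundary use the full predictor with a = Raw(x-bpp), b = Prior(x), c = Prior(x-bpp). A C sketch of the predictor those loops compute:

    #include <stdint.h>
    #include <stdlib.h>

    /* Sketch of the Paeth predictor, with a = Raw(x-bpp), b = Prior(x),
     * c = Prior(x-bpp).  Reconstruction then adds it back:
     *   Raw(x) = (Paeth(x) + paeth_predict(a, b, c)) & 0xff           */
    static uint8_t paeth_predict(uint8_t a, uint8_t b, uint8_t c)
    {
        int p  = (int)a + (int)b - (int)c;  /* initial estimate         */
        int pa = abs(p - (int)a);           /* distances to a, b, c     */
        int pb = abs(p - (int)b);
        int pc = abs(p - (int)c);
        if (pa <= pb && pa <= pc) return a; /* ties resolve a, b, c     */
        if (pb <= pc) return b;
        return c;
    }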
2498 mov ebx, diff
2503 movq mm1, [edi+ebx-8]
2506 movq mm2, [esi + ebx] // load b=Prior(x)
2508 movq mm3, [esi+ebx-8] // Prep c=Prior(x-bpp) bytes
2559 movq mm3, [esi + ebx] // load c=Prior(x-bpp)
2562 paddb mm7, [edi + ebx] // add Paeth predictor with Raw(x)
2564 movq [edi + ebx], mm7 // write back updated value
2612 movq mm2, [esi + ebx] // load b=Prior(x)
2625 paddb mm7, [edi + ebx] // add Paeth predictor with Raw(x)
2627 movq [edi + ebx], mm7 // write back updated value
2679 // Step ebx to next set of 8 bytes and repeat loop til done
2680 add ebx, 8
2682 paddb mm1, [edi + ebx - 8] // add Paeth predictor with Raw(x)
2684 cmp ebx, MMXLength
2686 movq [edi + ebx - 8], mm1 // write back updated value
2704 mov ebx, diff
2708 movq mm1, [edi+ebx-8]
2714 movq mm3, [esi+ebx-8] // read c=Prior(x-bpp) bytes
2716 movq mm2, [esi + ebx] // load b=Prior(x)
2767 movq mm3, [esi + ebx - 8] // load c=Prior(x-bpp)
2770 movq mm2, [esi + ebx] // load b=Prior(x) step 1
2771 paddb mm7, [edi + ebx] // add Paeth predictor with Raw(x)
2773 movq [edi + ebx], mm7 // write back updated value
2774 movq mm1, [edi+ebx-8]
2832 add ebx, 8
2834 paddb mm1, [edi + ebx - 8] // add Paeth predictor with Raw(x)
2835 cmp ebx, MMXLength
2836 movq [edi + ebx - 8], mm1 // write back updated value
2847 mov ebx, diff
2852 movq mm1, [edi+ebx-8] // Only time should need to read
2856 movq mm3, [esi+ebx-8] // read c=Prior(x-bpp) bytes
2858 movq mm2, [esi + ebx] // load b=Prior(x)
2907 movq mm3, [esi + ebx] // load c=Prior(x-bpp)
2910 paddb mm7, [edi + ebx] // add Paeth predictor with Raw(x)
2912 movq [edi + ebx], mm7 // write back updated value
2964 add ebx, 8
2966 paddb mm1, [edi + ebx - 8] // add Paeth predictor with Raw(x)
2967 cmp ebx, MMXLength
2968 movq [edi + ebx - 8], mm1 // write back updated value
2978 mov ebx, diff
2983 movq mm1, [edi+ebx-8] // Only time should need to read
2987 movq mm3, [esi+ebx-8] // read c=Prior(x-bpp) bytes
2989 movq mm2, [esi + ebx] // load b=Prior(x)
3038 movq mm3, [esi+ebx-8] // read c=Prior(x-bpp) bytes
3040 movq mm2, [esi + ebx] // load b=Prior(x)
3041 paddb mm7, [edi + ebx] // add Paeth predictor with Raw(x)
3043 movq [edi + ebx], mm7 // write back updated value
3044 movq mm1, [edi+ebx-8] // read a=Raw(x-bpp) bytes
3096 add ebx, 8
3098 paddb mm1, [edi + ebx - 8] // add Paeth predictor with Raw(x)
3099 cmp ebx, MMXLength
3100 movq [edi + ebx - 8], mm1 // write back updated value
3112 mov ebx, diff
3113 cmp ebx, FullLength
3118 mov edx, ebx
3120 sub edx, bpp // Set edx = ebx - bpp
3124 mov al, [esi + ebx] // load Prior(x) into al
3165 mov cl, [esi + ebx] // load Prior(x) into cl
3178 inc ebx
3181 add [edi + ebx - 1], cl
3182 cmp ebx, FullLength
3193 mov ebx, MMXLength
3194 cmp ebx, FullLength
3199 mov edx, ebx
3201 sub edx, bpp // Set edx = ebx - bpp
3205 mov al, [esi + ebx] // load Prior(x) into al
3246 mov cl, [esi + ebx] // load Prior(x) into cl
3259 inc ebx
3262 add [edi + ebx - 1], cl
3263 cmp ebx, FullLength
3295 xor ebx, ebx
3298 // ebx at alignment
3302 mov al, [esi+ebx]
3303 add [edi+ebx], al
3304 inc ebx
3305 cmp ebx, diff
3310 sub edx, ebx // subtract alignment fix
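Lines 3295-3310 open the Sub filter: here esi appears to point at the start of the row and edi at row + bpp, so adding [esi+ebx] into [edi+ebx] is Raw(x) = Sub(x) + Raw(x-bpp), again only up to the alignment boundary kept in diff. A scalar C sketch with illustrative names:

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch of the scalar Sub-filter step: each byte gets the
     * reconstructed byte bpp positions earlier added back in. */
    static void sub_unfilter(uint8_t *row, size_t bpp, size_t len)
    {
        for (size_t x = bpp; x < len; x++)
            row[x] = (uint8_t)(row[x] + row[x - bpp]);
    }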
3330 mov ebx, diff
3334 movq mm1, [edi+ebx-8]
3339 movq mm0, [edi+ebx]
3350 add ebx, 8
3352 cmp ebx, MMXLength
3353 movq [edi+ebx-8], mm0 // Write updated Raws back to array
3376 mov ebx, diff
3378 cmp ebx, FullLength
3384 mov al, [esi+ebx]
3385 add [edi+ebx], al
3386 inc ebx
3387 cmp ebx, FullLength
3403 mov ebx, diff
3407 movq mm1, [edi+ebx-8]
3411 movq mm0, [edi+ebx]
3418 add ebx, 8
3420 cmp ebx, MMXLength
3421 movq [edi+ebx-8], mm0
3435 mov ebx, diff
3446 movq mm1, [edi+ebx-8]
3452 movq mm0, [edi+ebx]
3468 add ebx, 8
3470 cmp ebx, MMXLength
3471 movq [edi+ebx-8], mm0 // Write updated Raws back to array
3481 mov ebx, diff
3485 movq mm7, [edi+ebx-8]
3489 movq mm0, [edi+ebx] // Load Sub(x) for 1st 8 bytes
3491 movq mm1, [edi+ebx+8] // Load Sub(x) for 2nd 8 bytes
3492 movq [edi+ebx], mm0 // Write Raw(x) for 1st 8 bytes
3499 movq mm2, [edi+ebx+16] // Load Sub(x) for 3rd 8 bytes
3500 movq [edi+ebx+8], mm1 // Write Raw(x) for 2nd 8 bytes
3502 movq mm3, [edi+ebx+24] // Load Sub(x) for 4th 8 bytes
3503 movq [edi+ebx+16], mm2 // Write Raw(x) for 3rd 8 bytes
3505 movq mm4, [edi+ebx+32] // Load Sub(x) for 5th 8 bytes
3506 movq [edi+ebx+24], mm3 // Write Raw(x) for 4th 8 bytes
3508 movq mm5, [edi+ebx+40] // Load Sub(x) for 6th 8 bytes
3509 movq [edi+ebx+32], mm4 // Write Raw(x) for 5th 8 bytes
3511 movq mm6, [edi+ebx+48] // Load Sub(x) for 7th 8 bytes
3512 movq [edi+ebx+40], mm5 // Write Raw(x) for 6th 8 bytes
3514 movq mm7, [edi+ebx+56] // Load Sub(x) for 8th 8 bytes
3515 movq [edi+ebx+48], mm6 // Write Raw(x) for 7th 8 bytes
3516 add ebx, 64
3518 cmp ebx, ecx
3519 movq [edi+ebx-8], mm7 // Write Raw(x) for 8th 8 bytes
3521 cmp ebx, MMXLength
3524 movq mm0, [edi+ebx]
3525 add ebx, 8
3527 cmp ebx, MMXLength
3528 movq [edi+ebx-8], mm0 // use -8 to offset early add to ebx
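The 64-byte loop at lines 3485-3528 appears to handle the bpp == 8 Sub case: each 8-byte group depends only on the group immediately before it, so a whole group can be added at once (the paddb lines fall on non-matching lines), eight groups are chained per iteration with mm7 carrying the last group across iterations, and a plain 8-byte loop finishes up to MMXLength. The same idea in C, with illustrative names:

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch of the bpp == 8 Sub case: no dependence inside an 8-byte
     * group, so the previous group is added to the current one as a unit
     * (what paddb does, byte-wise and without carries between bytes).
     * Any tail shorter than 8 bytes is left to a scalar pass, as in the
     * listing. */
    static void sub8_unfilter(uint8_t *row, size_t len)
    {
        for (size_t x = 8; x + 8 <= len; x += 8)
            for (size_t k = 0; k < 8; k++)
                row[x + k] = (uint8_t)(row[x + k] + row[x - 8 + k]);
    }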
3540 mov ebx, diff
3545 movq mm0, [edi+ebx]
3546 movq mm1, [esi+ebx]
3547 add ebx, 8
3549 cmp ebx, MMXLength
3550 movq [edi+ebx-8], mm0 // mov does not affect flags; -8 to offset
3551 // add ebx
3560 mov ebx, MMXLength
3562 cmp ebx, FullLength
3568 mov al, [esi+ebx]
3569 add [edi+ebx], al
3570 inc ebx
3571 cmp ebx, FullLength
3589 xor ebx, ebx
3598 mov al, [edi+ebx]
3599 add al, [esi+ebx]
3600 inc ebx
3601 cmp ebx, ecx
3602 mov [edi + ebx-1], al // mov does not affect flags; -1 to offset inc ebx
3607 sub edx, ebx // subtract alignment fix
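Lines 3589 onward appear to belong to the Up filter, the simplest of the four: every byte just gets the byte at the same offset in the previous row added to it, which is why the unrolled loop below can stream 64 bytes per iteration with no dependence between groups. A scalar C sketch with illustrative names:

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch of the scalar Up-filter step: add the corresponding byte of
     * the previous row to each byte of the current row. */
    static void up_unfilter(uint8_t *row, const uint8_t *prev, size_t len)
    {
        for (size_t x = 0; x < len; x++)
            row[x] = (uint8_t)(row[x] + prev[x]);
    }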
3613 movq mm1, [esi+ebx]
3614 movq mm0, [edi+ebx]
3615 movq mm3, [esi+ebx+8]
3617 movq mm2, [edi+ebx+8]
3618 movq [edi+ebx], mm0
3620 movq mm5, [esi+ebx+16]
3621 movq [edi+ebx+8], mm2
3622 movq mm4, [edi+ebx+16]
3623 movq mm7, [esi+ebx+24]
3625 movq mm6, [edi+ebx+24]
3626 movq [edi+ebx+16], mm4
3628 movq mm1, [esi+ebx+32]
3629 movq [edi+ebx+24], mm6
3630 movq mm0, [edi+ebx+32]
3631 movq mm3, [esi+ebx+40]
3633 movq mm2, [edi+ebx+40]
3634 movq [edi+ebx+32], mm0
3636 movq mm5, [esi+ebx+48]
3637 movq [edi+ebx+40], mm2
3638 movq mm4, [edi+ebx+48]
3639 movq mm7, [esi+ebx+56]
3641 movq mm6, [edi+ebx+56]
3642 movq [edi+ebx+48], mm4
3643 add ebx, 64
3645 cmp ebx, ecx
3646 movq [edi+ebx-8], mm6 // (+56)movq does not affect flags;
3647 // -8 to offset add ebx
3666 movq mm1, [esi+ebx]
3667 movq mm0, [edi+ebx]
3668 add ebx, 8
3670 cmp ebx, ecx
3671 movq [edi+ebx-8], mm0 // movq does not affect flags; -8 to offset add ebx
3680 mov al, [edi + ebx]
3681 add al, [esi + ebx]
3682 inc ebx
3683 cmp ebx, ecx
3684 mov [edi + ebx-1], al // mov does not affect flags; -1 to offset inc ebx