
Lines Matching refs:rdx

269 		mov	$(-MAX_MATCH_8), %rdx
275 prefetcht1 (%windowbestlen, %rdx)
276 prefetcht1 (%prev, %rdx)
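
The matches above initialise %rdx to -MAX_MATCH_8 and prefetch both strings at that offset. This is the usual negative-index idiom: the base pointers (%windowbestlen and %prev) are biased past the region being compared, so one register serves as both the byte offset and the loop counter, and %rdx reaching zero means the whole MAX_MATCH_8 region matched. A minimal C sketch of the idiom, with hypothetical names not taken from the assembly source:

    #include <stddef.h>

    /* Compare the last `max` bytes before a_end and b_end using a negative
     * index that counts up toward zero, mirroring the -MAX_MATCH_8 setup.
     * Returns the number of leading bytes that matched (hypothetical helper). */
    static size_t match_len_negidx(const unsigned char *a_end,
                                   const unsigned char *b_end,
                                   size_t max)
    {
        ptrdiff_t d = -(ptrdiff_t)max;        /* start at -max, count up to 0 */
        while (d != 0 && a_end[d] == b_end[d])
            d++;
        return (size_t)((ptrdiff_t)max + d);  /* bytes that compared equal */
    }
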
280 * adjust %rdx so that it is offset to the exact byte that mismatched.
293 movdqu (%windowbestlen, %rdx), %xmm1
294 movdqu (%prev, %rdx), %xmm2
296 movdqu 16(%windowbestlen, %rdx), %xmm3
297 movdqu 16(%prev, %rdx), %xmm4
299 movdqu 32(%windowbestlen, %rdx), %xmm5
300 movdqu 32(%prev, %rdx), %xmm6
302 movdqu 48(%windowbestlen, %rdx), %xmm7
303 movdqu 48(%prev, %rdx), %xmm8
313 incremented rdx by 0x108 (each loop iteration add 16*4 = 0x40
315 add $8, %rdx
317 add $8, %rdx
326 add $16, %rdx
334 add $16, %rdx
342 add $16, %rdx
345 LeaveLoopCmps: add %rax, %rdx
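
The movdqu block above loads four 16-byte chunks from each string per iteration (64 bytes, hence the 0x40 step mentioned in the comment at source line 313; 0x108 = 264 is MAX_MATCH_8, zlib's MAX_MATCH of 258 rounded up to a multiple of 8, so having advanced %rdx by the full 0x108 means the maximum-length match was found). The comment at source line 280 then describes adjusting %rdx to the exact byte that mismatched. A hedged C sketch of that general SSE2 technique, an illustration of the idea rather than a transcription of the assembly, with a hypothetical helper name (GCC/Clang builtin assumed):

    #include <emmintrin.h>   /* SSE2 intrinsics */

    /* Return the index (0..15) of the first byte that differs between two
     * 16-byte chunks, or 16 if they are identical (hypothetical helper). */
    static int first_mismatch16(const unsigned char *a, const unsigned char *b)
    {
        __m128i va = _mm_loadu_si128((const __m128i *)a);
        __m128i vb = _mm_loadu_si128((const __m128i *)b);
        __m128i eq = _mm_cmpeq_epi8(va, vb);          /* 0xFF where bytes match */
        int mask   = _mm_movemask_epi8(eq) ^ 0xFFFF;  /* set bits mark mismatches */
        if (mask == 0)
            return 16;                                /* all 16 bytes equal */
        return __builtin_ctz(mask);                   /* first mismatching byte */
    }
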
347 mov (%windowbestlen, %rdx), %rax
348 xor (%prev, %rdx), %rax
351 mov 8(%windowbestlen, %rdx), %rax
352 xor 8(%prev, %rdx), %rax
355 mov 16(%windowbestlen, %rdx), %rax
356 xor 16(%prev, %rdx), %rax
359 add $24, %rdx
370 add %rax, %rdx
373 add $8, %rdx
375 add $8, %rdx
378 add $4, %rdx
382 add $2, %rdx
385 adc $0, %rdx
392 lea (%prev, %rdx), %rax
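
The scalar path above compares eight bytes at a time with mov/xor; a nonzero xor result marks the first differing group, a bit scan of that result is converted into a byte offset that is folded into %rdx (the add/adc lines), and the final lea (%prev, %rdx), %rax appears to form the absolute address of the mismatch for the length computation. A hedged C sketch of the 64-bit xor-and-count-trailing-zeros step, little-endian as on amd64, with a hypothetical helper name (GCC/Clang builtin assumed):

    #include <stdint.h>
    #include <string.h>

    /* Return the index (0..7) of the first byte that differs between two
     * 8-byte groups, or 8 if they are equal (hypothetical helper). */
    static int first_mismatch8(const unsigned char *a, const unsigned char *b)
    {
        uint64_t wa, wb;
        memcpy(&wa, a, 8);                  /* unaligned-safe 64-bit loads */
        memcpy(&wb, b, 8);
        uint64_t diff = wa ^ wb;            /* nonzero bits mark differing bytes */
        if (diff == 0)
            return 8;                       /* all eight bytes equal */
        return __builtin_ctzll(diff) >> 3;  /* first set bit -> byte index */
    }
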