
Lines Matching refs:tmp16_offset

312 my $tmp16_offset        =  8*8 + $tmp_offset;
313 my $garray_offset = 8*16 + $tmp16_offset;
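These two definitions lay out consecutive regions of the stack frame: the 16-qword tmp16 buffer begins 8 qwords (64 bytes) past the tmp area, and garray begins another 16 qwords (128 bytes) after that. A minimal sketch of the arithmetic, with $tmp_offset set to a placeholder value since its real definition is not among the matched lines:

    my $tmp_offset    = 0;                    # placeholder; the real base is defined earlier in the script
    my $tmp16_offset  = 8*8  + $tmp_offset;   # tmp16 starts 8 qwords (64 bytes) past tmp
    my $garray_offset = 8*16 + $tmp16_offset; # garray starts past tmp16's 16 qwords (128 bytes)
    printf("tmp16 at +%d, garray at +%d\n", $tmp16_offset, $garray_offset);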
428 lea (+$tmp16_offset+$STACK_DEPTH)(%rsp), %rcx # X (Asrc) 1024 bits, 16 qwords
686 &MUL_512x512("%rsp+$tmp16_offset+8", "%rdi", "%rsi", [map("%r$_",(10..15,8..9))], "%rbp", "%rbx");
1084 &SQR_512("%rsp+$tmp16_offset+8", "%rcx", [map("%r$_",(10..15,8..9))], "%rbx", "%rbp", "%rsi", "%rdi");
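Both calls write their result to %rsp+$tmp16_offset+8, which is why the region is sized at 16 qwords: a 512x512-bit multiply (or a 512-bit squaring) produces a 1024-bit result. A plain-Perl sketch of that size argument using Math::BigInt, not the assembly routines themselves:

    use Math::BigInt;
    my $a = Math::BigInt->new(2)->bpow(512)->bsub(1);     # largest 512-bit operand
    my $p = $a->copy->bmul($a);                           # square (worst case for the multiply too)
    printf("result bits: %d\n", length($p->as_bin) - 2);  # 1024 bits -> fits in 16 qwords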
1137 movdqa %xmm4, (+$tmp16_offset+16*0)(%rsp)
1138 movdqa %xmm4, (+$tmp16_offset+16*1)(%rsp)
1139 movdqa %xmm4, (+$tmp16_offset+16*6)(%rsp)
1140 movdqa %xmm4, (+$tmp16_offset+16*7)(%rsp)
1141 movdqa %xmm0, (+$tmp16_offset+16*2)(%rsp)
1142 movdqa %xmm1, (+$tmp16_offset+16*3)(%rsp)
1143 movdqa %xmm2, (+$tmp16_offset+16*4)(%rsp)
1144 movdqa %xmm3, (+$tmp16_offset+16*5)(%rsp)
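Taken together, the eight movdqa stores at 1137-1144 fill the whole 16-qword tmp16 region in 16-byte chunks: %xmm0..%xmm3 carry a 512-bit value into chunks 2..5, while %xmm4 pads chunks 0..1 and 6..7 (its contents, presumably a fill pattern such as zero, are set outside the matched lines). A sketch of the resulting layout under that assumption:

    # Hypothetical model of tmp16 after lines 1137-1144; each entry is one
    # 16-byte chunk, and 'pad' assumes %xmm4 was zeroed beforehand.
    my @tmp16_at_1137 = qw(pad pad x0 x1 x2 x3 pad pad);
    print "@tmp16_at_1137\n";   # 512-bit value sits in the middle of the 1024-bit buffer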
1302 movdqa %xmm4, (+$tmp16_offset+16*4)(%rsp)
1303 movdqa %xmm4, (+$tmp16_offset+16*5)(%rsp)
1304 movdqa %xmm4, (+$tmp16_offset+16*6)(%rsp)
1305 movdqa %xmm4, (+$tmp16_offset+16*7)(%rsp)
1306 movdqa %xmm0, (+$tmp16_offset+16*0)(%rsp)
1307 movdqa %xmm1, (+$tmp16_offset+16*1)(%rsp)
1308 movdqa %xmm2, (+$tmp16_offset+16*2)(%rsp)
1309 movdqa %xmm3, (+$tmp16_offset+16*3)(%rsp)
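The stores at 1302-1309 reuse the same registers but with the value and the padding swapped into opposite halves: %xmm0..%xmm3 land in chunks 0..3 and %xmm4 fills chunks 4..7, so the 512-bit value now occupies the low half of the buffer. The same kind of sketch, under the same assumption about %xmm4:

    # Hypothetical model of tmp16 after lines 1302-1309 (same assumption
    # about %xmm4 as above): value in the low 8 qwords, padding in the high 8.
    my @tmp16_at_1302 = qw(x0 x1 x2 x3 pad pad pad pad);
    print "@tmp16_at_1302\n";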