Home | History | Annotate | Download | only in x86
      1 %verify "executed"
      2     /*
      3      * Signed 64-bit integer multiply, 2-addr version
      4      *
      5      * We could definitely use more free registers for
      6      * this code.  We must spill rPC (edx) because it
      7      * is used by imul.  We'll also spill rINST (ebx),
      8      * giving us eax, ebx, ecx and edx as computational
      9      * temps.  On top of that, we'll spill rIBASE (edi)
     10      * for use as the vA pointer and rFP (esi) for use
     11      * as the vB pointer.  Yuck.
     12      */
     13     /* mul-long/2addr vA, vB */
     14     movzbl    rINST_HI,%eax            # eax<- BA (both register nibbles)
     15     andb      $$0xf,%al                # eax<- A (low nibble)
     16     sarl      $$12,rINST_FULL          # rINST_FULL<- B (high nibble of BA)
     17     SPILL(rPC)
     18     SPILL(rIBASE)
     19     SPILL(rFP)
     20     leal      (rFP,%eax,4),rIBASE      # rIBASE<- &v[A] (repurposed as vA ptr)
     21     leal      (rFP,rINST_FULL,4),rFP   # rFP<- &v[B] (repurposed as vB ptr)
     22     movl      4(rIBASE),%ecx      # ecx<- Amsw
     23     imull     (rFP),%ecx          # ecx<- (Amsw*Blsw)
     24     movl      4(rFP),%eax         # eax<- Bmsw
     25     imull     (rIBASE),%eax       # eax<- (Bmsw*Alsw)
     26     addl      %eax,%ecx           # ecx<- (Amsw*Blsw)+(Bmsw*Alsw), cross terms
     27     movl      (rFP),%eax          # eax<- Blsw
     28     mull      (rIBASE)            # edx:eax<- full 64-bit (Blsw*Alsw)
     29     jmp       .L${opcode}_continue
     30 %break
     31 
     32 .L${opcode}_continue:
     33     leal      (%ecx,%edx),%edx    # edx<- cross terms + high(Blsw*Alsw); full result now in %edx:%eax
     34     movl      %edx,4(rIBASE)      # v[A+1]<- %edx (store msw before rIBASE unspill)
     35     UNSPILL(rPC)                  # restore rPC/%edx (must precede instruction fetch)
     36     FETCH_INST_WORD(1)
     37     movl      %eax,(rIBASE)       # v[A]<- %eax
     38     UNSPILL(rFP)
     39     UNSPILL(rIBASE)
     40     ADVANCE_PC(1)
     41     GOTO_NEXT
     42 
     42 
     43