// Do not edit. Bootstrap copy of /Volumes/Android/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/amd64/cgen.go

//line /Volumes/Android/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/amd64/cgen.go:1
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package amd64

import (
	"bootstrap/compile/internal/gc"
	"bootstrap/internal/obj"
	"bootstrap/internal/obj/x86"
)

// blockcopy generates code to copy w bytes from n to ns. osrc and odst
// are the stack offsets of the source and destination, used to detect
// overlap: the copy must run backward when the destination sits above
// the source and the two ranges overlap.
func blockcopy(n, ns *gc.Node, osrc, odst, w int64) {
	var noddi gc.Node
	gc.Nodreg(&noddi, gc.Types[gc.Tptr], x86.REG_DI)
	var nodsi gc.Node
	gc.Nodreg(&nodsi, gc.Types[gc.Tptr], x86.REG_SI)

	var nodl gc.Node
	var nodr gc.Node
	if n.Ullman >= ns.Ullman {
		gc.Agenr(n, &nodr, &nodsi) // source address into SI
		if ns.Op == gc.ONAME {
			gc.Gvardef(ns)
		}
		gc.Agenr(ns, &nodl, &noddi) // destination address into DI
	} else {
		if ns.Op == gc.ONAME {
			gc.Gvardef(ns)
		}
		gc.Agenr(ns, &nodl, &noddi) // destination address into DI
		gc.Agenr(n, &nodr, &nodsi)  // source address into SI
	}

	if nodl.Reg != x86.REG_DI {
		gmove(&nodl, &noddi)
	}
	if nodr.Reg != x86.REG_SI {
		gmove(&nodr, &nodsi)
	}
	gc.Regfree(&nodl)
	gc.Regfree(&nodr)

	c := w % 8 // bytes
	q := w / 8 // quads

	var oldcx gc.Node
	var cx gc.Node
	savex(x86.REG_CX, &cx, &oldcx, nil, gc.Types[gc.TINT64])

	// if we are copying forward on the stack and
	// the src and dst overlap, then reverse direction
	if osrc < odst && odst < osrc+w {
		// reverse direction
		gins(x86.ASTD, nil, nil) // set direction flag
		if c > 0 {
			gconreg(addptr, w-1, x86.REG_SI)
			gconreg(addptr, w-1, x86.REG_DI)

			gconreg(movptr, c, x86.REG_CX)
			gins(x86.AREP, nil, nil)   // repeat
			gins(x86.AMOVSB, nil, nil) // MOVB *(SI)-,*(DI)-
		}

		if q > 0 {
			if c > 0 {
				gconreg(addptr, -7, x86.REG_SI)
				gconreg(addptr, -7, x86.REG_DI)
			} else {
				gconreg(addptr, w-8, x86.REG_SI)
				gconreg(addptr, w-8, x86.REG_DI)
			}

			gconreg(movptr, q, x86.REG_CX)
			gins(x86.AREP, nil, nil)   // repeat
			gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)-,*(DI)-
		}

		// we leave with the flag clear
		gins(x86.ACLD, nil, nil)
	} else {
		// normal direction
		if q > 128 || (gc.Nacl && q >= 4) {
			gconreg(movptr, q, x86.REG_CX)
			gins(x86.AREP, nil, nil)   // repeat
			gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)+,*(DI)+
		} else if q >= 4 {
			p := gins(obj.ADUFFCOPY, nil, nil)
			p.To.Type = obj.TYPE_ADDR
			p.To.Sym = gc.Linksym(gc.Pkglookup("duffcopy", gc.Runtimepkg))

			// 14 and 128 = magic constants: see ../../runtime/asm_amd64.s
			p.To.Offset = 14 * (128 - q)
		} else if !gc.Nacl && c == 0 {
			// We don't need the MOVSQ side-effect of updating SI and DI,
			// and issuing a sequence of MOVQs directly is faster.
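			// Reinterpret the SI and DI register nodes as indirect
			// operands (OINDREG), so each gmove pair below assembles
			// to MOVQ off(SI), CX and then MOVQ CX, off(DI).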
			nodsi.Op = gc.OINDREG

			noddi.Op = gc.OINDREG
			for q > 0 {
				gmove(&nodsi, &cx) // MOVQ x+(SI),CX
				gmove(&cx, &noddi) // MOVQ CX,x+(DI)
				nodsi.Xoffset += 8
				noddi.Xoffset += 8
				q--
			}
		} else {
			for q > 0 {
				gins(x86.AMOVSQ, nil, nil) // MOVQ *(SI)+,*(DI)+
				q--
			}
		}

		// copy the remaining c bytes
		if w < 4 || c <= 1 || (odst < osrc && osrc < odst+w) {
			for c > 0 {
				gins(x86.AMOVSB, nil, nil) // MOVB *(SI)+,*(DI)+
				c--
			}
		} else if w < 8 || c <= 4 {
			// Finish with 4-byte moves: one at offset 0 if c > 4, and
			// one at offset c-4 that ends exactly at the last byte,
			// overlapping earlier stores rather than looping per byte.
			nodsi.Op = gc.OINDREG
			noddi.Op = gc.OINDREG
			cx.Type = gc.Types[gc.TINT32]
			nodsi.Type = gc.Types[gc.TINT32]
			noddi.Type = gc.Types[gc.TINT32]
			if c > 4 {
				nodsi.Xoffset = 0
				noddi.Xoffset = 0
				gmove(&nodsi, &cx)
				gmove(&cx, &noddi)
			}

			nodsi.Xoffset = c - 4
			noddi.Xoffset = c - 4
			gmove(&nodsi, &cx)
			gmove(&cx, &noddi)
		} else {
			// Finish with a single 8-byte move at offset c-8, which
			// ends exactly at the last byte and overlaps bytes already
			// copied by the quad loop.
			nodsi.Op = gc.OINDREG
			noddi.Op = gc.OINDREG
			cx.Type = gc.Types[gc.TINT64]
			nodsi.Type = gc.Types[gc.TINT64]
			noddi.Type = gc.Types[gc.TINT64]
			nodsi.Xoffset = c - 8
			noddi.Xoffset = c - 8
			gmove(&nodsi, &cx)
			gmove(&cx, &noddi)
		}
	}

	restx(&cx, &oldcx)
}
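
// The tail copies above use a standard memmove trick: rather than a
// byte-at-a-time loop, the final c bytes are covered by a fixed-size
// move positioned to end exactly at the last byte, deliberately
// overlapping bytes that were already copied. A minimal plain-Go
// sketch of the same idea (copyTail is an illustrative name, not part
// of the compiler; it assumes 4 < n <= 8 and uses encoding/binary):
//
//	// copyTail copies the first n bytes of src into dst using two
//	// possibly-overlapping 4-byte moves instead of a byte loop.
//	func copyTail(dst, src []byte, n int) {
//		binary.LittleEndian.PutUint32(dst, binary.LittleEndian.Uint32(src))
//		binary.LittleEndian.PutUint32(dst[n-4:], binary.LittleEndian.Uint32(src[n-4:]))
//	}
//
// For n = 7 the two stores cover bytes 0-3 and 3-6; byte 3 is written
// twice, avoiding a per-byte loop.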