// Do not edit. Bootstrap copy of /Volumes/Android/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/arm64/cgen.go

//line /Volumes/Android/buildbot/src/android/build-tools/out/obj/go/src/cmd/compile/internal/arm64/cgen.go:1
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package arm64

import (
	"bootstrap/compile/internal/gc"
	"bootstrap/internal/obj"
	"bootstrap/internal/obj/arm64"
)

// blockcopy copies w bytes from n to res. osrc and odst are the stack
// offsets of the source and destination; they are used to detect overlap
// and, if necessary, copy backward.
func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
	// Determine alignment.
	// We want to avoid unaligned access, so we have to use smaller
	// operations for less-aligned types. For example, moving [4]byte
	// must use 4 MOVB instructions, not 1 MOVW.
	align := int(n.Type.Align)

	var op int
	switch align {
	default:
		gc.Fatal("sgen: invalid alignment %d for %v", align, n.Type)

	case 1:
		op = arm64.AMOVB

	case 2:
		op = arm64.AMOVH

	case 4:
		op = arm64.AMOVW

	case 8:
		op = arm64.AMOVD
	}

	if w%int64(align) != 0 {
		gc.Fatal("sgen: unaligned size %d (align=%d) for %v", w, align, n.Type)
	}
	c := int32(w / int64(align)) // number of elements to move

	if osrc%int64(align) != 0 || odst%int64(align) != 0 {
		gc.Fatal("sgen: unaligned offset src %d or dst %d (align %d)", osrc, odst, align)
	}

	// If we are copying forward on the stack and the src and dst
	// overlap, then reverse direction.
	dir := align

	if osrc < odst && odst < osrc+w {
		dir = -dir
	}

	// Generate the address with the higher Ullman number (the more
	// register-hungry side) first to reduce register pressure.
	var dst gc.Node
	var src gc.Node
	if n.Ullman >= res.Ullman {
		gc.Agenr(n, &dst, res) // temporarily use dst for the src address
		gc.Regalloc(&src, gc.Types[gc.Tptr], nil)
		gins(arm64.AMOVD, &dst, &src)
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		gc.Agen(res, &dst)
	} else {
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		gc.Agenr(res, &dst, res)
		gc.Agenr(n, &src, nil)
	}

	var tmp gc.Node
	gc.Regalloc(&tmp, gc.Types[gc.Tptr], nil)

	// Set up the end marker for the c >= 4 loop.
	var nend gc.Node

	// Move src and dst to the end of the block if copying backward;
	// otherwise back them up by one element so that the pre-indexed
	// loads and stores below land on the first element.
	if dir < 0 {
		if c >= 4 {
			gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
			gins(arm64.AMOVD, &src, &nend)
		}

		p := gins(arm64.AADD, nil, &src)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = w

		p = gins(arm64.AADD, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = w
	} else {
		p := gins(arm64.AADD, nil, &src)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(-dir)

		p = gins(arm64.AADD, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(-dir)

		if c >= 4 {
			gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
			p = gins(arm64.AMOVD, &src, &nend)
			p.From.Type = obj.TYPE_ADDR
			p.From.Offset = w
		}
	}

	// Move.
	// TODO: enable duffcopy for larger copies.
	if c >= 4 {
		// Loop: pre-indexed load and store advance src and dst by dir
		// each iteration until src reaches the end marker.
		p := gins(op, &src, &tmp)
		p.From.Type = obj.TYPE_MEM
		p.From.Offset = int64(dir)
		p.Scond = arm64.C_XPRE
		ploop := p

		p = gins(op, &tmp, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = int64(dir)
		p.Scond = arm64.C_XPRE

		gcmp(arm64.ACMP, &src, &nend)

		gc.Patch(gc.Gbranch(arm64.ABNE, nil, 0), ploop)
		gc.Regfree(&nend)
	} else {
		// TODO(austin): Instead of generating ADD $-8,R8; ADD
		// $-8,R7; n*(MOVDU 8(R8),R9; MOVDU R9,8(R7);) just
		// generate the offsets directly and eliminate the
		// ADDs. That will produce shorter, more
		// pipeline-able code.
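		// A rough sketch of what this unrolled path emits (assumed
		// register names, forward copy with 8-byte alignment, c == 2,
		// after the ADD $-8 adjustments above):
		//	MOVD.W	8(Rsrc), Rtmp
		//	MOVD.W	Rtmp, 8(Rdst)
		//	MOVD.W	8(Rsrc), Rtmp
		//	MOVD.W	Rtmp, 8(Rdst)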
		for ; c > 0; c-- {
			// Pre-indexed load into the scratch register, then a
			// pre-indexed store, advancing src and dst by dir.
			p := gins(op, &src, &tmp)
			p.From.Type = obj.TYPE_MEM
			p.From.Offset = int64(dir)
			p.Scond = arm64.C_XPRE

			p = gins(op, &tmp, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = int64(dir)
			p.Scond = arm64.C_XPRE
		}
	}

	gc.Regfree(&dst)
	gc.Regfree(&src)
	gc.Regfree(&tmp)
}
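// Illustrative example (assumed register names, not emitted verbatim):
// for an 8-byte-aligned forward copy of 32 bytes (c == 4, dir == 8),
// blockcopy emits roughly:
//
//	ADD	$-8, Rsrc		// back up one element for pre-indexing
//	ADD	$-8, Rdst
//	MOVD	$32(Rsrc), Rend		// end marker
// loop:
//	MOVD.W	8(Rsrc), Rtmp		// pre-indexed load, advances Rsrc
//	MOVD.W	Rtmp, 8(Rdst)		// pre-indexed store, advances Rdst
//	CMP	Rsrc, Rend
//	BNE	loop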