github.com/q45/go@v0.0.0-20151101211701-a4fb8c13db3f/src/cmd/compile/internal/arm/cgen.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package arm

import (
	"cmd/compile/internal/gc"
	"cmd/internal/obj"
	"cmd/internal/obj/arm"
)

/*
 * generate array index into res.
 * n might be any size; res is 32-bit.
 * returns Prog* to patch to panic call.
 */
func cgenindex(n *gc.Node, res *gc.Node, bounded bool) *obj.Prog {
	if !gc.Is64(n.Type) {
		gc.Cgen(n, res)
		return nil
	}

	var tmp gc.Node
	gc.Tempname(&tmp, gc.Types[gc.TINT64])
	gc.Cgen(n, &tmp)
	var lo gc.Node
	var hi gc.Node
	split64(&tmp, &lo, &hi)
	gmove(&lo, res)
	if bounded {
		splitclean()
		return nil
	}

	// The index is 64-bit: the low word has been moved into res, so the
	// bounds check must also verify that the high word is zero.
	var n1 gc.Node
	gc.Regalloc(&n1, gc.Types[gc.TINT32], nil)
	var n2 gc.Node
	gc.Regalloc(&n2, gc.Types[gc.TINT32], nil)
	var zero gc.Node
	gc.Nodconst(&zero, gc.Types[gc.TINT32], 0)
	gmove(&hi, &n1)
	gmove(&zero, &n2)
	gins(arm.ACMP, &n1, &n2)
	gc.Regfree(&n2)
	gc.Regfree(&n1)
	splitclean()
	return gc.Gbranch(arm.ABNE, nil, -1)
}

// igenindex is like cgenindex, but leaves the index in a freshly
// allocated temporary rather than in an existing node.
func igenindex(n *gc.Node, res *gc.Node, bounded bool) *obj.Prog {
	gc.Tempname(res, n.Type)
	return cgenindex(n, res, bounded)
}

// blockcopy copies the w-byte value at n to res. osrc and odst are the
// stack offsets of the source and destination, used to check alignment
// and to detect overlap.
func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
	// determine alignment.
	// want to avoid unaligned access, so have to use
	// smaller operations for less aligned types.
	// for example moving [4]byte must use 4 MOVB not 1 MOVW.
	align := int(n.Type.Align)

	var op int
	switch align {
	default:
		gc.Fatalf("sgen: invalid alignment %d for %v", align, n.Type)

	case 1:
		op = arm.AMOVB

	case 2:
		op = arm.AMOVH

	case 4:
		op = arm.AMOVW
	}

	if w%int64(align) != 0 {
		gc.Fatalf("sgen: unaligned size %d (align=%d) for %v", w, align, n.Type)
	}
	c := int32(w / int64(align))

	if osrc%int64(align) != 0 || odst%int64(align) != 0 {
		gc.Fatalf("sgen: unaligned offset src %d or dst %d (align %d)", osrc, odst, align)
	}

	// if we are copying forward on the stack and
	// the src and dst overlap, then reverse direction
	dir := align
	if osrc < odst && odst < osrc+w {
		dir = -dir
	}

	// Word-aligned forward copies of 4 to 128 words go through the
	// unrolled runtime duffcopy routine instead of an explicit loop.
	if op == arm.AMOVW && !gc.Nacl && dir > 0 && c >= 4 && c <= 128 {
		var r0 gc.Node
		r0.Op = gc.OREGISTER
		r0.Reg = arm.REG_R0
		var r1 gc.Node
		r1.Op = gc.OREGISTER
		r1.Reg = arm.REG_R0 + 1
		var r2 gc.Node
		r2.Op = gc.OREGISTER
		r2.Reg = arm.REG_R0 + 2

		var src gc.Node
		gc.Regalloc(&src, gc.Types[gc.Tptr], &r1)
		var dst gc.Node
		gc.Regalloc(&dst, gc.Types[gc.Tptr], &r2)
		if n.Ullman >= res.Ullman {
			// eval n first
			gc.Agen(n, &src)

			if res.Op == gc.ONAME {
				gc.Gvardef(res)
			}
			gc.Agen(res, &dst)
		} else {
			// eval res first
			if res.Op == gc.ONAME {
				gc.Gvardef(res)
			}
			gc.Agen(res, &dst)
			gc.Agen(n, &src)
		}

		var tmp gc.Node
		gc.Regalloc(&tmp, gc.Types[gc.Tptr], &r0)
		f := gc.Sysfunc("duffcopy")
		p := gins(obj.ADUFFCOPY, nil, f)
		gc.Afunclit(&p.To, f)

		// 8 and 128 = magic constants: see ../../runtime/asm_arm.s
		p.To.Offset = 8 * (128 - int64(c))

		gc.Regfree(&tmp)
		gc.Regfree(&src)
		gc.Regfree(&dst)
		return
	}

	var dst gc.Node
	var src gc.Node
	if n.Ullman >= res.Ullman {
		gc.Agenr(n, &dst, res) // temporarily use dst
		gc.Regalloc(&src, gc.Types[gc.Tptr], nil)
		gins(arm.AMOVW, &dst, &src)
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		gc.Agen(res, &dst)
	} else {
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		gc.Agenr(res, &dst, res)
		gc.Agenr(n, &src, nil)
	}

	var tmp gc.Node
	gc.Regalloc(&tmp, gc.Types[gc.TUINT32], nil)

	// set up end marker
	var nend gc.Node

	if c >= 4 {
		gc.Regalloc(&nend, gc.Types[gc.TUINT32], nil)

		p := gins(arm.AMOVW, &src, &nend)
		p.From.Type = obj.TYPE_ADDR
		if dir < 0 {
			p.From.Offset = int64(dir)
		} else {
			p.From.Offset = w
		}
	}

	// move src and dest to the end of block if necessary
	if dir < 0 {
		p := gins(arm.AMOVW, &src, &src)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = w + int64(dir)

		p = gins(arm.AMOVW, &dst, &dst)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = w + int64(dir)
	}

	// move
	if c >= 4 {
		// copy loop: post-increment (.P) loads and stores advance src
		// and dst by dir bytes after each access, looping until src
		// reaches the end marker in nend.
		p := gins(op, &src, &tmp)
		p.From.Type = obj.TYPE_MEM
		p.From.Offset = int64(dir)
		p.Scond |= arm.C_PBIT
		ploop := p

		p = gins(op, &tmp, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = int64(dir)
		p.Scond |= arm.C_PBIT

		p = gins(arm.ACMP, &src, nil)
		raddr(&nend, p)

		gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), ploop)
		gc.Regfree(&nend)
	} else {
		// fewer than 4 pieces: emit the moves unrolled inline.
		var p *obj.Prog
		for ; c > 0; c-- {
			p = gins(op, &src, &tmp)
			p.From.Type = obj.TYPE_MEM
			p.From.Offset = int64(dir)
			p.Scond |= arm.C_PBIT

			p = gins(op, &tmp, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = int64(dir)
			p.Scond |= arm.C_PBIT
		}
	}

	gc.Regfree(&dst)
	gc.Regfree(&src)
	gc.Regfree(&tmp)
}
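
// The ADUFFCOPY offset arithmetic above ("8 and 128 = magic constants")
// is worth a worked example. The helper below is an illustrative sketch
// added for exposition, not part of the original file. It assumes, per
// runtime/asm_arm.s, that duffcopy is 128 unrolled copy units of 8 bytes
// of code each (one MOVW.P load plus one MOVW.P store), so branching
// 8*(128-c) bytes past the symbol executes exactly the last c units.
func duffcopyOffset(c int64) int64 {
	// Mirrors the guard in blockcopy: only 4 <= c <= 128 words are
	// dispatched through duffcopy.
	// e.g. c = 128 -> offset 0 (run every unit);
	//      c = 4   -> offset 992 (skip 124 units, copy the last 4 words).
	return 8 * (128 - c)
}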