rsc.io/go@v0.0.0-20150416155037-e040fd465409/src/cmd/5g/cgen.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import (
	"cmd/internal/gc"
	"cmd/internal/obj"
	"cmd/internal/obj/arm"
)

/*
 * generate array index into res.
 * n might be any size; res is 32-bit.
 * returns Prog* to patch to panic call.
 */
func cgenindex(n *gc.Node, res *gc.Node, bounded bool) *obj.Prog {
	if !gc.Is64(n.Type) {
		gc.Cgen(n, res)
		return nil
	}

	var tmp gc.Node
	gc.Tempname(&tmp, gc.Types[gc.TINT64])
	gc.Cgen(n, &tmp)
	var lo gc.Node
	var hi gc.Node
	split64(&tmp, &lo, &hi)
	gmove(&lo, res)
	if bounded {
		splitclean()
		return nil
	}

	// The index is 64-bit but res is 32-bit: the low word was moved
	// into res above; if the high word is non-zero the index is out
	// of range, so compare it against zero and branch to the panic.
	var n1 gc.Node
	gc.Regalloc(&n1, gc.Types[gc.TINT32], nil)
	var n2 gc.Node
	gc.Regalloc(&n2, gc.Types[gc.TINT32], nil)
	var zero gc.Node
	gc.Nodconst(&zero, gc.Types[gc.TINT32], 0)
	gmove(&hi, &n1)
	gmove(&zero, &n2)
	gins(arm.ACMP, &n1, &n2)
	gc.Regfree(&n2)
	gc.Regfree(&n1)
	splitclean()
	return gc.Gbranch(arm.ABNE, nil, -1)
}

// igenindex is like cgenindex but leaves the index in a stack temporary.
func igenindex(n *gc.Node, res *gc.Node, bounded bool) *obj.Prog {
	gc.Tempname(res, n.Type)
	return cgenindex(n, res, bounded)
}

// gencmp0 compares n against zero and, on condition o, branches to to.
func gencmp0(n *gc.Node, t *gc.Type, o int, likely int, to *obj.Prog) {
	var n1 gc.Node

	gc.Regalloc(&n1, t, nil)
	gc.Cgen(n, &n1)
	a := optoas(gc.OCMP, t)
	if a != arm.ACMP {
		var n2 gc.Node
		gc.Nodconst(&n2, t, 0)
		var n3 gc.Node
		gc.Regalloc(&n3, t, nil)
		gmove(&n2, &n3)
		gins(a, &n1, &n3)
		gc.Regfree(&n3)
	} else {
		gins(arm.ATST, &n1, nil)
	}
	a = optoas(o, t)
	gc.Patch(gc.Gbranch(a, t, likely), to)
	gc.Regfree(&n1)
}
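// stackcopy copies the w bytes at n to res, both on the stack at
// offsets osrc and odst. It picks the widest move the alignment
// allows, reverses direction when the regions overlap, and uses
// duffcopy for large word-aligned forward copies.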
func stackcopy(n, res *gc.Node, osrc, odst, w int64) {
	// determine alignment.
	// want to avoid unaligned access, so have to use
	// smaller operations for less aligned types.
	// for example moving [4]byte must use 4 MOVB not 1 MOVW.
	align := int(n.Type.Align)

	var op int
	switch align {
	default:
		gc.Fatal("sgen: invalid alignment %d for %v", align, gc.Tconv(n.Type, 0))

	case 1:
		op = arm.AMOVB

	case 2:
		op = arm.AMOVH

	case 4:
		op = arm.AMOVW
	}

	if w%int64(align) != 0 {
		gc.Fatal("sgen: unaligned size %d (align=%d) for %v", w, align, gc.Tconv(n.Type, 0))
	}
	c := int32(w / int64(align))

	if osrc%int64(align) != 0 || odst%int64(align) != 0 {
		gc.Fatal("sgen: unaligned offset src %d or dst %d (align %d)", osrc, odst, align)
	}

	// if we are copying forward on the stack and
	// the src and dst overlap, then reverse direction
	dir := align

	if osrc < odst && odst < osrc+w {
		dir = -dir
	}

	if op == arm.AMOVW && !gc.Nacl && dir > 0 && c >= 4 && c <= 128 {
		// fast path: forward word copy via duffcopy, which takes
		// src in R1 and dst in R2 and clobbers R0.
		var r0 gc.Node
		r0.Op = gc.OREGISTER
		r0.Reg = arm.REG_R0
		var r1 gc.Node
		r1.Op = gc.OREGISTER
		r1.Reg = arm.REG_R0 + 1
		var r2 gc.Node
		r2.Op = gc.OREGISTER
		r2.Reg = arm.REG_R0 + 2

		var src gc.Node
		gc.Regalloc(&src, gc.Types[gc.Tptr], &r1)
		var dst gc.Node
		gc.Regalloc(&dst, gc.Types[gc.Tptr], &r2)
		if n.Ullman >= res.Ullman {
			// eval n first
			gc.Agen(n, &src)

			if res.Op == gc.ONAME {
				gc.Gvardef(res)
			}
			gc.Agen(res, &dst)
		} else {
			// eval res first
			if res.Op == gc.ONAME {
				gc.Gvardef(res)
			}
			gc.Agen(res, &dst)
			gc.Agen(n, &src)
		}

		var tmp gc.Node
		gc.Regalloc(&tmp, gc.Types[gc.Tptr], &r0)
		f := gc.Sysfunc("duffcopy")
		p := gins(obj.ADUFFCOPY, nil, f)
		gc.Afunclit(&p.To, f)

		// 8 and 128 = magic constants: see ../../runtime/asm_arm.s
		p.To.Offset = 8 * (128 - int64(c))

		gc.Regfree(&tmp)
		gc.Regfree(&src)
		gc.Regfree(&dst)
		return
	}

	var dst gc.Node
	var src gc.Node
	if n.Ullman >= res.Ullman {
		gc.Agenr(n, &dst, res) // temporarily use dst
		gc.Regalloc(&src, gc.Types[gc.Tptr], nil)
		gins(arm.AMOVW, &dst, &src)
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		gc.Agen(res, &dst)
	} else {
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		gc.Agenr(res, &dst, res)
		gc.Agenr(n, &src, nil)
	}

	var tmp gc.Node
	gc.Regalloc(&tmp, gc.Types[gc.TUINT32], nil)

	// set up end marker
	var nend gc.Node

	if c >= 4 {
		gc.Regalloc(&nend, gc.Types[gc.TUINT32], nil)

		p := gins(arm.AMOVW, &src, &nend)
		p.From.Type = obj.TYPE_ADDR
		if dir < 0 {
			p.From.Offset = int64(dir)
		} else {
			p.From.Offset = w
		}
	}

	// move src and dest to the end of block if necessary
	if dir < 0 {
		p := gins(arm.AMOVW, &src, &src)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = w + int64(dir)

		p = gins(arm.AMOVW, &dst, &dst)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = w + int64(dir)
	}
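	// Each move below sets C_PBIT (the .P suffix, post-indexed
	// addressing), so src and dst advance by dir automatically
	// after every load and store.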
	// move
	if c >= 4 {
		// large copy: loop until src reaches the end marker in nend
		p := gins(op, &src, &tmp)
		p.From.Type = obj.TYPE_MEM
		p.From.Offset = int64(dir)
		p.Scond |= arm.C_PBIT
		ploop := p

		p = gins(op, &tmp, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = int64(dir)
		p.Scond |= arm.C_PBIT

		p = gins(arm.ACMP, &src, nil)
		raddr(&nend, p)

		gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), ploop)
		gc.Regfree(&nend)
	} else {
		// small copy: emit an unrolled load/store pair per element
		for ; c > 0; c-- {
			p := gins(op, &src, &tmp)
			p.From.Type = obj.TYPE_MEM
			p.From.Offset = int64(dir)
			p.Scond |= arm.C_PBIT

			p = gins(op, &tmp, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = int64(dir)
			p.Scond |= arm.C_PBIT
		}
	}

	gc.Regfree(&dst)
	gc.Regfree(&src)
	gc.Regfree(&tmp)
}