github.com/q45/go@v0.0.0-20151101211701-a4fb8c13db3f/src/cmd/compile/internal/arm/ggen.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package arm

import (
	"cmd/compile/internal/gc"
	"cmd/internal/obj"
	"cmd/internal/obj/arm"
)

func defframe(ptxt *obj.Prog) {
	var n *gc.Node

	// fill in argument size, stack size
	ptxt.To.Type = obj.TYPE_TEXTSIZE

	ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
	frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
	ptxt.To.Offset = int64(frame)

	// Insert code to zero ambiguously live variables so that the
	// garbage collector only sees initialized values when it looks
	// for pointers.
	p := ptxt

	hi := int64(0)
	lo := hi
	r0 := uint32(0)
	for l := gc.Curfn.Func.Dcl; l != nil; l = l.Next {
		n = l.N
		if !n.Name.Needzero {
			continue
		}
		if n.Class != gc.PAUTO {
			gc.Fatalf("needzero class %d", n.Class)
		}
		if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
			gc.Fatalf("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
		}
		if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthptr) {
			// merge with range we already have
			lo = gc.Rnd(n.Xoffset, int64(gc.Widthptr))
			continue
		}

		// zero old range
		p = zerorange(p, int64(frame), lo, hi, &r0)

		// set new range
		hi = n.Xoffset + n.Type.Width
		lo = n.Xoffset
	}

	// zero final range
	zerorange(p, int64(frame), lo, hi, &r0)
}

func zerorange(p *obj.Prog, frame int64, lo int64, hi int64, r0 *uint32) *obj.Prog {
	cnt := hi - lo
	if cnt == 0 {
		return p
	}
	if *r0 == 0 {
		p = appendpp(p, arm.AMOVW, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, arm.REG_R0, 0)
		*r0 = 1
	}

	if cnt < int64(4*gc.Widthptr) {
		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
			p = appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, int32(4+frame+lo+i))
		}
	} else if !gc.Nacl && (cnt <= int64(128*gc.Widthptr)) {
		p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(4+frame+lo), obj.TYPE_REG, arm.REG_R1, 0)
		p.Reg = arm.REGSP
		p = appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
		f := gc.Sysfunc("duffzero")
		gc.Naddr(&p.To, f)
		gc.Afunclit(&p.To, f)
		p.To.Offset = 4 * (128 - cnt/int64(gc.Widthptr))
	} else {
		p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(4+frame+lo), obj.TYPE_REG, arm.REG_R1, 0)
		p.Reg = arm.REGSP
		p = appendpp(p, arm.AADD, obj.TYPE_CONST, 0, int32(cnt), obj.TYPE_REG, arm.REG_R2, 0)
		p.Reg = arm.REG_R1
		p = appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4)
		p1 := p
		p.Scond |= arm.C_PBIT
		p = appendpp(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0)
		p.Reg = arm.REG_R2
		p = appendpp(p, arm.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
		gc.Patch(p, p1)
	}

	return p
}
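// The three strategies in zerorange above correspond roughly to the
// following sequences (a sketch for orientation, not verbatim assembler
// output; exact offsets depend on the frame layout):
//
//	small (cnt < 4*Widthptr):	MOVW R0, (4+frame+lo+i)(SP)	// one store per word
//	medium (cnt <= 128*Widthptr):	ADD $(4+frame+lo), SP, R1
//					DUFFZERO $(4*(128-cnt/Widthptr))
//	large:				ADD $(4+frame+lo), SP, R1
//					ADD $cnt, R1, R2
//				loop:	MOVW.P R0, 4(R1)	// post-increment store
//					CMP R1, R2
//					BNE loop
//
// R0 holds the zero constant throughout; the *r0 flag ensures it is
// loaded only once per frame.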
func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int32, ttype int, treg int, toffset int32) *obj.Prog {
	q := gc.Ctxt.NewProg()
	gc.Clearp(q)
	q.As = int16(as)
	q.Lineno = p.Lineno
	q.From.Type = int16(ftype)
	q.From.Reg = int16(freg)
	q.From.Offset = int64(foffset)
	q.To.Type = int16(ttype)
	q.To.Reg = int16(treg)
	q.To.Offset = int64(toffset)
	q.Link = p.Link
	p.Link = q
	return q
}

/*
 * generate high multiply
 * res = (nl * nr) >> wordsize
 */
func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
	if nl.Ullman < nr.Ullman {
		nl, nr = nr, nl
	}

	t := nl.Type
	w := int(t.Width * 8)
	var n1 gc.Node
	gc.Regalloc(&n1, t, res)
	gc.Cgen(nl, &n1)
	var n2 gc.Node
	gc.Regalloc(&n2, t, nil)
	gc.Cgen(nr, &n2)
	switch gc.Simtype[t.Etype] {
	case gc.TINT8,
		gc.TINT16:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(w), &n1)

	case gc.TUINT8,
		gc.TUINT16:
		gins(optoas(gc.OMUL, t), &n2, &n1)
		gshift(arm.AMOVW, &n1, arm.SHIFT_LR, int32(w), &n1)

	// perform a long multiplication.
	case gc.TINT32,
		gc.TUINT32:
		var p *obj.Prog
		if gc.Issigned[t.Etype] {
			p = gins(arm.AMULL, &n2, nil)
		} else {
			p = gins(arm.AMULLU, &n2, nil)
		}

		// n2 * n1 -> (n1 n2)
		p.Reg = n1.Reg
		p.To.Type = obj.TYPE_REGREG
		p.To.Reg = n1.Reg
		p.To.Offset = int64(n2.Reg)

	default:
		gc.Fatalf("cgen_hmul %v", t)
	}

	gc.Cgen(&n1, res)
	gc.Regfree(&n1)
	gc.Regfree(&n2)
}
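// Note on the 32-bit cases in cgen_hmul above: MULL/MULLU write their
// 64-bit product to a register pair via TYPE_REGREG. Following the
// "n2 * n1 -> (n1 n2)" convention, the high word lands in n1's register
// and the low word in n2's, so moving n1 to res yields (nl*nr)>>32.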
/*
 * generate shift according to op, one of:
 *	res = nl << nr
 *	res = nl >> nr
 */
func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
	if nl.Type.Width > 4 {
		gc.Fatalf("cgen_shift %v", nl.Type)
	}

	w := int(nl.Type.Width * 8)

	if op == gc.OLROT {
		v := nr.Int()
		var n1 gc.Node
		gc.Regalloc(&n1, nl.Type, res)
		if w == 32 {
			gc.Cgen(nl, &n1)
			gshift(arm.AMOVW, &n1, arm.SHIFT_RR, int32(w)-int32(v), &n1)
		} else {
			var n2 gc.Node
			gc.Regalloc(&n2, nl.Type, nil)
			gc.Cgen(nl, &n2)
			gshift(arm.AMOVW, &n2, arm.SHIFT_LL, int32(v), &n1)
			gshift(arm.AORR, &n2, arm.SHIFT_LR, int32(w)-int32(v), &n1)
			gc.Regfree(&n2)

			// Ensure sign/zero-extended result.
			gins(optoas(gc.OAS, nl.Type), &n1, &n1)
		}

		gmove(&n1, res)
		gc.Regfree(&n1)
		return
	}

	if nr.Op == gc.OLITERAL {
		var n1 gc.Node
		gc.Regalloc(&n1, nl.Type, res)
		gc.Cgen(nl, &n1)
		sc := uint64(nr.Int())
		if sc == 0 {
			// nothing to do
		} else if sc >= uint64(nl.Type.Width*8) {
			// Shift count >= width: result is 0, or all sign
			// bits for a signed right shift.
			if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
				gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(w), &n1)
			} else {
				gins(arm.AEOR, &n1, &n1)
			}
		} else {
			if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
				gshift(arm.AMOVW, &n1, arm.SHIFT_AR, int32(sc), &n1)
			} else if op == gc.ORSH {
				gshift(arm.AMOVW, &n1, arm.SHIFT_LR, int32(sc), &n1)
			} else { // OLSH
				gshift(arm.AMOVW, &n1, arm.SHIFT_LL, int32(sc), &n1)
			}
		}

		if w < 32 && op == gc.OLSH {
			gins(optoas(gc.OAS, nl.Type), &n1, &n1)
		}
		gmove(&n1, res)
		gc.Regfree(&n1)
		return
	}

	tr := nr.Type
	var t gc.Node
	var n1 gc.Node
	var n2 gc.Node
	var n3 gc.Node
	if tr.Width > 4 {
		var nt gc.Node
		gc.Tempname(&nt, nr.Type)
		if nl.Ullman >= nr.Ullman {
			gc.Regalloc(&n2, nl.Type, res)
			gc.Cgen(nl, &n2)
			gc.Cgen(nr, &nt)
			n1 = nt
		} else {
			gc.Cgen(nr, &nt)
			gc.Regalloc(&n2, nl.Type, res)
			gc.Cgen(nl, &n2)
		}

		var hi gc.Node
		var lo gc.Node
		split64(&nt, &lo, &hi)
		gc.Regalloc(&n1, gc.Types[gc.TUINT32], nil)
		gc.Regalloc(&n3, gc.Types[gc.TUINT32], nil)
		gmove(&lo, &n1)
		gmove(&hi, &n3)
		splitclean()
		gins(arm.ATST, &n3, nil)
		gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w))
		p1 := gins(arm.AMOVW, &t, &n1)
		p1.Scond = arm.C_SCOND_NE
		tr = gc.Types[gc.TUINT32]
		gc.Regfree(&n3)
	} else {
		if nl.Ullman >= nr.Ullman {
			gc.Regalloc(&n2, nl.Type, res)
			gc.Cgen(nl, &n2)
			gc.Regalloc(&n1, nr.Type, nil)
			gc.Cgen(nr, &n1)
		} else {
			gc.Regalloc(&n1, nr.Type, nil)
			gc.Cgen(nr, &n1)
			gc.Regalloc(&n2, nl.Type, res)
			gc.Cgen(nl, &n2)
		}
	}

	// test for shift being 0
	gins(arm.ATST, &n1, nil)

	p3 := gc.Gbranch(arm.ABEQ, nil, -1)

	// test and fix up large shifts
	// TODO: if(!bounded), don't emit some of this.
	gc.Regalloc(&n3, tr, nil)

	gc.Nodconst(&t, gc.Types[gc.TUINT32], int64(w))
	gmove(&t, &n3)
	gins(arm.ACMP, &n1, &n3)
	if op == gc.ORSH {
		var p1 *obj.Prog
		var p2 *obj.Prog
		if gc.Issigned[nl.Type.Etype] {
			p1 = gshift(arm.AMOVW, &n2, arm.SHIFT_AR, int32(w)-1, &n2)
			p2 = gregshift(arm.AMOVW, &n2, arm.SHIFT_AR, &n1, &n2)
		} else {
			p1 = gins(arm.AEOR, &n2, &n2)
			p2 = gregshift(arm.AMOVW, &n2, arm.SHIFT_LR, &n1, &n2)
		}

		p1.Scond = arm.C_SCOND_HS
		p2.Scond = arm.C_SCOND_LO
	} else {
		p1 := gins(arm.AEOR, &n2, &n2)
		p2 := gregshift(arm.AMOVW, &n2, arm.SHIFT_LL, &n1, &n2)
		p1.Scond = arm.C_SCOND_HS
		p2.Scond = arm.C_SCOND_LO
	}

	gc.Regfree(&n3)

	gc.Patch(p3, gc.Pc)

	// Left-shift of smaller word must be sign/zero-extended.
	if w < 32 && op == gc.OLSH {
		gins(optoas(gc.OAS, nl.Type), &n2, &n2)
	}
	gmove(&n2, res)

	gc.Regfree(&n1)
	gc.Regfree(&n2)
}
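// For orientation, the variable-count path of cgen_shift above emits
// roughly the following for an unsigned 32-bit right shift (a sketch;
// the signed case replaces the EOR with an arithmetic shift by w-1):
//
//	TST	n1, n1		// count == 0?
//	BEQ	done
//	MOVW	$32, n3
//	CMP	n1, n3
//	EOR.HS	n2, n2		// count >= 32: result is 0
//	MOVW.LO	n2>>n1, n2	// count < 32: register-shifted move
//	done: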
func clearfat(nl *gc.Node) {
	/* clear a fat object */
	if gc.Debug['g'] != 0 {
		gc.Dump("\nclearfat", nl)
	}

	w := uint32(nl.Type.Width)

	// Avoid taking the address for simple enough types.
	if gc.Componentgen(nil, nl) {
		return
	}

	c := w % 4 // trailing bytes
	q := w / 4 // 4-byte words

	var r0 gc.Node
	r0.Op = gc.OREGISTER
	r0.Reg = arm.REG_R0
	var r1 gc.Node
	r1.Op = gc.OREGISTER
	r1.Reg = arm.REG_R1
	var dst gc.Node
	gc.Regalloc(&dst, gc.Types[gc.Tptr], &r1)
	gc.Agen(nl, &dst)
	var nc gc.Node
	gc.Nodconst(&nc, gc.Types[gc.TUINT32], 0)
	var nz gc.Node
	gc.Regalloc(&nz, gc.Types[gc.TUINT32], &r0)
	gc.Cgen(&nc, &nz)

	if q > 128 {
		var end gc.Node
		gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
		p := gins(arm.AMOVW, &dst, &end)
		p.From.Type = obj.TYPE_ADDR
		p.From.Offset = int64(q) * 4

		p = gins(arm.AMOVW, &nz, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 4
		p.Scond |= arm.C_PBIT
		pl := p

		p = gins(arm.ACMP, &dst, nil)
		raddr(&end, p)
		gc.Patch(gc.Gbranch(arm.ABNE, nil, 0), pl)

		gc.Regfree(&end)
	} else if q >= 4 && !gc.Nacl {
		f := gc.Sysfunc("duffzero")
		p := gins(obj.ADUFFZERO, nil, f)
		gc.Afunclit(&p.To, f)

		// 4 and 128 = magic constants: see ../../runtime/asm_arm.s
		p.To.Offset = 4 * (128 - int64(q))
	} else {
		var p *obj.Prog
		for q > 0 {
			p = gins(arm.AMOVW, &nz, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = 4
			p.Scond |= arm.C_PBIT
			q--
		}
	}

	var p *obj.Prog
	for c > 0 {
		p = gins(arm.AMOVB, &nz, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = 1
		p.Scond |= arm.C_PBIT
		c--
	}

	gc.Regfree(&dst)
	gc.Regfree(&nz)
}

// Called after regopt and peep have run.
// Expand CHECKNIL pseudo-op into actual nil pointer check.
func expandchecks(firstp *obj.Prog) {
	var reg int
	var p1 *obj.Prog

	for p := firstp; p != nil; p = p.Link {
		if p.As != obj.ACHECKNIL {
			continue
		}
		if gc.Debug_checknil != 0 && p.Lineno > 1 { // p.Lineno == 1 in generated wrappers
			gc.Warnl(int(p.Lineno), "generated nil check")
		}
		if p.From.Type != obj.TYPE_REG {
			gc.Fatalf("invalid nil check %v", p)
		}
		reg = int(p.From.Reg)

		// check is
		//	CMP arg, $0
		//	MOV.EQ arg, 0(arg)
		p1 = gc.Ctxt.NewProg()

		gc.Clearp(p1)
		p1.Link = p.Link
		p.Link = p1
		p1.Lineno = p.Lineno
		p1.Pc = 9999
		p1.As = arm.AMOVW
		p1.From.Type = obj.TYPE_REG
		p1.From.Reg = int16(reg)
		p1.To.Type = obj.TYPE_MEM
		p1.To.Reg = int16(reg)
		p1.To.Offset = 0
		p1.Scond = arm.C_SCOND_EQ
		p.As = arm.ACMP
		p.From.Type = obj.TYPE_CONST
		p.From.Reg = 0
		p.From.Offset = 0
		p.Reg = int16(reg)
	}
}

func ginsnop() {
	var r gc.Node
	gc.Nodreg(&r, gc.Types[gc.TINT], arm.REG_R0)
	p := gins(arm.AAND, &r, &r)
	p.Scond = arm.C_SCOND_EQ
}

/*
 * generate
 *	as $c, n
 */
func ginscon(as int, c int64, n *gc.Node) {
	var n1 gc.Node
	gc.Nodconst(&n1, gc.Types[gc.TINT32], c)
	var n2 gc.Node
	gc.Regalloc(&n2, gc.Types[gc.TINT32], nil)
	gmove(&n1, &n2)
	gins(as, &n2, n)
	gc.Regfree(&n2)
}
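// ginscmp generates a comparison of n1 against n2 for type t and returns
// the conditional branch for op. A literal zero on the left is swapped to
// the right (reversing the branch condition with Brrev) so it can be used
// directly as a CMP immediate instead of being loaded into a register.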
func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
	if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n1.Int() == 0 && n2.Op != gc.OLITERAL {
		op = gc.Brrev(op)
		n1, n2 = n2, n1
	}
	var r1, r2, g1, g2 gc.Node
	gc.Regalloc(&r1, t, n1)
	gc.Regalloc(&g1, n1.Type, &r1)
	gc.Cgen(n1, &g1)
	gmove(&g1, &r1)
	if gc.Isint[t.Etype] && n2.Op == gc.OLITERAL && n2.Int() == 0 {
		gins(arm.ACMP, &r1, n2)
	} else {
		gc.Regalloc(&r2, t, n2)
		gc.Regalloc(&g2, n1.Type, &r2)
		gc.Cgen(n2, &g2)
		gmove(&g2, &r2)
		gins(optoas(gc.OCMP, t), &r1, &r2)
		gc.Regfree(&g2)
		gc.Regfree(&r2)
	}
	gc.Regfree(&g1)
	gc.Regfree(&r1)
	return gc.Gbranch(optoas(op, t), nil, likely)
}

// addr += index*width if possible.
func addindex(index *gc.Node, width int64, addr *gc.Node) bool {
	switch width {
	case 2:
		gshift(arm.AADD, index, arm.SHIFT_LL, 1, addr)
		return true
	case 4:
		gshift(arm.AADD, index, arm.SHIFT_LL, 2, addr)
		return true
	case 8:
		gshift(arm.AADD, index, arm.SHIFT_LL, 3, addr)
		return true
	}
	return false
}

// res = runtime.getg()
func getg(res *gc.Node) {
	var n1 gc.Node
	gc.Nodreg(&n1, res.Type, arm.REGG)
	gmove(&n1, res)
}
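// Example of the shifted-operand form addindex relies on: for a []uint32
// element access, addr += i*4 becomes a single ADD with a built-in shift,
// roughly "ADD R(i)<<2, R(addr)", instead of a separate multiply; widths
// 2, 4, and 8 map to logical-left shifts of 1, 2, and 3. (Illustrative
// register names only.)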