// Inferno utils/6l/pass.c
// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6l/pass.c
//
// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
// Portions Copyright © 1997-1999 Vita Nuova Limited
// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
// Portions Copyright © 2004,2006 Bruce Ellis
// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
// Portions Copyright © 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package x86

import (
	"cmd/internal/obj"
	"cmd/internal/sys"
	"fmt"
	"log"
	"math"
	"strings"
)

func CanUse1InsnTLS(ctxt *obj.Link) bool {
	if isAndroid {
		// For android, we use a disgusting hack that assumes
		// the thread-local storage slot for g is allocated
		// using pthread_key_create with a fixed offset
		// (see src/runtime/cgo/gcc_android_amd64.c).
		// This makes access to the TLS storage (for g) doable
		// with 1 instruction.
		return true
	}

	if ctxt.Arch.RegSize == 4 {
		switch ctxt.Headtype {
		case obj.Hlinux,
			obj.Hnacl,
			obj.Hplan9,
			obj.Hwindows,
			obj.Hwindowsgui:
			return false
		}

		return true
	}

	switch ctxt.Headtype {
	case obj.Hplan9, obj.Hwindows, obj.Hwindowsgui:
		return false
	case obj.Hlinux:
		return !ctxt.Flag_shared
	}

	return true
}

func progedit(ctxt *obj.Link, p *obj.Prog) {
	// Maintain information about code generation mode.
	if ctxt.Mode == 0 {
		ctxt.Mode = ctxt.Arch.RegSize * 8
	}
	p.Mode = int8(ctxt.Mode)

	switch p.As {
	case AMODE:
		if p.From.Type == obj.TYPE_CONST || (p.From.Type == obj.TYPE_MEM && p.From.Reg == REG_NONE) {
			switch int(p.From.Offset) {
			case 16, 32, 64:
				ctxt.Mode = int(p.From.Offset)
			}
		}
		obj.Nopout(p)
	}

	// Thread-local storage references use the TLS pseudo-register.
	// As a register, TLS refers to the thread-local storage base, and it
	// can only be loaded into another register:
	//
	//	MOVQ TLS, AX
	//
	// An offset from the thread-local storage base is written off(reg)(TLS*1).
	// Semantically it is off(reg), but the (TLS*1) annotation marks this as
	// indexing from the loaded TLS base. This emits a relocation so that
	// if the linker needs to adjust the offset, it can. For example:
	//
	//	MOVQ TLS, AX
	//	MOVQ 0(AX)(TLS*1), CX // load g into CX
	//
	// On systems that support direct access to the TLS memory, this
	// pair of instructions can be reduced to a direct TLS memory reference:
	//
	//	MOVQ 0(TLS), CX // load g into CX
	//
	// The 2-instruction and 1-instruction forms correspond to the two code
	// sequences for loading a TLS variable in the local exec model given in "ELF
	// Handling For Thread-Local Storage".
	//
	// We apply this rewrite on systems that support the 1-instruction form.
	// The decision is made using only the operating system and the -shared flag,
	// not the link mode. If some link modes on a particular operating system
	// require the 2-instruction form, then all builds for that operating system
	// will use the 2-instruction form, so that the link mode decision can be
	// delayed to link time.
	//
	// In this way, all supported systems use identical instructions to
	// access TLS, and they are rewritten appropriately first here in
	// liblink and then finally using relocations in the linker.
	//
	// When -shared is passed, we leave the code in the 2-instruction form but
	// assemble (and relocate) them in different ways to generate the initial
	// exec code sequence. It's a bit of a fluke that this is possible without
	// rewriting the instructions more comprehensively, and it only works because
	// we only support a single TLS variable (g).

	if CanUse1InsnTLS(ctxt) {
		// Reduce 2-instruction sequence to 1-instruction sequence.
		// Sequences like
		//	MOVQ TLS, BX
		//	... off(BX)(TLS*1) ...
		// become
		//	NOP
		//	... off(TLS) ...
		//
		// TODO(rsc): Remove the Hsolaris special case. It exists only to
		// guarantee we are producing byte-identical binaries as before this change.
		// But it should be unnecessary.
		if (p.As == AMOVQ || p.As == AMOVL) && p.From.Type == obj.TYPE_REG && p.From.Reg == REG_TLS && p.To.Type == obj.TYPE_REG && REG_AX <= p.To.Reg && p.To.Reg <= REG_R15 && ctxt.Headtype != obj.Hsolaris {
			obj.Nopout(p)
		}
		if p.From.Type == obj.TYPE_MEM && p.From.Index == REG_TLS && REG_AX <= p.From.Reg && p.From.Reg <= REG_R15 {
			p.From.Reg = REG_TLS
			p.From.Scale = 0
			p.From.Index = REG_NONE
		}

		if p.To.Type == obj.TYPE_MEM && p.To.Index == REG_TLS && REG_AX <= p.To.Reg && p.To.Reg <= REG_R15 {
			p.To.Reg = REG_TLS
			p.To.Scale = 0
			p.To.Index = REG_NONE
		}
	} else {
		// load_g_cx, below, always inserts the 1-instruction sequence. Rewrite it
		// as the 2-instruction sequence if necessary.
		//	MOVQ 0(TLS), BX
		// becomes
		//	MOVQ TLS, BX
		//	MOVQ 0(BX)(TLS*1), BX
		if (p.As == AMOVQ || p.As == AMOVL) && p.From.Type == obj.TYPE_MEM && p.From.Reg == REG_TLS && p.To.Type == obj.TYPE_REG && REG_AX <= p.To.Reg && p.To.Reg <= REG_R15 {
			q := obj.Appendp(ctxt, p)
			q.As = p.As
			q.From = p.From
			q.From.Type = obj.TYPE_MEM
			q.From.Reg = p.To.Reg
			q.From.Index = REG_TLS
			q.From.Scale = 2 // TODO: use 1
			q.To = p.To
			p.From.Type = obj.TYPE_REG
			p.From.Reg = REG_TLS
			p.From.Index = REG_NONE
			p.From.Offset = 0
		}
	}

	// TODO: Remove.
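	// On 64-bit Windows and on Plan 9, TLS-indexed operands written with
	// scale 1 (the (TLS*1) annotation above) are bumped to scale 2 before
	// they reach the assembler; see the TODO above.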
	if (ctxt.Headtype == obj.Hwindows || ctxt.Headtype == obj.Hwindowsgui) && p.Mode == 64 || ctxt.Headtype == obj.Hplan9 {
		if p.From.Scale == 1 && p.From.Index == REG_TLS {
			p.From.Scale = 2
		}
		if p.To.Scale == 1 && p.To.Index == REG_TLS {
			p.To.Scale = 2
		}
	}

	// Rewrite 0 to $0 in 3rd argument to CMPPS etc.
	// That's what the tables expect.
	switch p.As {
	case ACMPPD, ACMPPS, ACMPSD, ACMPSS:
		if p.To.Type == obj.TYPE_MEM && p.To.Name == obj.NAME_NONE && p.To.Reg == REG_NONE && p.To.Index == REG_NONE && p.To.Sym == nil {
			p.To.Type = obj.TYPE_CONST
		}
	}

	// Rewrite CALL/JMP/RET to symbol as TYPE_BRANCH.
	switch p.As {
	case obj.ACALL, obj.AJMP, obj.ARET:
		if p.To.Type == obj.TYPE_MEM && (p.To.Name == obj.NAME_EXTERN || p.To.Name == obj.NAME_STATIC) && p.To.Sym != nil {
			p.To.Type = obj.TYPE_BRANCH
		}
	}

	// Rewrite MOVL/MOVQ $XXX(FP/SP) as LEAL/LEAQ.
	if p.From.Type == obj.TYPE_ADDR && (ctxt.Arch.Family == sys.AMD64 || p.From.Name != obj.NAME_EXTERN && p.From.Name != obj.NAME_STATIC) {
		switch p.As {
		case AMOVL:
			p.As = ALEAL
			p.From.Type = obj.TYPE_MEM
		case AMOVQ:
			p.As = ALEAQ
			p.From.Type = obj.TYPE_MEM
		}
	}

	if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
		if p.From3 != nil {
			nacladdr(ctxt, p, p.From3)
		}
		nacladdr(ctxt, p, &p.From)
		nacladdr(ctxt, p, &p.To)
	}

	// Rewrite float constants to values stored in memory.
	switch p.As {
	// Convert AMOVSS $(0), Xx to AXORPS Xx, Xx
	case AMOVSS:
		if p.From.Type == obj.TYPE_FCONST {
			// f == 0 can't be used here due to -0, so use Float64bits
			if f := p.From.Val.(float64); math.Float64bits(f) == 0 {
				if p.To.Type == obj.TYPE_REG && REG_X0 <= p.To.Reg && p.To.Reg <= REG_X15 {
					p.As = AXORPS
					p.From = p.To
					break
				}
			}
		}
		fallthrough

	case AFMOVF,
		AFADDF,
		AFSUBF,
		AFSUBRF,
		AFMULF,
		AFDIVF,
		AFDIVRF,
		AFCOMF,
		AFCOMFP,
		AADDSS,
		ASUBSS,
		AMULSS,
		ADIVSS,
		ACOMISS,
		AUCOMISS:
		if p.From.Type == obj.TYPE_FCONST {
			f32 := float32(p.From.Val.(float64))
			i32 := math.Float32bits(f32)
			literal := fmt.Sprintf("$f32.%08x", i32)
			s := obj.Linklookup(ctxt, literal, 0)
			p.From.Type = obj.TYPE_MEM
			p.From.Name = obj.NAME_EXTERN
			p.From.Sym = s
			p.From.Sym.Set(obj.AttrLocal, true)
			p.From.Offset = 0
		}

	case AMOVSD:
		// Convert AMOVSD $(0), Xx to AXORPS Xx, Xx
		if p.From.Type == obj.TYPE_FCONST {
			// f == 0 can't be used here due to -0, so use Float64bits
			if f := p.From.Val.(float64); math.Float64bits(f) == 0 {
				if p.To.Type == obj.TYPE_REG && REG_X0 <= p.To.Reg && p.To.Reg <= REG_X15 {
					p.As = AXORPS
					p.From = p.To
					break
				}
			}
		}
		fallthrough

	case AFMOVD,
		AFADDD,
		AFSUBD,
		AFSUBRD,
		AFMULD,
		AFDIVD,
		AFDIVRD,
		AFCOMD,
		AFCOMDP,
		AADDSD,
		ASUBSD,
		AMULSD,
		ADIVSD,
		ACOMISD,
		AUCOMISD:
		if p.From.Type == obj.TYPE_FCONST {
			i64 := math.Float64bits(p.From.Val.(float64))
			literal := fmt.Sprintf("$f64.%016x", i64)
			s := obj.Linklookup(ctxt, literal, 0)
			p.From.Type = obj.TYPE_MEM
			p.From.Name = obj.NAME_EXTERN
			p.From.Sym = s
			p.From.Sym.Set(obj.AttrLocal, true)
			p.From.Offset = 0
		}
	}

	if ctxt.Flag_dynlink {
		rewriteToUseGot(ctxt, p)
	}

	if ctxt.Flag_shared && p.Mode == 32 {
		rewriteToPcrel(ctxt, p)
	}
}

// Rewrite p, if necessary, to access global data via the global offset table.
func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog) {
	var add, lea, mov obj.As
	var reg int16
	if p.Mode == 64 {
		add = AADDQ
		lea = ALEAQ
		mov = AMOVQ
		reg = REG_R15
	} else {
		add = AADDL
		lea = ALEAL
		mov = AMOVL
		reg = REG_CX
		if p.As == ALEAL && p.To.Reg != p.From.Reg && p.To.Reg != p.From.Index {
			// Special case: clobber the destination register with
			// the PC so we don't have to clobber CX.
			// The SSA backend depends on CX not being clobbered across LEAL.
			// See cmd/compile/internal/ssa/gen/386.rules (search for Flag_shared).
			reg = p.To.Reg
		}
	}

	if p.As == obj.ADUFFCOPY || p.As == obj.ADUFFZERO {
		//	ADUFFxxx $offset
		// becomes
		//	$MOV runtime.duffxxx@GOT, $reg
		//	$ADD $offset, $reg
		//	CALL $reg
		var sym *obj.LSym
		if p.As == obj.ADUFFZERO {
			sym = obj.Linklookup(ctxt, "runtime.duffzero", 0)
		} else {
			sym = obj.Linklookup(ctxt, "runtime.duffcopy", 0)
		}
		offset := p.To.Offset
		p.As = mov
		p.From.Type = obj.TYPE_MEM
		p.From.Name = obj.NAME_GOTREF
		p.From.Sym = sym
		p.To.Type = obj.TYPE_REG
		p.To.Reg = reg
		p.To.Offset = 0
		p.To.Sym = nil
		p1 := obj.Appendp(ctxt, p)
		p1.As = add
		p1.From.Type = obj.TYPE_CONST
		p1.From.Offset = offset
		p1.To.Type = obj.TYPE_REG
		p1.To.Reg = reg
		p2 := obj.Appendp(ctxt, p1)
		p2.As = obj.ACALL
		p2.To.Type = obj.TYPE_REG
		p2.To.Reg = reg
	}

	// We only care about global data: NAME_EXTERN means a global
	// symbol in the Go sense, and p.Sym.Local is true for a few
	// internally defined symbols.
	if p.As == lea && p.From.Type == obj.TYPE_MEM && p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() {
		// $LEA sym, Rx becomes $MOV $sym, Rx which will be rewritten below
		p.As = mov
		p.From.Type = obj.TYPE_ADDR
	}
	if p.From.Type == obj.TYPE_ADDR && p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() {
		// $MOV $sym, Rx becomes $MOV sym@GOT, Rx
		// $MOV $sym+<off>, Rx becomes $MOV sym@GOT, Rx; $LEA <off>(Rx), Rx
		// On 386 only, more complicated things like PUSHL $sym become $MOV sym@GOT, CX; PUSHL CX
		cmplxdest := false
		pAs := p.As
		var dest obj.Addr
		if p.To.Type != obj.TYPE_REG || pAs != mov {
			if p.Mode == 64 {
				ctxt.Diag("do not know how to handle LEA-type insn to non-register in %v with -dynlink", p)
			}
			cmplxdest = true
			dest = p.To
			p.As = mov
			p.To.Type = obj.TYPE_REG
			p.To.Reg = reg
			p.To.Sym = nil
			p.To.Name = obj.NAME_NONE
		}
		p.From.Type = obj.TYPE_MEM
		p.From.Name = obj.NAME_GOTREF
		q := p
		if p.From.Offset != 0 {
			q = obj.Appendp(ctxt, p)
			q.As = lea
			q.From.Type = obj.TYPE_MEM
			q.From.Reg = p.To.Reg
			q.From.Offset = p.From.Offset
			q.To = p.To
			p.From.Offset = 0
		}
		if cmplxdest {
			q = obj.Appendp(ctxt, q)
			q.As = pAs
			q.To = dest
			q.From.Type = obj.TYPE_REG
			q.From.Reg = reg
		}
	}
	if p.From3 != nil && p.From3.Name == obj.NAME_EXTERN {
		ctxt.Diag("don't know how to handle %v with -dynlink", p)
	}
	var source *obj.Addr
	// MOVx sym, Ry becomes $MOV sym@GOT, R15; MOVx (R15), Ry
	// MOVx Ry, sym becomes $MOV sym@GOT, R15; MOVx Ry, (R15)
	// An addition may be inserted between the two MOVs if there is an offset.
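	// Work out which operand, if either, names the global; the rewrite below
	// routes that operand through a register loaded from the GOT.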
	if p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() {
		if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() {
			ctxt.Diag("cannot handle NAME_EXTERN on both sides in %v with -dynlink", p)
		}
		source = &p.From
	} else if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() {
		source = &p.To
	} else {
		return
	}
	if p.As == obj.ACALL {
		// When dynlinking on 386, almost any call might end up being a call
		// to a PLT, so make sure the GOT pointer is loaded into BX.
		// RegTo2 is set on the replacement call insn to stop it being
		// processed when it is in turn passed to progedit.
		if p.Mode == 64 || (p.To.Sym != nil && p.To.Sym.Local()) || p.RegTo2 != 0 {
			return
		}
		p1 := obj.Appendp(ctxt, p)
		p2 := obj.Appendp(ctxt, p1)

		p1.As = ALEAL
		p1.From.Type = obj.TYPE_MEM
		p1.From.Name = obj.NAME_STATIC
		p1.From.Sym = obj.Linklookup(ctxt, "_GLOBAL_OFFSET_TABLE_", 0)
		p1.To.Type = obj.TYPE_REG
		p1.To.Reg = REG_BX

		p2.As = p.As
		p2.Scond = p.Scond
		p2.From = p.From
		p2.From3 = p.From3
		p2.Reg = p.Reg
		p2.To = p.To
		// p.To.Type was set to TYPE_BRANCH above, but that makes checkaddr
		// in ../pass.go complain, so set it back to TYPE_MEM here, until p2
		// itself gets passed to progedit.
		p2.To.Type = obj.TYPE_MEM
		p2.RegTo2 = 1

		obj.Nopout(p)
		return

	}
	if p.As == obj.ATEXT || p.As == obj.AFUNCDATA || p.As == obj.ARET || p.As == obj.AJMP {
		return
	}
	if source.Type != obj.TYPE_MEM {
		ctxt.Diag("don't know how to handle %v with -dynlink", p)
	}
	p1 := obj.Appendp(ctxt, p)
	p2 := obj.Appendp(ctxt, p1)

	p1.As = mov
	p1.From.Type = obj.TYPE_MEM
	p1.From.Sym = source.Sym
	p1.From.Name = obj.NAME_GOTREF
	p1.To.Type = obj.TYPE_REG
	p1.To.Reg = reg

	p2.As = p.As
	p2.From = p.From
	p2.To = p.To
	if p.From.Name == obj.NAME_EXTERN {
		p2.From.Reg = reg
		p2.From.Name = obj.NAME_NONE
		p2.From.Sym = nil
	} else if p.To.Name == obj.NAME_EXTERN {
		p2.To.Reg = reg
		p2.To.Name = obj.NAME_NONE
		p2.To.Sym = nil
	} else {
		return
	}
	obj.Nopout(p)
}

func rewriteToPcrel(ctxt *obj.Link, p *obj.Prog) {
	// RegTo2 is set on the instructions we insert here so they don't get
	// processed twice.
	if p.RegTo2 != 0 {
		return
	}
	if p.As == obj.ATEXT || p.As == obj.AFUNCDATA || p.As == obj.ACALL || p.As == obj.ARET || p.As == obj.AJMP {
		return
	}
	// Any Prog (aside from the above special cases) with an Addr with Name ==
	// NAME_EXTERN, NAME_STATIC or NAME_GOTREF has a CALL __x86.get_pc_thunk.XX
	// inserted before it.
	isName := func(a *obj.Addr) bool {
		if a.Sym == nil || (a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR) || a.Reg != 0 {
			return false
		}
		if a.Sym.Type == obj.STLSBSS {
			return false
		}
		return a.Name == obj.NAME_EXTERN || a.Name == obj.NAME_STATIC || a.Name == obj.NAME_GOTREF
	}

	if isName(&p.From) && p.From.Type == obj.TYPE_ADDR {
		// Handle things like "MOVL $sym, (SP)" or "PUSHL $sym" by rewriting
		// to "MOVL $sym, CX; MOVL CX, (SP)" or "MOVL $sym, CX; PUSHL CX"
		// respectively.
		if p.To.Type != obj.TYPE_REG {
			q := obj.Appendp(ctxt, p)
			q.As = p.As
			q.From.Type = obj.TYPE_REG
			q.From.Reg = REG_CX
			q.To = p.To
			p.As = AMOVL
			p.To.Type = obj.TYPE_REG
			p.To.Reg = REG_CX
			p.To.Sym = nil
			p.To.Name = obj.NAME_NONE
		}
	}

	if !isName(&p.From) && !isName(&p.To) && (p.From3 == nil || !isName(p.From3)) {
		return
	}
	var dst int16 = REG_CX
	if (p.As == ALEAL || p.As == AMOVL) && p.To.Reg != p.From.Reg && p.To.Reg != p.From.Index {
		dst = p.To.Reg
		// Why? See the comment near the top of rewriteToUseGot above.
		// AMOVLs might be introduced by the GOT rewrites.
	}
	q := obj.Appendp(ctxt, p)
	q.RegTo2 = 1
	r := obj.Appendp(ctxt, q)
	r.RegTo2 = 1
	q.As = obj.ACALL
	q.To.Sym = obj.Linklookup(ctxt, "__x86.get_pc_thunk."+strings.ToLower(Rconv(int(dst))), 0)
	q.To.Type = obj.TYPE_MEM
	q.To.Name = obj.NAME_EXTERN
	q.To.Sym.Set(obj.AttrLocal, true)
	r.As = p.As
	r.Scond = p.Scond
	r.From = p.From
	r.From3 = p.From3
	r.Reg = p.Reg
	r.To = p.To
	if isName(&p.From) {
		r.From.Reg = dst
	}
	if isName(&p.To) {
		r.To.Reg = dst
	}
	if p.From3 != nil && isName(p.From3) {
		r.From3.Reg = dst
	}
	obj.Nopout(p)
}

func nacladdr(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
	if p.As == ALEAL || p.As == ALEAQ {
		return
	}

	if a.Reg == REG_BP {
		ctxt.Diag("invalid address: %v", p)
		return
	}

	if a.Reg == REG_TLS {
		a.Reg = REG_BP
	}
	if a.Type == obj.TYPE_MEM && a.Name == obj.NAME_NONE {
		switch a.Reg {
		// all ok
		case REG_BP, REG_SP, REG_R15:
			break

		default:
			if a.Index != REG_NONE {
				ctxt.Diag("invalid address %v", p)
			}
			a.Index = a.Reg
			if a.Index != REG_NONE {
				a.Scale = 1
			}
			a.Reg = REG_R15
		}
	}
}

func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
	if ctxt.Headtype == obj.Hplan9 && ctxt.Plan9privates == nil {
		ctxt.Plan9privates = obj.Linklookup(ctxt, "_privates", 0)
	}

	ctxt.Cursym = cursym

	if cursym.Text == nil || cursym.Text.Link == nil {
		return
	}

	p := cursym.Text
	autoffset := int32(p.To.Offset)
	if autoffset < 0 {
		autoffset = 0
	}

	hasCall := false
	for q := p; q != nil; q = q.Link {
		if q.As == obj.ACALL || q.As == obj.ADUFFCOPY || q.As == obj.ADUFFZERO {
			hasCall = true
			break
		}
	}

	var bpsize int
	if p.Mode == 64 && ctxt.Framepointer_enabled &&
		p.From3.Offset&obj.NOFRAME == 0 && // (1) below
		!(autoffset == 0 && p.From3.Offset&obj.NOSPLIT != 0) && // (2) below
		!(autoffset == 0 && !hasCall) { // (3) below
		// Make room to save a base pointer.
		// There are 2 cases we must avoid:
		// 1) If noframe is set (which we do for functions which tail call).
		// 2) Scary runtime internals which would be all messed up by frame pointers.
		//    We detect these using a heuristic: frameless nosplit functions.
		//    TODO: Maybe someday we label them all with NOFRAME and get rid of this heuristic.
		// For performance, we also want to avoid:
		// 3) Frameless leaf functions
		bpsize = ctxt.Arch.PtrSize
		autoffset += int32(bpsize)
		p.To.Offset += int64(bpsize)
	} else {
		bpsize = 0
	}

	textarg := int64(p.To.Val.(int32))
	cursym.Args = int32(textarg)
	cursym.Locals = int32(p.To.Offset)

	// TODO(rsc): Remove.
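	// On 386, clamp a negative recorded locals size to zero.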
	if p.Mode == 32 && cursym.Locals < 0 {
		cursym.Locals = 0
	}

	// TODO(rsc): Remove 'p.Mode == 64 &&'.
	if p.Mode == 64 && autoffset < obj.StackSmall && p.From3Offset()&obj.NOSPLIT == 0 {
		leaf := true
	LeafSearch:
		for q := p; q != nil; q = q.Link {
			switch q.As {
			case obj.ACALL:
				// Treat common runtime calls that take no arguments
				// the same as duffcopy and duffzero.
				if !isZeroArgRuntimeCall(q.To.Sym) {
					leaf = false
					break LeafSearch
				}
				fallthrough
			case obj.ADUFFCOPY, obj.ADUFFZERO:
				if autoffset >= obj.StackSmall-8 {
					leaf = false
					break LeafSearch
				}
			}
		}

		if leaf {
			p.From3.Offset |= obj.NOSPLIT
		}
	}

	if p.From3Offset()&obj.NOSPLIT == 0 || p.From3Offset()&obj.WRAPPER != 0 {
		p = obj.Appendp(ctxt, p)
		p = load_g_cx(ctxt, p) // load g into CX
	}

	if cursym.Text.From3Offset()&obj.NOSPLIT == 0 {
		p = stacksplit(ctxt, p, autoffset, int32(textarg)) // emit split check
	}

	if autoffset != 0 {
		if autoffset%int32(ctxt.Arch.RegSize) != 0 {
			ctxt.Diag("unaligned stack size %d", autoffset)
		}
		p = obj.Appendp(ctxt, p)
		p.As = AADJSP
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(autoffset)
		p.Spadj = autoffset
	}

	deltasp := autoffset

	if bpsize > 0 {
		// Save caller's BP
		p = obj.Appendp(ctxt, p)

		p.As = AMOVQ
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_BP
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = REG_SP
		p.To.Scale = 1
		p.To.Offset = int64(autoffset) - int64(bpsize)

		// Move current frame to BP
		p = obj.Appendp(ctxt, p)

		p.As = ALEAQ
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_SP
		p.From.Scale = 1
		p.From.Offset = int64(autoffset) - int64(bpsize)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_BP
	}

	if cursym.Text.From3Offset()&obj.WRAPPER != 0 {
		// if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
		//
		//	MOVQ g_panic(CX), BX
		//	TESTQ BX, BX
		//	JEQ end
		//	LEAQ (autoffset+8)(SP), DI
		//	CMPQ panic_argp(BX), DI
		//	JNE end
		//	MOVQ SP, panic_argp(BX)
		// end:
		//	NOP
		//
		// The NOP is needed to give the jumps somewhere to land.
		// It is a liblink NOP, not an x86 NOP: it encodes to 0 instruction bytes.
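		// The sequence is emitted below one instruction at a time; each step
		// is switched to the 32-bit forms for 386 and, on NaCl, to R15-based
		// addressing where needed.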

		p = obj.Appendp(ctxt, p)

		p.As = AMOVQ
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_CX
		p.From.Offset = 4 * int64(ctxt.Arch.PtrSize) // G.panic
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_BX
		if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
			p.As = AMOVL
			p.From.Type = obj.TYPE_MEM
			p.From.Reg = REG_R15
			p.From.Scale = 1
			p.From.Index = REG_CX
		}
		if p.Mode == 32 {
			p.As = AMOVL
		}

		p = obj.Appendp(ctxt, p)
		p.As = ATESTQ
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_BX
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_BX
		if ctxt.Headtype == obj.Hnacl || p.Mode == 32 {
			p.As = ATESTL
		}

		p = obj.Appendp(ctxt, p)
		p.As = AJEQ
		p.To.Type = obj.TYPE_BRANCH
		p1 := p

		p = obj.Appendp(ctxt, p)
		p.As = ALEAQ
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_SP
		p.From.Offset = int64(autoffset) + int64(ctxt.Arch.RegSize)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_DI
		if ctxt.Headtype == obj.Hnacl || p.Mode == 32 {
			p.As = ALEAL
		}

		p = obj.Appendp(ctxt, p)
		p.As = ACMPQ
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_BX
		p.From.Offset = 0 // Panic.argp
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_DI
		if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
			p.As = ACMPL
			p.From.Type = obj.TYPE_MEM
			p.From.Reg = REG_R15
			p.From.Scale = 1
			p.From.Index = REG_BX
		}
		if p.Mode == 32 {
			p.As = ACMPL
		}

		p = obj.Appendp(ctxt, p)
		p.As = AJNE
		p.To.Type = obj.TYPE_BRANCH
		p2 := p

		p = obj.Appendp(ctxt, p)
		p.As = AMOVQ
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_SP
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = REG_BX
		p.To.Offset = 0 // Panic.argp
		if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
			p.As = AMOVL
			p.To.Type = obj.TYPE_MEM
			p.To.Reg = REG_R15
			p.To.Scale = 1
			p.To.Index = REG_BX
		}
		if p.Mode == 32 {
			p.As = AMOVL
		}

		p = obj.Appendp(ctxt, p)
		p.As = obj.ANOP
		p1.Pcond = p
		p2.Pcond = p
	}

	for ; p != nil; p = p.Link {
		pcsize := int(p.Mode) / 8
		switch p.From.Name {
		case obj.NAME_AUTO:
			p.From.Offset += int64(deltasp) - int64(bpsize)
		case obj.NAME_PARAM:
			p.From.Offset += int64(deltasp) + int64(pcsize)
		}
		if p.From3 != nil {
			switch p.From3.Name {
			case obj.NAME_AUTO:
				p.From3.Offset += int64(deltasp) - int64(bpsize)
			case obj.NAME_PARAM:
				p.From3.Offset += int64(deltasp) + int64(pcsize)
			}
		}
		switch p.To.Name {
		case obj.NAME_AUTO:
			p.To.Offset += int64(deltasp) - int64(bpsize)
		case obj.NAME_PARAM:
			p.To.Offset += int64(deltasp) + int64(pcsize)
		}

		switch p.As {
		default:
			continue

		case APUSHL, APUSHFL:
			deltasp += 4
			p.Spadj = 4
			continue

		case APUSHQ, APUSHFQ:
			deltasp += 8
			p.Spadj = 8
			continue

		case APUSHW, APUSHFW:
			deltasp += 2
			p.Spadj = 2
			continue

		case APOPL, APOPFL:
			deltasp -= 4
			p.Spadj = -4
			continue

		case APOPQ, APOPFQ:
			deltasp -= 8
			p.Spadj = -8
			continue

		case APOPW, APOPFW:
			deltasp -= 2
			p.Spadj = -2
			continue

		case obj.ARET:
			// do nothing
		}

		if autoffset != deltasp {
			ctxt.Diag("unbalanced PUSH/POP")
		}

		if autoffset != 0 {
			if bpsize > 0 {
				// Restore caller's BP
				p.As = AMOVQ

				p.From.Type = obj.TYPE_MEM
				p.From.Reg = REG_SP
				p.From.Scale = 1
				p.From.Offset = int64(autoffset) - int64(bpsize)
				p.To.Type = obj.TYPE_REG
				p.To.Reg = REG_BP
				p = obj.Appendp(ctxt, p)
			}

			p.As = AADJSP
			p.From.Type = obj.TYPE_CONST
			p.From.Offset = int64(-autoffset)
			p.Spadj = -autoffset
			p = obj.Appendp(ctxt, p)
			p.As = obj.ARET

			// If there are instructions following
			// this ARET, they come from a branch
			// with the same stackframe, so undo
			// the cleanup.
			p.Spadj = +autoffset
		}

		if p.To.Sym != nil { // retjmp
			p.As = obj.AJMP
		}
	}
}

func isZeroArgRuntimeCall(s *obj.LSym) bool {
	if s == nil {
		return false
	}
	switch s.Name {
	case "runtime.panicindex", "runtime.panicslice", "runtime.panicdivide":
		return true
	}
	return false
}

func indir_cx(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
	if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
		a.Type = obj.TYPE_MEM
		a.Reg = REG_R15
		a.Index = REG_CX
		a.Scale = 1
		return
	}

	a.Type = obj.TYPE_MEM
	a.Reg = REG_CX
}

// Append code to p to load g into cx.
// Overwrites p with the first instruction (no first appendp).
// Overwriting p is unusual but it lets us use this in both the
// prologue (caller must call appendp first) and in the epilogue.
// Returns last new instruction.
func load_g_cx(ctxt *obj.Link, p *obj.Prog) *obj.Prog {
	p.As = AMOVQ
	if ctxt.Arch.PtrSize == 4 {
		p.As = AMOVL
	}
	p.From.Type = obj.TYPE_MEM
	p.From.Reg = REG_TLS
	p.From.Offset = 0
	p.To.Type = obj.TYPE_REG
	p.To.Reg = REG_CX

	next := p.Link
	progedit(ctxt, p)
	for p.Link != next {
		p = p.Link
	}

	if p.From.Index == REG_TLS {
		p.From.Scale = 2
	}

	return p
}

// Append code to p to check for stack split.
// Appends to (does not overwrite) p.
// Assumes g is in CX.
// Returns last new instruction.
func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, textarg int32) *obj.Prog {
	cmp := ACMPQ
	lea := ALEAQ
	mov := AMOVQ
	sub := ASUBQ

	if ctxt.Headtype == obj.Hnacl || p.Mode == 32 {
		cmp = ACMPL
		lea = ALEAL
		mov = AMOVL
		sub = ASUBL
	}

	var q1 *obj.Prog
	if framesize <= obj.StackSmall {
		// small stack: SP <= stackguard
		//	CMPQ SP, stackguard
		p = obj.Appendp(ctxt, p)

		p.As = cmp
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_SP
		indir_cx(ctxt, p, &p.To)
		p.To.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0
		if ctxt.Cursym.CFunc() {
			p.To.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1
		}
	} else if framesize <= obj.StackBig {
		// large stack: SP-framesize <= stackguard-StackSmall
		//	LEAQ -xxx(SP), AX
		//	CMPQ AX, stackguard
		p = obj.Appendp(ctxt, p)

		p.As = lea
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_SP
		p.From.Offset = -(int64(framesize) - obj.StackSmall)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_AX

		p = obj.Appendp(ctxt, p)
		p.As = cmp
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_AX
		indir_cx(ctxt, p, &p.To)
		p.To.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0
		if ctxt.Cursym.CFunc() {
			p.To.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1
		}
	} else {
		// Such a large stack we need to protect against wraparound.
		// If SP is close to zero:
		//	SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall)
		// The +StackGuard on both sides is required to keep the left side positive:
		//	SP is allowed to be slightly below stackguard. See stack.h.
		//
		// Preemption sets stackguard to StackPreempt, a very large value.
		// That breaks the math above, so we have to check for that explicitly.
		//	MOVQ stackguard, CX
		//	CMPQ CX, $StackPreempt
		//	JEQ label-of-call-to-morestack
		//	LEAQ StackGuard(SP), AX
		//	SUBQ CX, AX
		//	CMPQ AX, $(framesize+(StackGuard-StackSmall))

		p = obj.Appendp(ctxt, p)

		p.As = mov
		indir_cx(ctxt, p, &p.From)
		p.From.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0
		if ctxt.Cursym.CFunc() {
			p.From.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1
		}
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_SI

		p = obj.Appendp(ctxt, p)
		p.As = cmp
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_SI
		p.To.Type = obj.TYPE_CONST
		p.To.Offset = obj.StackPreempt
		if p.Mode == 32 {
			p.To.Offset = int64(uint32(obj.StackPreempt & (1<<32 - 1)))
		}

		p = obj.Appendp(ctxt, p)
		p.As = AJEQ
		p.To.Type = obj.TYPE_BRANCH
		q1 = p

		p = obj.Appendp(ctxt, p)
		p.As = lea
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_SP
		p.From.Offset = obj.StackGuard
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_AX

		p = obj.Appendp(ctxt, p)
		p.As = sub
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_SI
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_AX

		p = obj.Appendp(ctxt, p)
		p.As = cmp
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_AX
		p.To.Type = obj.TYPE_CONST
		p.To.Offset = int64(framesize) + (obj.StackGuard - obj.StackSmall)
	}

	// common
	jls := obj.Appendp(ctxt, p)
	jls.As = AJLS
	jls.To.Type = obj.TYPE_BRANCH

	var last *obj.Prog
	for last = ctxt.Cursym.Text; last.Link != nil; last = last.Link {
	}

	// Now we are at the end of the function, but logically
	// we are still in function prologue. We need to fix the
	// SP data and PCDATA.
	spfix := obj.Appendp(ctxt, last)
	spfix.As = obj.ANOP
	spfix.Spadj = -framesize

	pcdata := obj.Appendp(ctxt, spfix)
	pcdata.Lineno = ctxt.Cursym.Text.Lineno
	pcdata.Mode = ctxt.Cursym.Text.Mode
	pcdata.As = obj.APCDATA
	pcdata.From.Type = obj.TYPE_CONST
	pcdata.From.Offset = obj.PCDATA_StackMapIndex
	pcdata.To.Type = obj.TYPE_CONST
	pcdata.To.Offset = -1 // pcdata starts at -1 at function entry

	call := obj.Appendp(ctxt, pcdata)
	call.Lineno = ctxt.Cursym.Text.Lineno
	call.Mode = ctxt.Cursym.Text.Mode
	call.As = obj.ACALL
	call.To.Type = obj.TYPE_BRANCH
	call.To.Name = obj.NAME_EXTERN
	morestack := "runtime.morestack"
	switch {
	case ctxt.Cursym.CFunc():
		morestack = "runtime.morestackc"
	case ctxt.Cursym.Text.From3Offset()&obj.NEEDCTXT == 0:
		morestack = "runtime.morestack_noctxt"
	}
	call.To.Sym = obj.Linklookup(ctxt, morestack, 0)
	// When compiling 386 code for dynamic linking, the call needs to be adjusted
	// to follow PIC rules. This in turn can insert more instructions, so we need
	// to keep track of the start of the call (where the jump will be to) and the
	// end (which following instructions are appended to).
	callend := call
	progedit(ctxt, callend)
	for ; callend.Link != nil; callend = callend.Link {
		progedit(ctxt, callend.Link)
	}

	jmp := obj.Appendp(ctxt, callend)
	jmp.As = obj.AJMP
	jmp.To.Type = obj.TYPE_BRANCH
	jmp.Pcond = ctxt.Cursym.Text.Link
	jmp.Spadj = +framesize

	jls.Pcond = call
	if q1 != nil {
		q1.Pcond = call
	}

	return jls
}

func follow(ctxt *obj.Link, s *obj.LSym) {
	ctxt.Cursym = s

	firstp := ctxt.NewProg()
	lastp := firstp
	xfol(ctxt, s.Text, &lastp)
	lastp.Link = nil
	s.Text = firstp.Link
}

func nofollow(a obj.As) bool {
	switch a {
	case obj.AJMP,
		obj.ARET,
		AIRETL,
		AIRETQ,
		AIRETW,
		ARETFL,
		ARETFQ,
		ARETFW,
		obj.AUNDEF:
		return true
	}

	return false
}

func pushpop(a obj.As) bool {
	switch a {
	case APUSHL,
		APUSHFL,
		APUSHQ,
		APUSHFQ,
		APUSHW,
		APUSHFW,
		APOPL,
		APOPFL,
		APOPQ,
		APOPFQ,
		APOPW,
		APOPFW:
		return true
	}

	return false
}

func relinv(a obj.As) obj.As {
	switch a {
	case AJEQ:
		return AJNE
	case AJNE:
		return AJEQ
	case AJLE:
		return AJGT
	case AJLS:
		return AJHI
	case AJLT:
		return AJGE
	case AJMI:
		return AJPL
	case AJGE:
		return AJLT
	case AJPL:
		return AJMI
	case AJGT:
		return AJLE
	case AJHI:
		return AJLS
	case AJCS:
		return AJCC
	case AJCC:
		return AJCS
	case AJPS:
		return AJPC
	case AJPC:
		return AJPS
	case AJOS:
		return AJOC
	case AJOC:
		return AJOS
	}

	log.Fatalf("unknown relation: %s", a)
	return 0
}

func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) {
	var q *obj.Prog
	var i int
	var a obj.As

loop:
	if p == nil {
		return
	}
	if p.As == obj.AJMP {
		q = p.Pcond
		if q != nil && q.As != obj.ATEXT {
			/* mark instruction as done and continue layout at target of jump */
			p.Mark |= DONE

			p = q
			if p.Mark&DONE == 0 {
				goto loop
			}
		}
	}

	if p.Mark&DONE != 0 {
		/*
		 * p goes here, but already used it elsewhere.
		 * copy up to 4 instructions or else branch to other copy.
		 */
		i = 0
		q = p
		for ; i < 4; i, q = i+1, q.Link {
			if q == nil {
				break
			}
			if q == *last {
				break
			}
			a = q.As
			if a == obj.ANOP {
				i--
				continue
			}

			if nofollow(a) || pushpop(a) {
				break // NOTE(rsc): arm does goto copy
			}
			if q.Pcond == nil || q.Pcond.Mark&DONE != 0 {
				continue
			}
			if a == obj.ACALL || a == ALOOP {
				continue
			}
			for {
				if p.As == obj.ANOP {
					p = p.Link
					continue
				}

				q = obj.Copyp(ctxt, p)
				p = p.Link
				q.Mark |= DONE
				(*last).Link = q
				*last = q
				if q.As != a || q.Pcond == nil || q.Pcond.Mark&DONE != 0 {
					continue
				}

				q.As = relinv(q.As)
				p = q.Pcond
				q.Pcond = q.Link
				q.Link = p
				xfol(ctxt, q.Link, last)
				p = q.Link
				if p.Mark&DONE != 0 {
					return
				}
				goto loop
				/* */
			}
		}
		q = ctxt.NewProg()
		q.As = obj.AJMP
		q.Lineno = p.Lineno
		q.To.Type = obj.TYPE_BRANCH
		q.To.Offset = p.Pc
		q.Pcond = p
		p = q
	}

	/* emit p */
	p.Mark |= DONE

	(*last).Link = p
	*last = p
	a = p.As

	/* continue loop with what comes after p */
	if nofollow(a) {
		return
	}
	if p.Pcond != nil && a != obj.ACALL {
		/*
		 * some kind of conditional branch.
		 * recurse to follow one path.
		 * continue loop on the other.
		 */
		q = obj.Brchain(ctxt, p.Pcond)
		if q != nil {
			p.Pcond = q
		}
		q = obj.Brchain(ctxt, p.Link)
		if q != nil {
			p.Link = q
		}
		if p.From.Type == obj.TYPE_CONST {
			if p.From.Offset == 1 {
				/*
				 * expect conditional jump to be taken.
				 * rewrite so that's the fall-through case.
				 */
				p.As = relinv(a)

				q = p.Link
				p.Link = p.Pcond
				p.Pcond = q
			}
		} else {
			q = p.Link
			if q.Mark&DONE != 0 {
				if a != ALOOP {
					p.As = relinv(a)
					p.Link = p.Pcond
					p.Pcond = q
				}
			}
		}

		xfol(ctxt, p.Link, last)
		if p.Pcond.Mark&DONE != 0 {
			return
		}
		p = p.Pcond
		goto loop
	}

	p = p.Link
	goto loop
}

var unaryDst = map[obj.As]bool{
	ABSWAPL:    true,
	ABSWAPQ:    true,
	ACMPXCHG8B: true,
	ADECB:      true,
	ADECL:      true,
	ADECQ:      true,
	ADECW:      true,
	AINCB:      true,
	AINCL:      true,
	AINCQ:      true,
	AINCW:      true,
	ANEGB:      true,
	ANEGL:      true,
	ANEGQ:      true,
	ANEGW:      true,
	ANOTB:      true,
	ANOTL:      true,
	ANOTQ:      true,
	ANOTW:      true,
	APOPL:      true,
	APOPQ:      true,
	APOPW:      true,
	ASETCC:     true,
	ASETCS:     true,
	ASETEQ:     true,
	ASETGE:     true,
	ASETGT:     true,
	ASETHI:     true,
	ASETLE:     true,
	ASETLS:     true,
	ASETLT:     true,
	ASETMI:     true,
	ASETNE:     true,
	ASETOC:     true,
	ASETOS:     true,
	ASETPC:     true,
	ASETPL:     true,
	ASETPS:     true,
	AFFREE:     true,
	AFLDENV:    true,
	AFSAVE:     true,
	AFSTCW:     true,
	AFSTENV:    true,
	AFSTSW:     true,
	AFXSAVE:    true,
	AFXSAVE64:  true,
	ASTMXCSR:   true,
}

var Linkamd64 = obj.LinkArch{
	Arch:       sys.ArchAMD64,
	Preprocess: preprocess,
	Assemble:   span6,
	Follow:     follow,
	Progedit:   progedit,
	UnaryDst:   unaryDst,
}

var Linkamd64p32 = obj.LinkArch{
	Arch:       sys.ArchAMD64P32,
	Preprocess: preprocess,
	Assemble:   span6,
	Follow:     follow,
	Progedit:   progedit,
	UnaryDst:   unaryDst,
}

var Link386 = obj.LinkArch{
	Arch:       sys.Arch386,
	Preprocess: preprocess,
	Assemble:   span6,
	Follow:     follow,
	Progedit:   progedit,
	UnaryDst:   unaryDst,
}