github.com/freddyisaac/sicortex-golang@v0.0.0-20231019035217-e03519e66f60/src/cmd/internal/obj/x86/obj6.go

// Inferno utils/6l/pass.c
// https://bitbucket.org/inferno-os/inferno-os/src/default/utils/6l/pass.c
//
//	Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
//	Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
//	Portions Copyright © 1997-1999 Vita Nuova Limited
//	Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
//	Portions Copyright © 2004,2006 Bruce Ellis
//	Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
//	Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
//	Portions Copyright © 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package x86

import (
	"cmd/internal/obj"
	"cmd/internal/sys"
	"fmt"
	"log"
	"math"
	"strings"
)

func CanUse1InsnTLS(ctxt *obj.Link) bool {
	if isAndroid {
		// For android, we use a disgusting hack that assumes
		// the thread-local storage slot for g is allocated
		// using pthread_key_create with a fixed offset
		// (see src/runtime/cgo/gcc_android_amd64.c).
		// This makes access to the TLS storage (for g) doable
		// with 1 instruction.
		return true
	}

	if ctxt.Arch.RegSize == 4 {
		switch ctxt.Headtype {
		case obj.Hlinux,
			obj.Hnacl,
			obj.Hplan9,
			obj.Hwindows,
			obj.Hwindowsgui:
			return false
		}

		return true
	}

	switch ctxt.Headtype {
	case obj.Hplan9, obj.Hwindows, obj.Hwindowsgui:
		return false
	case obj.Hlinux:
		return !ctxt.Flag_shared
	}

	return true
}

func progedit(ctxt *obj.Link, p *obj.Prog) {
	// Maintain information about code generation mode.
	if ctxt.Mode == 0 {
		ctxt.Mode = ctxt.Arch.RegSize * 8
	}
	p.Mode = int8(ctxt.Mode)

	switch p.As {
	case AMODE:
		if p.From.Type == obj.TYPE_CONST || (p.From.Type == obj.TYPE_MEM && p.From.Reg == REG_NONE) {
			switch int(p.From.Offset) {
			case 16, 32, 64:
				ctxt.Mode = int(p.From.Offset)
			}
		}
		obj.Nopout(p)
	}

	// Thread-local storage references use the TLS pseudo-register.
	// As a register, TLS refers to the thread-local storage base, and it
	// can only be loaded into another register:
	//
	//	MOVQ TLS, AX
	//
	// An offset from the thread-local storage base is written off(reg)(TLS*1).
	// Semantically it is off(reg), but the (TLS*1) annotation marks this as
	// indexing from the loaded TLS base. This emits a relocation so that
	// if the linker needs to adjust the offset, it can. For example:
	//
	//	MOVQ TLS, AX
	//	MOVQ 0(AX)(TLS*1), CX // load g into CX
	//
	// On systems that support direct access to the TLS memory, this
	// pair of instructions can be reduced to a direct TLS memory reference:
	//
	//	MOVQ 0(TLS), CX // load g into CX
	//
	// The 2-instruction and 1-instruction forms correspond to the two code
	// sequences for loading a TLS variable in the local exec model given in "ELF
	// Handling For Thread-Local Storage".
	//
	// We apply this rewrite on systems that support the 1-instruction form.
	// The decision is made using only the operating system and the -shared flag,
	// not the link mode. If some link modes on a particular operating system
	// require the 2-instruction form, then all builds for that operating system
	// will use the 2-instruction form, so that the link mode decision can be
	// delayed to link time.
	//
	// In this way, all supported systems use identical instructions to
	// access TLS, and they are rewritten appropriately first here in
	// liblink and then finally using relocations in the linker.
	//
	// When -shared is passed, we leave the code in the 2-instruction form but
	// assemble (and relocate) them in different ways to generate the initial
	// exec code sequence. It's a bit of a fluke that this is possible without
	// rewriting the instructions more comprehensively, and it only works because
	// we only support a single TLS variable (g).

	if CanUse1InsnTLS(ctxt) {
		// Reduce 2-instruction sequence to 1-instruction sequence.
		// Sequences like
		//	MOVQ TLS, BX
		//	... off(BX)(TLS*1) ...
		// become
		//	NOP
		//	... off(TLS) ...
		//
		// TODO(rsc): Remove the Hsolaris special case. It exists only to
		// guarantee we produce binaries byte-identical to those produced
		// before this code was added. But it should be unnecessary.
		if (p.As == AMOVQ || p.As == AMOVL) && p.From.Type == obj.TYPE_REG && p.From.Reg == REG_TLS && p.To.Type == obj.TYPE_REG && REG_AX <= p.To.Reg && p.To.Reg <= REG_R15 && ctxt.Headtype != obj.Hsolaris {
			obj.Nopout(p)
		}
		if p.From.Type == obj.TYPE_MEM && p.From.Index == REG_TLS && REG_AX <= p.From.Reg && p.From.Reg <= REG_R15 {
			p.From.Reg = REG_TLS
			p.From.Scale = 0
			p.From.Index = REG_NONE
		}

		if p.To.Type == obj.TYPE_MEM && p.To.Index == REG_TLS && REG_AX <= p.To.Reg && p.To.Reg <= REG_R15 {
			p.To.Reg = REG_TLS
			p.To.Scale = 0
			p.To.Index = REG_NONE
		}
	} else {
		// load_g_cx, below, always inserts the 1-instruction sequence. Rewrite it
		// as the 2-instruction sequence if necessary.
		//	MOVQ 0(TLS), BX
		// becomes
		//	MOVQ TLS, BX
		//	MOVQ 0(BX)(TLS*1), BX
		if (p.As == AMOVQ || p.As == AMOVL) && p.From.Type == obj.TYPE_MEM && p.From.Reg == REG_TLS && p.To.Type == obj.TYPE_REG && REG_AX <= p.To.Reg && p.To.Reg <= REG_R15 {
			q := obj.Appendp(ctxt, p)
			q.As = p.As
			q.From = p.From
			q.From.Type = obj.TYPE_MEM
			q.From.Reg = p.To.Reg
			q.From.Index = REG_TLS
			q.From.Scale = 2 // TODO: use 1
			q.To = p.To
			p.From.Type = obj.TYPE_REG
			p.From.Reg = REG_TLS
			p.From.Index = REG_NONE
			p.From.Offset = 0
		}
	}

	// TODO: Remove.
	if (ctxt.Headtype == obj.Hwindows || ctxt.Headtype == obj.Hwindowsgui) && p.Mode == 64 || ctxt.Headtype == obj.Hplan9 {
		if p.From.Scale == 1 && p.From.Index == REG_TLS {
			p.From.Scale = 2
		}
		if p.To.Scale == 1 && p.To.Index == REG_TLS {
			p.To.Scale = 2
		}
	}

	// Rewrite 0 to $0 in 3rd argument to CMPPS etc.
	// That's what the tables expect.
	switch p.As {
	case ACMPPD, ACMPPS, ACMPSD, ACMPSS:
		if p.To.Type == obj.TYPE_MEM && p.To.Name == obj.NAME_NONE && p.To.Reg == REG_NONE && p.To.Index == REG_NONE && p.To.Sym == nil {
			p.To.Type = obj.TYPE_CONST
		}
	}

	// Rewrite CALL/JMP/RET to symbol as TYPE_BRANCH.
	switch p.As {
	case obj.ACALL, obj.AJMP, obj.ARET:
		if p.To.Type == obj.TYPE_MEM && (p.To.Name == obj.NAME_EXTERN || p.To.Name == obj.NAME_STATIC) && p.To.Sym != nil {
			p.To.Type = obj.TYPE_BRANCH
		}
	}

	// Rewrite MOVL/MOVQ $XXX(FP/SP) as LEAL/LEAQ.
	if p.From.Type == obj.TYPE_ADDR && (ctxt.Arch.Family == sys.AMD64 || p.From.Name != obj.NAME_EXTERN && p.From.Name != obj.NAME_STATIC) {
		switch p.As {
		case AMOVL:
			p.As = ALEAL
			p.From.Type = obj.TYPE_MEM
		case AMOVQ:
			p.As = ALEAQ
			p.From.Type = obj.TYPE_MEM
		}
	}

	if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
		if p.From3 != nil {
			nacladdr(ctxt, p, p.From3)
		}
		nacladdr(ctxt, p, &p.From)
		nacladdr(ctxt, p, &p.To)
	}

	// Rewrite float constants to values stored in memory.
	switch p.As {
	// Convert AMOVSS $(0), Xx to AXORPS Xx, Xx
	case AMOVSS:
		if p.From.Type == obj.TYPE_FCONST {
			// f == 0 can't be used here due to -0, so use Float64bits
			if f := p.From.Val.(float64); math.Float64bits(f) == 0 {
				if p.To.Type == obj.TYPE_REG && REG_X0 <= p.To.Reg && p.To.Reg <= REG_X15 {
					p.As = AXORPS
					p.From = p.To
					break
				}
			}
		}
		fallthrough

	case AFMOVF,
		AFADDF,
		AFSUBF,
		AFSUBRF,
		AFMULF,
		AFDIVF,
		AFDIVRF,
		AFCOMF,
		AFCOMFP,
		AADDSS,
		ASUBSS,
		AMULSS,
		ADIVSS,
		ACOMISS,
		AUCOMISS:
		if p.From.Type == obj.TYPE_FCONST {
			f32 := float32(p.From.Val.(float64))
			i32 := math.Float32bits(f32)
			literal := fmt.Sprintf("$f32.%08x", i32)
			s := obj.Linklookup(ctxt, literal, 0)
			p.From.Type = obj.TYPE_MEM
			p.From.Name = obj.NAME_EXTERN
			p.From.Sym = s
			p.From.Sym.Set(obj.AttrLocal, true)
			p.From.Offset = 0
		}

	case AMOVSD:
		// Convert AMOVSD $(0), Xx to AXORPS Xx, Xx
		if p.From.Type == obj.TYPE_FCONST {
			// f == 0 can't be used here due to -0, so use Float64bits
			if f := p.From.Val.(float64); math.Float64bits(f) == 0 {
				if p.To.Type == obj.TYPE_REG && REG_X0 <= p.To.Reg && p.To.Reg <= REG_X15 {
					p.As = AXORPS
					p.From = p.To
					break
				}
			}
		}
		fallthrough

	case AFMOVD,
		AFADDD,
		AFSUBD,
		AFSUBRD,
		AFMULD,
		AFDIVD,
		AFDIVRD,
		AFCOMD,
		AFCOMDP,
		AADDSD,
		ASUBSD,
		AMULSD,
		ADIVSD,
		ACOMISD,
		AUCOMISD:
		if p.From.Type == obj.TYPE_FCONST {
			i64 := math.Float64bits(p.From.Val.(float64))
			literal := fmt.Sprintf("$f64.%016x", i64)
			s := obj.Linklookup(ctxt, literal, 0)
			p.From.Type = obj.TYPE_MEM
			p.From.Name = obj.NAME_EXTERN
			p.From.Sym = s
			p.From.Sym.Set(obj.AttrLocal, true)
			p.From.Offset = 0
		}
	}

	if ctxt.Flag_dynlink {
		rewriteToUseGot(ctxt, p)
	}

	if ctxt.Flag_shared && p.Mode == 32 {
		rewriteToPcrel(ctxt, p)
	}
}

// Rewrite p, if necessary, to access global data via the global offset table.
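// For example (an illustrative sketch following the notation used in the
// comments inside this function; the exact operand syntax is assembler-
// dependent), with -dynlink on amd64 taking the address of a global
//
//	MOVQ $sym(SB), AX
//
// becomes a load of the address from the GOT:
//
//	MOVQ sym@GOT(SB), AX
//
// Direct loads and stores of a global go through a scratch register
// (R15 on amd64, CX on 386); see the MOVx rewrite described further below.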
func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog) {
	var lea, mov obj.As
	var reg int16
	if p.Mode == 64 {
		lea = ALEAQ
		mov = AMOVQ
		reg = REG_R15
	} else {
		lea = ALEAL
		mov = AMOVL
		reg = REG_CX
		if p.As == ALEAL && p.To.Reg != p.From.Reg && p.To.Reg != p.From.Index {
			// Special case: clobber the destination register with
			// the PC so we don't have to clobber CX.
			// The SSA backend depends on CX not being clobbered across LEAL.
			// See cmd/compile/internal/ssa/gen/386.rules (search for Flag_shared).
			reg = p.To.Reg
		}
	}

	if p.As == obj.ADUFFCOPY || p.As == obj.ADUFFZERO {
		//	ADUFFxxx $offset
		// becomes
		//	$MOV runtime.duffxxx@GOT, $reg
		//	$LEA $offset($reg), $reg
		//	CALL $reg
		// (we use LEAx rather than ADDx because ADDx clobbers
		// flags and duffzero on 386 does not otherwise do so)
		var sym *obj.LSym
		if p.As == obj.ADUFFZERO {
			sym = obj.Linklookup(ctxt, "runtime.duffzero", 0)
		} else {
			sym = obj.Linklookup(ctxt, "runtime.duffcopy", 0)
		}
		offset := p.To.Offset
		p.As = mov
		p.From.Type = obj.TYPE_MEM
		p.From.Name = obj.NAME_GOTREF
		p.From.Sym = sym
		p.To.Type = obj.TYPE_REG
		p.To.Reg = reg
		p.To.Offset = 0
		p.To.Sym = nil
		p1 := obj.Appendp(ctxt, p)
		p1.As = lea
		p1.From.Type = obj.TYPE_MEM
		p1.From.Offset = offset
		p1.From.Reg = reg
		p1.To.Type = obj.TYPE_REG
		p1.To.Reg = reg
		p2 := obj.Appendp(ctxt, p1)
		p2.As = obj.ACALL
		p2.To.Type = obj.TYPE_REG
		p2.To.Reg = reg
	}

	// We only care about global data: NAME_EXTERN means a global
	// symbol in the Go sense, and p.Sym.Local is true for a few
	// internally defined symbols.
	if p.As == lea && p.From.Type == obj.TYPE_MEM && p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() {
		// $LEA sym, Rx becomes $MOV $sym, Rx which will be rewritten below
		p.As = mov
		p.From.Type = obj.TYPE_ADDR
	}
	if p.From.Type == obj.TYPE_ADDR && p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() {
		// $MOV $sym, Rx becomes $MOV sym@GOT, Rx
		// $MOV $sym+<off>, Rx becomes $MOV sym@GOT, Rx; $LEA <off>(Rx), Rx
		// On 386 only, more complicated things like PUSHL $sym become $MOV sym@GOT, CX; PUSHL CX
		cmplxdest := false
		pAs := p.As
		var dest obj.Addr
		if p.To.Type != obj.TYPE_REG || pAs != mov {
			if p.Mode == 64 {
				ctxt.Diag("do not know how to handle LEA-type insn to non-register in %v with -dynlink", p)
			}
			cmplxdest = true
			dest = p.To
			p.As = mov
			p.To.Type = obj.TYPE_REG
			p.To.Reg = reg
			p.To.Sym = nil
			p.To.Name = obj.NAME_NONE
		}
		p.From.Type = obj.TYPE_MEM
		p.From.Name = obj.NAME_GOTREF
		q := p
		if p.From.Offset != 0 {
			q = obj.Appendp(ctxt, p)
			q.As = lea
			q.From.Type = obj.TYPE_MEM
			q.From.Reg = p.To.Reg
			q.From.Offset = p.From.Offset
			q.To = p.To
			p.From.Offset = 0
		}
		if cmplxdest {
			q = obj.Appendp(ctxt, q)
			q.As = pAs
			q.To = dest
			q.From.Type = obj.TYPE_REG
			q.From.Reg = reg
		}
	}
	if p.From3 != nil && p.From3.Name == obj.NAME_EXTERN {
		ctxt.Diag("don't know how to handle %v with -dynlink", p)
	}
	var source *obj.Addr
	// MOVx sym, Ry becomes $MOV sym@GOT, R15; MOVx (R15), Ry
	// MOVx Ry, sym becomes $MOV sym@GOT, R15; MOVx Ry, (R15)
	// An addition may be inserted between the two MOVs if there is an offset.
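	// For example (illustrative, following the notation above), on amd64 a
	// store to a global
	//
	//	MOVQ AX, sym(SB)
	//
	// becomes
	//
	//	MOVQ sym@GOT(SB), R15
	//	MOVQ AX, (R15)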
	if p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() {
		if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() {
			ctxt.Diag("cannot handle NAME_EXTERN on both sides in %v with -dynlink", p)
		}
		source = &p.From
	} else if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() {
		source = &p.To
	} else {
		return
	}
	if p.As == obj.ACALL {
		// When dynlinking on 386, almost any call might end up being a call
		// to a PLT, so make sure the GOT pointer is loaded into BX.
		// RegTo2 is set on the replacement call insn to stop it being
		// processed when it is in turn passed to progedit.
		if p.Mode == 64 || (p.To.Sym != nil && p.To.Sym.Local()) || p.RegTo2 != 0 {
			return
		}
		p1 := obj.Appendp(ctxt, p)
		p2 := obj.Appendp(ctxt, p1)

		p1.As = ALEAL
		p1.From.Type = obj.TYPE_MEM
		p1.From.Name = obj.NAME_STATIC
		p1.From.Sym = obj.Linklookup(ctxt, "_GLOBAL_OFFSET_TABLE_", 0)
		p1.To.Type = obj.TYPE_REG
		p1.To.Reg = REG_BX

		p2.As = p.As
		p2.Scond = p.Scond
		p2.From = p.From
		p2.From3 = p.From3
		p2.Reg = p.Reg
		p2.To = p.To
		// p.To.Type was set to TYPE_BRANCH above, but that makes checkaddr
		// in ../pass.go complain, so set it back to TYPE_MEM here, until p2
		// itself gets passed to progedit.
		p2.To.Type = obj.TYPE_MEM
		p2.RegTo2 = 1

		obj.Nopout(p)
		return

	}
	if p.As == obj.ATEXT || p.As == obj.AFUNCDATA || p.As == obj.ARET || p.As == obj.AJMP {
		return
	}
	if source.Type != obj.TYPE_MEM {
		ctxt.Diag("don't know how to handle %v with -dynlink", p)
	}
	p1 := obj.Appendp(ctxt, p)
	p2 := obj.Appendp(ctxt, p1)

	p1.As = mov
	p1.From.Type = obj.TYPE_MEM
	p1.From.Sym = source.Sym
	p1.From.Name = obj.NAME_GOTREF
	p1.To.Type = obj.TYPE_REG
	p1.To.Reg = reg

	p2.As = p.As
	p2.From = p.From
	p2.To = p.To
	if p.From.Name == obj.NAME_EXTERN {
		p2.From.Reg = reg
		p2.From.Name = obj.NAME_NONE
		p2.From.Sym = nil
	} else if p.To.Name == obj.NAME_EXTERN {
		p2.To.Reg = reg
		p2.To.Name = obj.NAME_NONE
		p2.To.Sym = nil
	} else {
		return
	}
	obj.Nopout(p)
}

func rewriteToPcrel(ctxt *obj.Link, p *obj.Prog) {
	// RegTo2 is set on the instructions we insert here so they don't get
	// processed twice.
	if p.RegTo2 != 0 {
		return
	}
	if p.As == obj.ATEXT || p.As == obj.AFUNCDATA || p.As == obj.ACALL || p.As == obj.ARET || p.As == obj.AJMP {
		return
	}
	// Any Prog (aside from the above special cases) with an Addr with Name ==
	// NAME_EXTERN, NAME_STATIC or NAME_GOTREF has a CALL __x86.get_pc_thunk.XX
	// inserted before it.
	isName := func(a *obj.Addr) bool {
		if a.Sym == nil || (a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR) || a.Reg != 0 {
			return false
		}
		if a.Sym.Type == obj.STLSBSS {
			return false
		}
		return a.Name == obj.NAME_EXTERN || a.Name == obj.NAME_STATIC || a.Name == obj.NAME_GOTREF
	}

	if isName(&p.From) && p.From.Type == obj.TYPE_ADDR {
		// Handle things like "MOVL $sym, (SP)" or "PUSHL $sym" by rewriting
		// to "MOVL $sym, CX; MOVL CX, (SP)" or "MOVL $sym, CX; PUSHL CX"
		// respectively.
		if p.To.Type != obj.TYPE_REG {
			q := obj.Appendp(ctxt, p)
			q.As = p.As
			q.From.Type = obj.TYPE_REG
			q.From.Reg = REG_CX
			q.To = p.To
			p.As = AMOVL
			p.To.Type = obj.TYPE_REG
			p.To.Reg = REG_CX
			p.To.Sym = nil
			p.To.Name = obj.NAME_NONE
		}
	}

	if !isName(&p.From) && !isName(&p.To) && (p.From3 == nil || !isName(p.From3)) {
		return
	}
	var dst int16 = REG_CX
	if (p.As == ALEAL || p.As == AMOVL) && p.To.Reg != p.From.Reg && p.To.Reg != p.From.Index {
		dst = p.To.Reg
		// Why? See the comment near the top of rewriteToUseGot above.
		// AMOVLs might be introduced by the GOT rewrites.
	}
	q := obj.Appendp(ctxt, p)
	q.RegTo2 = 1
	r := obj.Appendp(ctxt, q)
	r.RegTo2 = 1
	q.As = obj.ACALL
	q.To.Sym = obj.Linklookup(ctxt, "__x86.get_pc_thunk."+strings.ToLower(Rconv(int(dst))), 0)
	q.To.Type = obj.TYPE_MEM
	q.To.Name = obj.NAME_EXTERN
	q.To.Sym.Set(obj.AttrLocal, true)
	r.As = p.As
	r.Scond = p.Scond
	r.From = p.From
	r.From3 = p.From3
	r.Reg = p.Reg
	r.To = p.To
	if isName(&p.From) {
		r.From.Reg = dst
	}
	if isName(&p.To) {
		r.To.Reg = dst
	}
	if p.From3 != nil && isName(p.From3) {
		r.From3.Reg = dst
	}
	obj.Nopout(p)
}

func nacladdr(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
	if p.As == ALEAL || p.As == ALEAQ {
		return
	}

	if a.Reg == REG_BP {
		ctxt.Diag("invalid address: %v", p)
		return
	}

	if a.Reg == REG_TLS {
		a.Reg = REG_BP
	}
	if a.Type == obj.TYPE_MEM && a.Name == obj.NAME_NONE {
		switch a.Reg {
		// all ok
		case REG_BP, REG_SP, REG_R15:
			break

		default:
			if a.Index != REG_NONE {
				ctxt.Diag("invalid address %v", p)
			}
			a.Index = a.Reg
			if a.Index != REG_NONE {
				a.Scale = 1
			}
			a.Reg = REG_R15
		}
	}
}

func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
	if ctxt.Headtype == obj.Hplan9 && ctxt.Plan9privates == nil {
		ctxt.Plan9privates = obj.Linklookup(ctxt, "_privates", 0)
	}

	ctxt.Cursym = cursym

	if cursym.Text == nil || cursym.Text.Link == nil {
		return
	}

	p := cursym.Text
	autoffset := int32(p.To.Offset)
	if autoffset < 0 {
		autoffset = 0
	}

	hasCall := false
	for q := p; q != nil; q = q.Link {
		if q.As == obj.ACALL || q.As == obj.ADUFFCOPY || q.As == obj.ADUFFZERO {
			hasCall = true
			break
		}
	}

	var bpsize int
	if p.Mode == 64 && ctxt.Framepointer_enabled &&
		p.From3.Offset&obj.NOFRAME == 0 && // (1) below
		!(autoffset == 0 && p.From3.Offset&obj.NOSPLIT != 0) && // (2) below
		!(autoffset == 0 && !hasCall) { // (3) below
		// Make room to save a base pointer.
		// There are 2 cases we must avoid:
		// 1) If noframe is set (which we do for functions which tail call).
		// 2) Scary runtime internals which would be all messed up by frame pointers.
		//    We detect these using a heuristic: frameless nosplit functions.
		//    TODO: Maybe someday we label them all with NOFRAME and get rid of this heuristic.
		// For performance, we also want to avoid:
		// 3) Frameless leaf functions
		bpsize = ctxt.Arch.PtrSize
		autoffset += int32(bpsize)
		p.To.Offset += int64(bpsize)
	} else {
		bpsize = 0
	}

	textarg := int64(p.To.Val.(int32))
	cursym.Args = int32(textarg)
	cursym.Locals = int32(p.To.Offset)

	// TODO(rsc): Remove.
	if p.Mode == 32 && cursym.Locals < 0 {
		cursym.Locals = 0
	}

	// TODO(rsc): Remove 'p.Mode == 64 &&'.
	if p.Mode == 64 && autoffset < obj.StackSmall && p.From3Offset()&obj.NOSPLIT == 0 {
		leaf := true
	LeafSearch:
		for q := p; q != nil; q = q.Link {
			switch q.As {
			case obj.ACALL:
				// Treat common runtime calls that take no arguments
				// the same as duffcopy and duffzero.
				if !isZeroArgRuntimeCall(q.To.Sym) {
					leaf = false
					break LeafSearch
				}
				fallthrough
			case obj.ADUFFCOPY, obj.ADUFFZERO:
				if autoffset >= obj.StackSmall-8 {
					leaf = false
					break LeafSearch
				}
			}
		}

		if leaf {
			p.From3.Offset |= obj.NOSPLIT
		}
	}

	if p.From3Offset()&obj.NOSPLIT == 0 || p.From3Offset()&obj.WRAPPER != 0 {
		p = obj.Appendp(ctxt, p)
		p = load_g_cx(ctxt, p) // load g into CX
	}

	if cursym.Text.From3Offset()&obj.NOSPLIT == 0 {
		p = stacksplit(ctxt, p, autoffset, int32(textarg)) // emit split check
	}

	if autoffset != 0 {
		if autoffset%int32(ctxt.Arch.RegSize) != 0 {
			ctxt.Diag("unaligned stack size %d", autoffset)
		}
		p = obj.Appendp(ctxt, p)
		p.As = AADJSP
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(autoffset)
		p.Spadj = autoffset
	}

	deltasp := autoffset

	if bpsize > 0 {
		// Save caller's BP
		p = obj.Appendp(ctxt, p)

		p.As = AMOVQ
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_BP
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = REG_SP
		p.To.Scale = 1
		p.To.Offset = int64(autoffset) - int64(bpsize)

		// Move current frame to BP
		p = obj.Appendp(ctxt, p)

		p.As = ALEAQ
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_SP
		p.From.Scale = 1
		p.From.Offset = int64(autoffset) - int64(bpsize)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_BP
	}

	if cursym.Text.From3Offset()&obj.WRAPPER != 0 {
		// if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
		//
		//	MOVQ g_panic(CX), BX
		//	TESTQ BX, BX
		//	JEQ end
		//	LEAQ (autoffset+8)(SP), DI
		//	CMPQ panic_argp(BX), DI
		//	JNE end
		//	MOVQ SP, panic_argp(BX)
		// end:
		//	NOP
		//
		// The NOP is needed to give the jumps somewhere to land.
		// It is a liblink NOP, not an x86 NOP: it encodes to 0 instruction bytes.
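		//
		// The code below emits this sequence. On 386 and NaCl the 32-bit
		// forms (MOVL/TESTL/CMPL/LEAL) are used instead, and on NaCl/amd64p32
		// the g_panic load, the argp compare, and the argp store are
		// addressed through R15 with the original register as index.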

		p = obj.Appendp(ctxt, p)

		p.As = AMOVQ
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_CX
		p.From.Offset = 4 * int64(ctxt.Arch.PtrSize) // G.panic
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_BX
		if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
			p.As = AMOVL
			p.From.Type = obj.TYPE_MEM
			p.From.Reg = REG_R15
			p.From.Scale = 1
			p.From.Index = REG_CX
		}
		if p.Mode == 32 {
			p.As = AMOVL
		}

		p = obj.Appendp(ctxt, p)
		p.As = ATESTQ
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_BX
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_BX
		if ctxt.Headtype == obj.Hnacl || p.Mode == 32 {
			p.As = ATESTL
		}

		p = obj.Appendp(ctxt, p)
		p.As = AJEQ
		p.To.Type = obj.TYPE_BRANCH
		p1 := p

		p = obj.Appendp(ctxt, p)
		p.As = ALEAQ
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_SP
		p.From.Offset = int64(autoffset) + int64(ctxt.Arch.RegSize)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_DI
		if ctxt.Headtype == obj.Hnacl || p.Mode == 32 {
			p.As = ALEAL
		}

		p = obj.Appendp(ctxt, p)
		p.As = ACMPQ
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_BX
		p.From.Offset = 0 // Panic.argp
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_DI
		if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
			p.As = ACMPL
			p.From.Type = obj.TYPE_MEM
			p.From.Reg = REG_R15
			p.From.Scale = 1
			p.From.Index = REG_BX
		}
		if p.Mode == 32 {
			p.As = ACMPL
		}

		p = obj.Appendp(ctxt, p)
		p.As = AJNE
		p.To.Type = obj.TYPE_BRANCH
		p2 := p

		p = obj.Appendp(ctxt, p)
		p.As = AMOVQ
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_SP
		p.To.Type = obj.TYPE_MEM
		p.To.Reg = REG_BX
		p.To.Offset = 0 // Panic.argp
		if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
			p.As = AMOVL
			p.To.Type = obj.TYPE_MEM
			p.To.Reg = REG_R15
			p.To.Scale = 1
			p.To.Index = REG_BX
		}
		if p.Mode == 32 {
			p.As = AMOVL
		}

		p = obj.Appendp(ctxt, p)
		p.As = obj.ANOP
		p1.Pcond = p
		p2.Pcond = p
	}

	for ; p != nil; p = p.Link {
		pcsize := int(p.Mode) / 8
		switch p.From.Name {
		case obj.NAME_AUTO:
			p.From.Offset += int64(deltasp) - int64(bpsize)
		case obj.NAME_PARAM:
			p.From.Offset += int64(deltasp) + int64(pcsize)
		}
		if p.From3 != nil {
			switch p.From3.Name {
			case obj.NAME_AUTO:
				p.From3.Offset += int64(deltasp) - int64(bpsize)
			case obj.NAME_PARAM:
				p.From3.Offset += int64(deltasp) + int64(pcsize)
			}
		}
		switch p.To.Name {
		case obj.NAME_AUTO:
			p.To.Offset += int64(deltasp) - int64(bpsize)
		case obj.NAME_PARAM:
			p.To.Offset += int64(deltasp) + int64(pcsize)
		}

		switch p.As {
		default:
			continue

		case APUSHL, APUSHFL:
			deltasp += 4
			p.Spadj = 4
			continue

		case APUSHQ, APUSHFQ:
			deltasp += 8
			p.Spadj = 8
			continue

		case APUSHW, APUSHFW:
			deltasp += 2
			p.Spadj = 2
			continue

		case APOPL, APOPFL:
			deltasp -= 4
			p.Spadj = -4
			continue

		case APOPQ, APOPFQ:
			deltasp -= 8
			p.Spadj = -8
			continue

		case APOPW, APOPFW:
			deltasp -= 2
			p.Spadj = -2
			continue

		case obj.ARET:
			// do nothing
		}

		if autoffset != deltasp {
			ctxt.Diag("unbalanced PUSH/POP")
		}

		if autoffset != 0 {
			if bpsize > 0 {
				// Restore caller's BP
				p.As = AMOVQ

				p.From.Type = obj.TYPE_MEM
				p.From.Reg = REG_SP
				p.From.Scale = 1
				p.From.Offset = int64(autoffset) - int64(bpsize)
				p.To.Type = obj.TYPE_REG
				p.To.Reg = REG_BP
				p = obj.Appendp(ctxt, p)
			}

			p.As = AADJSP
			p.From.Type = obj.TYPE_CONST
			p.From.Offset = int64(-autoffset)
			p.Spadj = -autoffset
			p = obj.Appendp(ctxt, p)
			p.As = obj.ARET

			// If there are instructions following
			// this ARET, they come from a branch
			// with the same stackframe, so undo
			// the cleanup.
			p.Spadj = +autoffset
		}

		if p.To.Sym != nil { // retjmp
			p.As = obj.AJMP
		}
	}
}

func isZeroArgRuntimeCall(s *obj.LSym) bool {
	if s == nil {
		return false
	}
	switch s.Name {
	case "runtime.panicindex", "runtime.panicslice", "runtime.panicdivide":
		return true
	}
	return false
}

func indir_cx(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
	if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
		a.Type = obj.TYPE_MEM
		a.Reg = REG_R15
		a.Index = REG_CX
		a.Scale = 1
		return
	}

	a.Type = obj.TYPE_MEM
	a.Reg = REG_CX
}

// Append code to p to load g into cx.
// Overwrites p with the first instruction (no first appendp).
// Overwriting p is unusual but it lets us use this in both the
// prologue (caller must call appendp first) and in the epilogue.
// Returns last new instruction.
func load_g_cx(ctxt *obj.Link, p *obj.Prog) *obj.Prog {
	p.As = AMOVQ
	if ctxt.Arch.PtrSize == 4 {
		p.As = AMOVL
	}
	p.From.Type = obj.TYPE_MEM
	p.From.Reg = REG_TLS
	p.From.Offset = 0
	p.To.Type = obj.TYPE_REG
	p.To.Reg = REG_CX

	next := p.Link
	progedit(ctxt, p)
	for p.Link != next {
		p = p.Link
	}

	if p.From.Index == REG_TLS {
		p.From.Scale = 2
	}

	return p
}

// Append code to p to check for stack split.
// Appends to (does not overwrite) p.
// Assumes g is in CX.
// Returns last new instruction.
func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, textarg int32) *obj.Prog {
	cmp := ACMPQ
	lea := ALEAQ
	mov := AMOVQ
	sub := ASUBQ

	if ctxt.Headtype == obj.Hnacl || p.Mode == 32 {
		cmp = ACMPL
		lea = ALEAL
		mov = AMOVL
		sub = ASUBL
	}

	var q1 *obj.Prog
	if framesize <= obj.StackSmall {
		// small stack: SP <= stackguard
		//	CMPQ SP, stackguard
		p = obj.Appendp(ctxt, p)

		p.As = cmp
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_SP
		indir_cx(ctxt, p, &p.To)
		p.To.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0
		if ctxt.Cursym.CFunc() {
			p.To.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1
		}
	} else if framesize <= obj.StackBig {
		// large stack: SP-framesize <= stackguard-StackSmall
		//	LEAQ -xxx(SP), AX
		//	CMPQ AX, stackguard
		p = obj.Appendp(ctxt, p)

		p.As = lea
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_SP
		p.From.Offset = -(int64(framesize) - obj.StackSmall)
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_AX

		p = obj.Appendp(ctxt, p)
		p.As = cmp
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_AX
		indir_cx(ctxt, p, &p.To)
		p.To.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0
		if ctxt.Cursym.CFunc() {
			p.To.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1
		}
	} else {
		// Such a large stack we need to protect against wraparound.
		// If SP is close to zero:
		//	SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall)
		// The +StackGuard on both sides is required to keep the left side positive:
		// SP is allowed to be slightly below stackguard. See stack.h.
		//
		// Preemption sets stackguard to StackPreempt, a very large value.
		// That breaks the math above, so we have to check for that explicitly.
		//	MOVQ stackguard, SI
		//	CMPQ SI, $StackPreempt
		//	JEQ label-of-call-to-morestack
		//	LEAQ StackGuard(SP), AX
		//	SUBQ SI, AX
		//	CMPQ AX, $(framesize+(StackGuard-StackSmall))

		p = obj.Appendp(ctxt, p)

		p.As = mov
		indir_cx(ctxt, p, &p.From)
		p.From.Offset = 2 * int64(ctxt.Arch.PtrSize) // G.stackguard0
		if ctxt.Cursym.CFunc() {
			p.From.Offset = 3 * int64(ctxt.Arch.PtrSize) // G.stackguard1
		}
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_SI

		p = obj.Appendp(ctxt, p)
		p.As = cmp
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_SI
		p.To.Type = obj.TYPE_CONST
		p.To.Offset = obj.StackPreempt
		if p.Mode == 32 {
			p.To.Offset = int64(uint32(obj.StackPreempt & (1<<32 - 1)))
		}

		p = obj.Appendp(ctxt, p)
		p.As = AJEQ
		p.To.Type = obj.TYPE_BRANCH
		q1 = p

		p = obj.Appendp(ctxt, p)
		p.As = lea
		p.From.Type = obj.TYPE_MEM
		p.From.Reg = REG_SP
		p.From.Offset = obj.StackGuard
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_AX

		p = obj.Appendp(ctxt, p)
		p.As = sub
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_SI
		p.To.Type = obj.TYPE_REG
		p.To.Reg = REG_AX

		p = obj.Appendp(ctxt, p)
		p.As = cmp
		p.From.Type = obj.TYPE_REG
		p.From.Reg = REG_AX
		p.To.Type = obj.TYPE_CONST
		p.To.Offset = int64(framesize) + (obj.StackGuard - obj.StackSmall)
	}

	// common
	jls := obj.Appendp(ctxt, p)
	jls.As = AJLS
	jls.To.Type = obj.TYPE_BRANCH

	var last *obj.Prog
	for last = ctxt.Cursym.Text; last.Link != nil; last = last.Link {
	}

	// Now we are at the end of the function, but logically
	// we are still in function prologue. We need to fix the
	// SP data and PCDATA.
	spfix := obj.Appendp(ctxt, last)
	spfix.As = obj.ANOP
	spfix.Spadj = -framesize

	pcdata := obj.Appendp(ctxt, spfix)
	pcdata.Lineno = ctxt.Cursym.Text.Lineno
	pcdata.Mode = ctxt.Cursym.Text.Mode
	pcdata.As = obj.APCDATA
	pcdata.From.Type = obj.TYPE_CONST
	pcdata.From.Offset = obj.PCDATA_StackMapIndex
	pcdata.To.Type = obj.TYPE_CONST
	pcdata.To.Offset = -1 // pcdata starts at -1 at function entry

	call := obj.Appendp(ctxt, pcdata)
	call.Lineno = ctxt.Cursym.Text.Lineno
	call.Mode = ctxt.Cursym.Text.Mode
	call.As = obj.ACALL
	call.To.Type = obj.TYPE_BRANCH
	call.To.Name = obj.NAME_EXTERN
	morestack := "runtime.morestack"
	switch {
	case ctxt.Cursym.CFunc():
		morestack = "runtime.morestackc"
	case ctxt.Cursym.Text.From3Offset()&obj.NEEDCTXT == 0:
		morestack = "runtime.morestack_noctxt"
	}
	call.To.Sym = obj.Linklookup(ctxt, morestack, 0)
	// When compiling 386 code for dynamic linking, the call needs to be adjusted
	// to follow PIC rules. This in turn can insert more instructions, so we need
	// to keep track of the start of the call (where the jump will be to) and the
	// end (which following instructions are appended to).
	callend := call
	progedit(ctxt, callend)
	for ; callend.Link != nil; callend = callend.Link {
		progedit(ctxt, callend.Link)
	}

	jmp := obj.Appendp(ctxt, callend)
	jmp.As = obj.AJMP
	jmp.To.Type = obj.TYPE_BRANCH
	jmp.Pcond = ctxt.Cursym.Text.Link
	jmp.Spadj = +framesize

	jls.Pcond = call
	if q1 != nil {
		q1.Pcond = call
	}

	return jls
}

func follow(ctxt *obj.Link, s *obj.LSym) {
	ctxt.Cursym = s

	firstp := ctxt.NewProg()
	lastp := firstp
	xfol(ctxt, s.Text, &lastp)
	lastp.Link = nil
	s.Text = firstp.Link
}

func nofollow(a obj.As) bool {
	switch a {
	case obj.AJMP,
		obj.ARET,
		AIRETL,
		AIRETQ,
		AIRETW,
		ARETFL,
		ARETFQ,
		ARETFW,
		obj.AUNDEF:
		return true
	}

	return false
}

func pushpop(a obj.As) bool {
	switch a {
	case APUSHL,
		APUSHFL,
		APUSHQ,
		APUSHFQ,
		APUSHW,
		APUSHFW,
		APOPL,
		APOPFL,
		APOPQ,
		APOPFQ,
		APOPW,
		APOPFW:
		return true
	}

	return false
}

func relinv(a obj.As) obj.As {
	switch a {
	case AJEQ:
		return AJNE
	case AJNE:
		return AJEQ
	case AJLE:
		return AJGT
	case AJLS:
		return AJHI
	case AJLT:
		return AJGE
	case AJMI:
		return AJPL
	case AJGE:
		return AJLT
	case AJPL:
		return AJMI
	case AJGT:
		return AJLE
	case AJHI:
		return AJLS
	case AJCS:
		return AJCC
	case AJCC:
		return AJCS
	case AJPS:
		return AJPC
	case AJPC:
		return AJPS
	case AJOS:
		return AJOC
	case AJOC:
		return AJOS
	}

	log.Fatalf("unknown relation: %s", a)
	return 0
}

func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) {
	var q *obj.Prog
	var i int
	var a obj.As

loop:
	if p == nil {
		return
	}
	if p.As == obj.AJMP {
		q = p.Pcond
		if q != nil && q.As != obj.ATEXT {
			/* mark instruction as done and continue layout at target of jump */
			p.Mark |= DONE

			p = q
			if p.Mark&DONE == 0 {
				goto loop
			}
		}
	}

	if p.Mark&DONE != 0 {
		/*
		 * p goes here, but already used it elsewhere.
		 * copy up to 4 instructions or else branch to other copy.
		 */
		i = 0
		q = p
		for ; i < 4; i, q = i+1, q.Link {
			if q == nil {
				break
			}
			if q == *last {
				break
			}
			a = q.As
			if a == obj.ANOP {
				i--
				continue
			}

			if nofollow(a) || pushpop(a) {
				break // NOTE(rsc): arm does goto copy
			}
			if q.Pcond == nil || q.Pcond.Mark&DONE != 0 {
				continue
			}
			if a == obj.ACALL || a == ALOOP {
				continue
			}
			for {
				if p.As == obj.ANOP {
					p = p.Link
					continue
				}

				q = obj.Copyp(ctxt, p)
				p = p.Link
				q.Mark |= DONE
				(*last).Link = q
				*last = q
				if q.As != a || q.Pcond == nil || q.Pcond.Mark&DONE != 0 {
					continue
				}

				q.As = relinv(q.As)
				p = q.Pcond
				q.Pcond = q.Link
				q.Link = p
				xfol(ctxt, q.Link, last)
				p = q.Link
				if p.Mark&DONE != 0 {
					return
				}
				goto loop
				/* */
			}
		}
		q = ctxt.NewProg()
		q.As = obj.AJMP
		q.Lineno = p.Lineno
		q.To.Type = obj.TYPE_BRANCH
		q.To.Offset = p.Pc
		q.Pcond = p
		p = q
	}

	/* emit p */
	p.Mark |= DONE

	(*last).Link = p
	*last = p
	a = p.As

	/* continue loop with what comes after p */
	if nofollow(a) {
		return
	}
	if p.Pcond != nil && a != obj.ACALL {
		/*
		 * some kind of conditional branch.
		 * recurse to follow one path.
		 * continue loop on the other.
		 */
		q = obj.Brchain(ctxt, p.Pcond)
		if q != nil {
			p.Pcond = q
		}
		q = obj.Brchain(ctxt, p.Link)
		if q != nil {
			p.Link = q
		}
		if p.From.Type == obj.TYPE_CONST {
			if p.From.Offset == 1 {
				/*
				 * expect conditional jump to be taken.
				 * rewrite so that's the fall-through case.
				 */
				p.As = relinv(a)

				q = p.Link
				p.Link = p.Pcond
				p.Pcond = q
			}
		} else {
			q = p.Link
			if q.Mark&DONE != 0 {
				if a != ALOOP {
					p.As = relinv(a)
					p.Link = p.Pcond
					p.Pcond = q
				}
			}
		}

		xfol(ctxt, p.Link, last)
		if p.Pcond.Mark&DONE != 0 {
			return
		}
		p = p.Pcond
		goto loop
	}

	p = p.Link
	goto loop
}

var unaryDst = map[obj.As]bool{
	ABSWAPL:    true,
	ABSWAPQ:    true,
	ACMPXCHG8B: true,
	ADECB:      true,
	ADECL:      true,
	ADECQ:      true,
	ADECW:      true,
	AINCB:      true,
	AINCL:      true,
	AINCQ:      true,
	AINCW:      true,
	ANEGB:      true,
	ANEGL:      true,
	ANEGQ:      true,
	ANEGW:      true,
	ANOTB:      true,
	ANOTL:      true,
	ANOTQ:      true,
	ANOTW:      true,
	APOPL:      true,
	APOPQ:      true,
	APOPW:      true,
	ASETCC:     true,
	ASETCS:     true,
	ASETEQ:     true,
	ASETGE:     true,
	ASETGT:     true,
	ASETHI:     true,
	ASETLE:     true,
	ASETLS:     true,
	ASETLT:     true,
	ASETMI:     true,
	ASETNE:     true,
	ASETOC:     true,
	ASETOS:     true,
	ASETPC:     true,
	ASETPL:     true,
	ASETPS:     true,
	AFFREE:     true,
	AFLDENV:    true,
	AFSAVE:     true,
	AFSTCW:     true,
	AFSTENV:    true,
	AFSTSW:     true,
	AFXSAVE:    true,
	AFXSAVE64:  true,
	ASTMXCSR:   true,
}

var Linkamd64 = obj.LinkArch{
	Arch:       sys.ArchAMD64,
	Preprocess: preprocess,
	Assemble:   span6,
	Follow:     follow,
	Progedit:   progedit,
	UnaryDst:   unaryDst,
}

var Linkamd64p32 = obj.LinkArch{
	Arch:       sys.ArchAMD64P32,
	Preprocess: preprocess,
	Assemble:   span6,
	Follow:     follow,
	Progedit:   progedit,
	UnaryDst:   unaryDst,
}

var Link386 = obj.LinkArch{
	Arch:       sys.Arch386,
	Preprocess: preprocess,
	Assemble:   span6,
	Follow:     follow,
	Progedit:   progedit,
	UnaryDst:   unaryDst,
}