github.com/mh-cbon/go@v0.0.0-20160603070303-9e112a3fe4c0/src/cmd/compile/internal/amd64/gsubr.go

// Derived from Inferno utils/6c/txt.c
// http://code.google.com/p/inferno-os/source/browse/utils/6c/txt.c
//
// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
// Portions Copyright © 1997-1999 Vita Nuova Limited
// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
// Portions Copyright © 2004,2006 Bruce Ellis
// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
// Portions Copyright © 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package amd64

import (
	"cmd/compile/internal/big"
	"cmd/compile/internal/gc"
	"cmd/internal/obj"
	"cmd/internal/obj/x86"
	"fmt"
)

var resvd = []int{
	x86.REG_DI, // for movstring
	x86.REG_SI, // for movstring

	x86.REG_AX, // for divide
	x86.REG_CX, // for shift
	x86.REG_DX, // for divide
	x86.REG_SP, // for stack
}

/*
 * generate
 *	as $c, reg
 */
func gconreg(as obj.As, c int64, reg int) {
	var nr gc.Node

	switch as {
	case x86.AADDL,
		x86.AMOVL,
		x86.ALEAL:
		gc.Nodreg(&nr, gc.Types[gc.TINT32], reg)

	default:
		gc.Nodreg(&nr, gc.Types[gc.TINT64], reg)
	}

	ginscon(as, c, &nr)
}

/*
 * generate
 *	as $c, n
 */
func ginscon(as obj.As, c int64, n2 *gc.Node) {
	var n1 gc.Node

	switch as {
	case x86.AADDL,
		x86.AMOVL,
		x86.ALEAL:
		gc.Nodconst(&n1, gc.Types[gc.TINT32], c)

	default:
		gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
	}

	if as != x86.AMOVQ && (c < -(1<<31) || c >= 1<<31) {
		// cannot have 64-bit immediate in ADD, etc.
		// instead, MOV into register first.
		var ntmp gc.Node
		gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)

		gins(x86.AMOVQ, &n1, &ntmp)
		gins(as, &ntmp, n2)
		gc.Regfree(&ntmp)
		return
	}

	gins(as, &n1, n2)
}

func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
	if t.IsInteger() && n1.Op == gc.OLITERAL && gc.Smallintconst(n1) && n2.Op != gc.OLITERAL {
		// Reverse comparison to place constant last.
		op = gc.Brrev(op)
		n1, n2 = n2, n1
	}
	// General case.
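	// Operands that are already directly addressable (a non-heap name or
	// an indirect through a register) or that are small integer constants
	// are used as-is; everything else is evaluated with Cgen into a
	// scratch register and then moved into a register of the comparison
	// type before the CMP is issued.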
	var r1, r2, g1, g2 gc.Node

	// A special case to make write barriers more efficient.
	// Comparing the first field of a named struct can be done directly.
	base := n1
	if n1.Op == gc.ODOT && n1.Left.Type.IsStruct() && n1.Left.Type.Field(0).Sym == n1.Sym {
		base = n1.Left
	}

	if base.Op == gc.ONAME && base.Class != gc.PAUTOHEAP || n1.Op == gc.OINDREG {
		r1 = *n1
	} else {
		gc.Regalloc(&r1, t, n1)
		gc.Regalloc(&g1, n1.Type, &r1)
		gc.Cgen(n1, &g1)
		gmove(&g1, &r1)
	}
	if n2.Op == gc.OLITERAL && t.IsInteger() && gc.Smallintconst(n2) {
		r2 = *n2
	} else {
		gc.Regalloc(&r2, t, n2)
		gc.Regalloc(&g2, n1.Type, &r2)
		gc.Cgen(n2, &g2)
		gmove(&g2, &r2)
	}
	gins(optoas(gc.OCMP, t), &r1, &r2)
	if r1.Op == gc.OREGISTER {
		gc.Regfree(&g1)
		gc.Regfree(&r1)
	}
	if r2.Op == gc.OREGISTER {
		gc.Regfree(&g2)
		gc.Regfree(&r2)
	}
	return gc.Gbranch(optoas(op, t), nil, likely)
}

func ginsboolval(a obj.As, n *gc.Node) {
	gins(jmptoset(a), nil, n)
}

// set up nodes representing 2^63
var (
	bigi         gc.Node
	bigf         gc.Node
	bignodes_did bool
)

func bignodes() {
	if bignodes_did {
		return
	}
	bignodes_did = true

	var i big.Int
	i.SetInt64(1)
	i.Lsh(&i, 63)

	gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 0)
	bigi.SetBigInt(&i)

	bigi.Convconst(&bigf, gc.Types[gc.TFLOAT64])
}

/*
 * generate move:
 *	t = f
 * hard part is conversions.
 */
func gmove(f *gc.Node, t *gc.Node) {
	if gc.Debug['M'] != 0 {
		fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, gc.FmtLong), gc.Nconv(t, gc.FmtLong))
	}

	ft := gc.Simsimtype(f.Type)
	tt := gc.Simsimtype(t.Type)
	cvt := t.Type

	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
		gc.Complexmove(f, t)
		return
	}

	// cannot have two memory operands
	var a obj.As
	if gc.Ismem(f) && gc.Ismem(t) {
		goto hard
	}

	// convert constant to desired type
	if f.Op == gc.OLITERAL {
		var con gc.Node
		f.Convconst(&con, t.Type)
		f = &con
		ft = tt // so big switch will choose a simple mov

		// some constants can't move directly to memory.
		if gc.Ismem(t) {
			// float constants come from memory.
			if gc.Isfloat[tt] {
				goto hard
			}

			// 64-bit immediates are really 32-bit sign-extended
			// unless moving into a register.
			if gc.Isint[tt] {
				if i := con.Int64(); int64(int32(i)) != i {
					goto hard
				}
			}
		}
	}

	// value -> value copy, only one memory operand.
	// figure out the instruction to use.
	// break out of switch for one-instruction gins.
	// goto rdst for "destination must be register".
	// goto hard for "convert to cvt type first".
	// otherwise handle and return.
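	// The switch below keys on the source and destination simple types
	// packed into one value: the source type in the upper 16 bits and
	// the destination type in the lower 16 bits.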

	switch uint32(ft)<<16 | uint32(tt) {
	default:
		gc.Dump("f", f)
		gc.Dump("t", t)
		gc.Fatalf("gmove %v -> %v", gc.Tconv(f.Type, gc.FmtLong), gc.Tconv(t.Type, gc.FmtLong))

		/*
		 * integer copy and truncate
		 */
	case gc.TINT8<<16 | gc.TINT8, // same size
		gc.TINT8<<16 | gc.TUINT8,
		gc.TUINT8<<16 | gc.TINT8,
		gc.TUINT8<<16 | gc.TUINT8,
		gc.TINT16<<16 | gc.TINT8,
		// truncate
		gc.TUINT16<<16 | gc.TINT8,
		gc.TINT32<<16 | gc.TINT8,
		gc.TUINT32<<16 | gc.TINT8,
		gc.TINT64<<16 | gc.TINT8,
		gc.TUINT64<<16 | gc.TINT8,
		gc.TINT16<<16 | gc.TUINT8,
		gc.TUINT16<<16 | gc.TUINT8,
		gc.TINT32<<16 | gc.TUINT8,
		gc.TUINT32<<16 | gc.TUINT8,
		gc.TINT64<<16 | gc.TUINT8,
		gc.TUINT64<<16 | gc.TUINT8:
		a = x86.AMOVB

	case gc.TINT16<<16 | gc.TINT16, // same size
		gc.TINT16<<16 | gc.TUINT16,
		gc.TUINT16<<16 | gc.TINT16,
		gc.TUINT16<<16 | gc.TUINT16,
		gc.TINT32<<16 | gc.TINT16,
		// truncate
		gc.TUINT32<<16 | gc.TINT16,
		gc.TINT64<<16 | gc.TINT16,
		gc.TUINT64<<16 | gc.TINT16,
		gc.TINT32<<16 | gc.TUINT16,
		gc.TUINT32<<16 | gc.TUINT16,
		gc.TINT64<<16 | gc.TUINT16,
		gc.TUINT64<<16 | gc.TUINT16:
		a = x86.AMOVW

	case gc.TINT32<<16 | gc.TINT32, // same size
		gc.TINT32<<16 | gc.TUINT32,
		gc.TUINT32<<16 | gc.TINT32,
		gc.TUINT32<<16 | gc.TUINT32:
		a = x86.AMOVL

	case gc.TINT64<<16 | gc.TINT32, // truncate
		gc.TUINT64<<16 | gc.TINT32,
		gc.TINT64<<16 | gc.TUINT32,
		gc.TUINT64<<16 | gc.TUINT32:
		a = x86.AMOVQL

	case gc.TINT64<<16 | gc.TINT64, // same size
		gc.TINT64<<16 | gc.TUINT64,
		gc.TUINT64<<16 | gc.TINT64,
		gc.TUINT64<<16 | gc.TUINT64:
		a = x86.AMOVQ

		/*
		 * integer up-conversions
		 */
	case gc.TINT8<<16 | gc.TINT16, // sign extend int8
		gc.TINT8<<16 | gc.TUINT16:
		a = x86.AMOVBWSX

		goto rdst

	case gc.TINT8<<16 | gc.TINT32,
		gc.TINT8<<16 | gc.TUINT32:
		a = x86.AMOVBLSX
		goto rdst

	case gc.TINT8<<16 | gc.TINT64,
		gc.TINT8<<16 | gc.TUINT64:
		a = x86.AMOVBQSX
		goto rdst

	case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
		gc.TUINT8<<16 | gc.TUINT16:
		a = x86.AMOVBWZX

		goto rdst

	case gc.TUINT8<<16 | gc.TINT32,
		gc.TUINT8<<16 | gc.TUINT32:
		a = x86.AMOVBLZX
		goto rdst

	case gc.TUINT8<<16 | gc.TINT64,
		gc.TUINT8<<16 | gc.TUINT64:
		a = x86.AMOVBQZX
		goto rdst

	case gc.TINT16<<16 | gc.TINT32, // sign extend int16
		gc.TINT16<<16 | gc.TUINT32:
		a = x86.AMOVWLSX

		goto rdst

	case gc.TINT16<<16 | gc.TINT64,
		gc.TINT16<<16 | gc.TUINT64:
		a = x86.AMOVWQSX
		goto rdst

	case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
		gc.TUINT16<<16 | gc.TUINT32:
		a = x86.AMOVWLZX

		goto rdst

	case gc.TUINT16<<16 | gc.TINT64,
		gc.TUINT16<<16 | gc.TUINT64:
		a = x86.AMOVWQZX
		goto rdst

	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
		gc.TINT32<<16 | gc.TUINT64:
		a = x86.AMOVLQSX

		goto rdst

		// AMOVL into a register zeros the top of the register,
		// so this is not always necessary, but if we rely on AMOVL
		// the optimizer is almost certain to screw with us.
	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
		gc.TUINT32<<16 | gc.TUINT64:
		a = x86.AMOVLQZX

		goto rdst

		/*
		 * float to integer
		 */
	case gc.TFLOAT32<<16 | gc.TINT32:
		a = x86.ACVTTSS2SL

		goto rdst

	case gc.TFLOAT64<<16 | gc.TINT32:
		a = x86.ACVTTSD2SL
		goto rdst

	case gc.TFLOAT32<<16 | gc.TINT64:
		a = x86.ACVTTSS2SQ
		goto rdst

	case gc.TFLOAT64<<16 | gc.TINT64:
		a = x86.ACVTTSD2SQ
		goto rdst

		// convert via int32.
	case gc.TFLOAT32<<16 | gc.TINT16,
		gc.TFLOAT32<<16 | gc.TINT8,
		gc.TFLOAT32<<16 | gc.TUINT16,
		gc.TFLOAT32<<16 | gc.TUINT8,
		gc.TFLOAT64<<16 | gc.TINT16,
		gc.TFLOAT64<<16 | gc.TINT8,
		gc.TFLOAT64<<16 | gc.TUINT16,
		gc.TFLOAT64<<16 | gc.TUINT8:
		cvt = gc.Types[gc.TINT32]

		goto hard

		// convert via int64.
	case gc.TFLOAT32<<16 | gc.TUINT32,
		gc.TFLOAT64<<16 | gc.TUINT32:
		cvt = gc.Types[gc.TINT64]

		goto hard

		// algorithm is:
		//	if small enough, use native float64 -> int64 conversion.
		//	otherwise, subtract 2^63, convert, and add it back.
	case gc.TFLOAT32<<16 | gc.TUINT64,
		gc.TFLOAT64<<16 | gc.TUINT64:
		a := x86.ACVTTSS2SQ

		if ft == gc.TFLOAT64 {
			a = x86.ACVTTSD2SQ
		}
		bignodes()
		var r1 gc.Node
		gc.Regalloc(&r1, gc.Types[ft], nil)
		var r2 gc.Node
		gc.Regalloc(&r2, gc.Types[tt], t)
		var r3 gc.Node
		gc.Regalloc(&r3, gc.Types[ft], nil)
		var r4 gc.Node
		gc.Regalloc(&r4, gc.Types[tt], nil)
		gins(optoas(gc.OAS, f.Type), f, &r1)
		gins(optoas(gc.OCMP, f.Type), &bigf, &r1)
		p1 := gc.Gbranch(optoas(gc.OLE, f.Type), nil, +1)
		gins(a, &r1, &r2)
		p2 := gc.Gbranch(obj.AJMP, nil, 0)
		gc.Patch(p1, gc.Pc)
		gins(optoas(gc.OAS, f.Type), &bigf, &r3)
		gins(optoas(gc.OSUB, f.Type), &r3, &r1)
		gins(a, &r1, &r2)
		gins(x86.AMOVQ, &bigi, &r4)
		gins(x86.AXORQ, &r4, &r2)
		gc.Patch(p2, gc.Pc)
		gmove(&r2, t)
		gc.Regfree(&r4)
		gc.Regfree(&r3)
		gc.Regfree(&r2)
		gc.Regfree(&r1)
		return

		/*
		 * integer to float
		 */
	case gc.TINT32<<16 | gc.TFLOAT32:
		a = x86.ACVTSL2SS

		goto rdst

	case gc.TINT32<<16 | gc.TFLOAT64:
		a = x86.ACVTSL2SD
		goto rdst

	case gc.TINT64<<16 | gc.TFLOAT32:
		a = x86.ACVTSQ2SS
		goto rdst

	case gc.TINT64<<16 | gc.TFLOAT64:
		a = x86.ACVTSQ2SD
		goto rdst

		// convert via int32
	case gc.TINT16<<16 | gc.TFLOAT32,
		gc.TINT16<<16 | gc.TFLOAT64,
		gc.TINT8<<16 | gc.TFLOAT32,
		gc.TINT8<<16 | gc.TFLOAT64,
		gc.TUINT16<<16 | gc.TFLOAT32,
		gc.TUINT16<<16 | gc.TFLOAT64,
		gc.TUINT8<<16 | gc.TFLOAT32,
		gc.TUINT8<<16 | gc.TFLOAT64:
		cvt = gc.Types[gc.TINT32]

		goto hard

		// convert via int64.
	case gc.TUINT32<<16 | gc.TFLOAT32,
		gc.TUINT32<<16 | gc.TFLOAT64:
		cvt = gc.Types[gc.TINT64]

		goto hard

		// algorithm is:
		//	if small enough, use native int64 -> float conversion.
		//	otherwise, halve (rounding to odd?), convert, and double.
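		// For a value with the high bit set: shift it right by one,
		// OR the discarded low bit back in (so the final rounding
		// still sees it), convert the now in-range value, then double
		// the result by adding it to itself.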
	case gc.TUINT64<<16 | gc.TFLOAT32,
		gc.TUINT64<<16 | gc.TFLOAT64:
		a := x86.ACVTSQ2SS

		if tt == gc.TFLOAT64 {
			a = x86.ACVTSQ2SD
		}
		var zero gc.Node
		gc.Nodconst(&zero, gc.Types[gc.TUINT64], 0)
		var one gc.Node
		gc.Nodconst(&one, gc.Types[gc.TUINT64], 1)
		var r1 gc.Node
		gc.Regalloc(&r1, f.Type, f)
		var r2 gc.Node
		gc.Regalloc(&r2, t.Type, t)
		var r3 gc.Node
		gc.Regalloc(&r3, f.Type, nil)
		var r4 gc.Node
		gc.Regalloc(&r4, f.Type, nil)
		gmove(f, &r1)
		gins(x86.ACMPQ, &r1, &zero)
		p1 := gc.Gbranch(x86.AJLT, nil, +1)
		gins(a, &r1, &r2)
		p2 := gc.Gbranch(obj.AJMP, nil, 0)
		gc.Patch(p1, gc.Pc)
		gmove(&r1, &r3)
		gins(x86.ASHRQ, &one, &r3)
		gmove(&r1, &r4)
		gins(x86.AANDL, &one, &r4)
		gins(x86.AORQ, &r4, &r3)
		gins(a, &r3, &r2)
		gins(optoas(gc.OADD, t.Type), &r2, &r2)
		gc.Patch(p2, gc.Pc)
		gmove(&r2, t)
		gc.Regfree(&r4)
		gc.Regfree(&r3)
		gc.Regfree(&r2)
		gc.Regfree(&r1)
		return

		/*
		 * float to float
		 */
	case gc.TFLOAT32<<16 | gc.TFLOAT32:
		a = x86.AMOVSS

	case gc.TFLOAT64<<16 | gc.TFLOAT64:
		a = x86.AMOVSD

	case gc.TFLOAT32<<16 | gc.TFLOAT64:
		a = x86.ACVTSS2SD
		goto rdst

	case gc.TFLOAT64<<16 | gc.TFLOAT32:
		a = x86.ACVTSD2SS
		goto rdst
	}

	gins(a, f, t)
	return

	// requires register destination
rdst:
	{
		var r1 gc.Node
		gc.Regalloc(&r1, t.Type, t)

		gins(a, f, &r1)
		gmove(&r1, t)
		gc.Regfree(&r1)
		return
	}

	// requires register intermediate
hard:
	var r1 gc.Node
	gc.Regalloc(&r1, cvt, t)

	gmove(f, &r1)
	gmove(&r1, t)
	gc.Regfree(&r1)
	return
}

func samaddr(f *gc.Node, t *gc.Node) bool {
	if f.Op != t.Op {
		return false
	}

	switch f.Op {
	case gc.OREGISTER:
		if f.Reg != t.Reg {
			break
		}
		return true
	}

	return false
}

/*
 * generate one instruction:
 *	as f, t
 */
func gins(as obj.As, f *gc.Node, t *gc.Node) *obj.Prog {
	//	Node nod;

	//	if(f != N && f->op == OINDEX) {
	//		gc.Regalloc(&nod, &regnode, Z);
	//		v = constnode.vconst;
	//		gc.Cgen(f->right, &nod);
	//		constnode.vconst = v;
	//		idx.reg = nod.reg;
	//		gc.Regfree(&nod);
	//	}
	//	if(t != N && t->op == OINDEX) {
	//		gc.Regalloc(&nod, &regnode, Z);
	//		v = constnode.vconst;
	//		gc.Cgen(t->right, &nod);
	//		constnode.vconst = v;
	//		idx.reg = nod.reg;
	//		gc.Regfree(&nod);
	//	}

	if f != nil && f.Op == gc.OADDR && (as == x86.AMOVL || as == x86.AMOVQ) {
		// Turn MOVL $xxx into LEAL xxx.
		// These should be equivalent but most of the backend
		// only expects to see LEAL, because that's what we had
		// historically generated. Various hidden assumptions are baked in by now.
		if as == x86.AMOVL {
			as = x86.ALEAL
		} else {
			as = x86.ALEAQ
		}
		f = f.Left
	}

	switch as {
	case x86.AMOVB,
		x86.AMOVW,
		x86.AMOVL,
		x86.AMOVQ,
		x86.AMOVSS,
		x86.AMOVSD:
		if f != nil && t != nil && samaddr(f, t) {
			return nil
		}

	case x86.ALEAQ:
		if f != nil && gc.Isconst(f, gc.CTNIL) {
			gc.Fatalf("gins LEAQ nil %v", f.Type)
		}
	}

	p := gc.Prog(as)
	gc.Naddr(&p.From, f)
	gc.Naddr(&p.To, t)

	if gc.Debug['g'] != 0 {
		fmt.Printf("%v\n", p)
	}

	w := int32(0)
	switch as {
	case x86.AMOVB:
		w = 1

	case x86.AMOVW:
		w = 2

	case x86.AMOVL:
		w = 4

	case x86.AMOVQ:
		w = 8
	}

	if w != 0 && ((f != nil && p.From.Width < int64(w)) || (t != nil && p.To.Width > int64(w))) {
		gc.Dump("f", f)
		gc.Dump("t", t)
		gc.Fatalf("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
	}

	if p.To.Type == obj.TYPE_ADDR && w > 0 {
		gc.Fatalf("bad use of addr: %v", p)
	}

	return p
}

func ginsnop() {
	// This is actually not the x86 NOP anymore,
	// but at the point where it gets used, AX is dead
	// so it's okay if we lose the high bits.
	var reg gc.Node
	gc.Nodreg(&reg, gc.Types[gc.TINT], x86.REG_AX)
	gins(x86.AXCHGL, &reg, &reg)
}

/*
 * return Axxx for Oxxx on type t.
 */
func optoas(op gc.Op, t *gc.Type) obj.As {
	if t == nil {
		gc.Fatalf("optoas: t is nil")
	}

	// avoid constant conversions in switches below
	const (
		OMINUS_  = uint32(gc.OMINUS) << 16
		OLSH_    = uint32(gc.OLSH) << 16
		ORSH_    = uint32(gc.ORSH) << 16
		OADD_    = uint32(gc.OADD) << 16
		OSUB_    = uint32(gc.OSUB) << 16
		OMUL_    = uint32(gc.OMUL) << 16
		ODIV_    = uint32(gc.ODIV) << 16
		OMOD_    = uint32(gc.OMOD) << 16
		OOR_     = uint32(gc.OOR) << 16
		OAND_    = uint32(gc.OAND) << 16
		OXOR_    = uint32(gc.OXOR) << 16
		OEQ_     = uint32(gc.OEQ) << 16
		ONE_     = uint32(gc.ONE) << 16
		OLT_     = uint32(gc.OLT) << 16
		OLE_     = uint32(gc.OLE) << 16
		OGE_     = uint32(gc.OGE) << 16
		OGT_     = uint32(gc.OGT) << 16
		OCMP_    = uint32(gc.OCMP) << 16
		OPS_     = uint32(gc.OPS) << 16
		OPC_     = uint32(gc.OPC) << 16
		OAS_     = uint32(gc.OAS) << 16
		OHMUL_   = uint32(gc.OHMUL) << 16
		OSQRT_   = uint32(gc.OSQRT) << 16
		OADDR_   = uint32(gc.OADDR) << 16
		OINC_    = uint32(gc.OINC) << 16
		ODEC_    = uint32(gc.ODEC) << 16
		OLROT_   = uint32(gc.OLROT) << 16
		ORROTC_  = uint32(gc.ORROTC) << 16
		OEXTEND_ = uint32(gc.OEXTEND) << 16
	)

	a := obj.AXXX
	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
	default:
		gc.Fatalf("optoas: no entry %v-%v", op, t)

	case OADDR_ | gc.TPTR32:
		a = x86.ALEAL

	case OADDR_ | gc.TPTR64:
		a = x86.ALEAQ

	case OEQ_ | gc.TBOOL,
		OEQ_ | gc.TINT8,
		OEQ_ | gc.TUINT8,
		OEQ_ | gc.TINT16,
		OEQ_ | gc.TUINT16,
		OEQ_ | gc.TINT32,
		OEQ_ | gc.TUINT32,
		OEQ_ | gc.TINT64,
		OEQ_ | gc.TUINT64,
		OEQ_ | gc.TPTR32,
		OEQ_ | gc.TPTR64,
		OEQ_ | gc.TFLOAT32,
		OEQ_ | gc.TFLOAT64:
		a = x86.AJEQ

	case ONE_ | gc.TBOOL,
		ONE_ | gc.TINT8,
		ONE_ | gc.TUINT8,
		ONE_ | gc.TINT16,
		ONE_ | gc.TUINT16,
		ONE_ | gc.TINT32,
		ONE_ | gc.TUINT32,
		ONE_ | gc.TINT64,
		ONE_ | gc.TUINT64,
		ONE_ | gc.TPTR32,
		ONE_ | gc.TPTR64,
		ONE_ | gc.TFLOAT32,
		ONE_ | gc.TFLOAT64:
		a = x86.AJNE

	case OPS_ | gc.TBOOL,
		OPS_ | gc.TINT8,
		OPS_ | gc.TUINT8,
		OPS_ | gc.TINT16,
		OPS_ | gc.TUINT16,
		OPS_ | gc.TINT32,
		OPS_ | gc.TUINT32,
		OPS_ | gc.TINT64,
		OPS_ | gc.TUINT64,
		OPS_ | gc.TPTR32,
		OPS_ | gc.TPTR64,
		OPS_ | gc.TFLOAT32,
		OPS_ | gc.TFLOAT64:
		a = x86.AJPS

	case OPC_ | gc.TBOOL,
		OPC_ | gc.TINT8,
		OPC_ | gc.TUINT8,
		OPC_ | gc.TINT16,
		OPC_ | gc.TUINT16,
		OPC_ | gc.TINT32,
		OPC_ | gc.TUINT32,
		OPC_ | gc.TINT64,
		OPC_ | gc.TUINT64,
		OPC_ | gc.TPTR32,
		OPC_ | gc.TPTR64,
		OPC_ | gc.TFLOAT32,
		OPC_ | gc.TFLOAT64:
		a = x86.AJPC

	case OLT_ | gc.TINT8,
		OLT_ | gc.TINT16,
		OLT_ | gc.TINT32,
		OLT_ | gc.TINT64:
		a = x86.AJLT

	case OLT_ | gc.TUINT8,
		OLT_ | gc.TUINT16,
		OLT_ | gc.TUINT32,
		OLT_ | gc.TUINT64:
		a = x86.AJCS

	case OLE_ | gc.TINT8,
		OLE_ | gc.TINT16,
		OLE_ | gc.TINT32,
		OLE_ | gc.TINT64:
		a = x86.AJLE

	case OLE_ | gc.TUINT8,
		OLE_ | gc.TUINT16,
		OLE_ | gc.TUINT32,
		OLE_ | gc.TUINT64:
		a = x86.AJLS

	case OGT_ | gc.TINT8,
		OGT_ | gc.TINT16,
		OGT_ | gc.TINT32,
		OGT_ | gc.TINT64:
		a = x86.AJGT

	case OGT_ | gc.TUINT8,
		OGT_ | gc.TUINT16,
		OGT_ | gc.TUINT32,
		OGT_ | gc.TUINT64,
		OLT_ | gc.TFLOAT32,
		OLT_ | gc.TFLOAT64:
		a = x86.AJHI

	case OGE_ | gc.TINT8,
		OGE_ | gc.TINT16,
		OGE_ | gc.TINT32,
		OGE_ | gc.TINT64:
		a = x86.AJGE

	case OGE_ | gc.TUINT8,
		OGE_ | gc.TUINT16,
		OGE_ | gc.TUINT32,
		OGE_ | gc.TUINT64,
		OLE_ | gc.TFLOAT32,
		OLE_ | gc.TFLOAT64:
		a = x86.AJCC

	case OCMP_ | gc.TBOOL,
		OCMP_ | gc.TINT8,
		OCMP_ | gc.TUINT8:
		a = x86.ACMPB

	case OCMP_ | gc.TINT16,
		OCMP_ | gc.TUINT16:
		a = x86.ACMPW

	case OCMP_ | gc.TINT32,
		OCMP_ | gc.TUINT32,
		OCMP_ | gc.TPTR32:
		a = x86.ACMPL

	case OCMP_ | gc.TINT64,
		OCMP_ | gc.TUINT64,
		OCMP_ | gc.TPTR64:
		a = x86.ACMPQ

	case OCMP_ | gc.TFLOAT32:
		a = x86.AUCOMISS

	case OCMP_ | gc.TFLOAT64:
		a = x86.AUCOMISD

	case OAS_ | gc.TBOOL,
		OAS_ | gc.TINT8,
		OAS_ | gc.TUINT8:
		a = x86.AMOVB

	case OAS_ | gc.TINT16,
		OAS_ | gc.TUINT16:
		a = x86.AMOVW

	case OAS_ | gc.TINT32,
		OAS_ | gc.TUINT32,
		OAS_ | gc.TPTR32:
		a = x86.AMOVL

	case OAS_ | gc.TINT64,
		OAS_ | gc.TUINT64,
		OAS_ | gc.TPTR64:
		a = x86.AMOVQ

	case OAS_ | gc.TFLOAT32:
		a = x86.AMOVSS

	case OAS_ | gc.TFLOAT64:
		a = x86.AMOVSD

	case OADD_ | gc.TINT8,
		OADD_ | gc.TUINT8:
		a = x86.AADDB

	case OADD_ | gc.TINT16,
		OADD_ | gc.TUINT16:
		a = x86.AADDW

	case OADD_ | gc.TINT32,
		OADD_ | gc.TUINT32,
		OADD_ | gc.TPTR32:
		a = x86.AADDL

	case OADD_ | gc.TINT64,
		OADD_ | gc.TUINT64,
		OADD_ | gc.TPTR64:
		a = x86.AADDQ

	case OADD_ | gc.TFLOAT32:
		a = x86.AADDSS

	case OADD_ | gc.TFLOAT64:
		a = x86.AADDSD

	case OSUB_ | gc.TINT8,
		OSUB_ | gc.TUINT8:
		a = x86.ASUBB

	case OSUB_ | gc.TINT16,
		OSUB_ | gc.TUINT16:
		a = x86.ASUBW

	case OSUB_ | gc.TINT32,
		OSUB_ | gc.TUINT32,
		OSUB_ | gc.TPTR32:
		a = x86.ASUBL

	case OSUB_ | gc.TINT64,
		OSUB_ | gc.TUINT64,
		OSUB_ | gc.TPTR64:
		a = x86.ASUBQ

	case OSUB_ | gc.TFLOAT32:
		a = x86.ASUBSS

	case OSUB_ | gc.TFLOAT64:
		a = x86.ASUBSD

	case OINC_ | gc.TINT8,
		OINC_ | gc.TUINT8:
		a = x86.AINCB

	case OINC_ | gc.TINT16,
		OINC_ | gc.TUINT16:
		a = x86.AINCW

	case OINC_ | gc.TINT32,
		OINC_ | gc.TUINT32,
		OINC_ | gc.TPTR32:
		a = x86.AINCL

	case OINC_ | gc.TINT64,
		OINC_ | gc.TUINT64,
		OINC_ | gc.TPTR64:
		a = x86.AINCQ

	case ODEC_ | gc.TINT8,
		ODEC_ | gc.TUINT8:
		a = x86.ADECB

	case ODEC_ | gc.TINT16,
		ODEC_ | gc.TUINT16:
		a = x86.ADECW

	case ODEC_ | gc.TINT32,
		ODEC_ | gc.TUINT32,
		ODEC_ | gc.TPTR32:
		a = x86.ADECL

	case ODEC_ | gc.TINT64,
		ODEC_ | gc.TUINT64,
		ODEC_ | gc.TPTR64:
		a = x86.ADECQ

	case OMINUS_ | gc.TINT8,
		OMINUS_ | gc.TUINT8:
		a = x86.ANEGB

	case OMINUS_ | gc.TINT16,
		OMINUS_ | gc.TUINT16:
		a = x86.ANEGW

	case OMINUS_ | gc.TINT32,
		OMINUS_ | gc.TUINT32,
		OMINUS_ | gc.TPTR32:
		a = x86.ANEGL

	case OMINUS_ | gc.TINT64,
		OMINUS_ | gc.TUINT64,
		OMINUS_ | gc.TPTR64:
		a = x86.ANEGQ

	case OAND_ | gc.TBOOL,
		OAND_ | gc.TINT8,
		OAND_ | gc.TUINT8:
		a = x86.AANDB

	case OAND_ | gc.TINT16,
		OAND_ | gc.TUINT16:
		a = x86.AANDW

	case OAND_ | gc.TINT32,
		OAND_ | gc.TUINT32,
		OAND_ | gc.TPTR32:
		a = x86.AANDL

	case OAND_ | gc.TINT64,
		OAND_ | gc.TUINT64,
		OAND_ | gc.TPTR64:
		a = x86.AANDQ

	case OOR_ | gc.TBOOL,
		OOR_ | gc.TINT8,
		OOR_ | gc.TUINT8:
		a = x86.AORB

	case OOR_ | gc.TINT16,
		OOR_ | gc.TUINT16:
		a = x86.AORW

	case OOR_ | gc.TINT32,
		OOR_ | gc.TUINT32,
		OOR_ | gc.TPTR32:
		a = x86.AORL

	case OOR_ | gc.TINT64,
		OOR_ | gc.TUINT64,
		OOR_ | gc.TPTR64:
		a = x86.AORQ

	case OXOR_ | gc.TINT8,
		OXOR_ | gc.TUINT8:
		a = x86.AXORB

	case OXOR_ | gc.TINT16,
		OXOR_ | gc.TUINT16:
		a = x86.AXORW

	case OXOR_ | gc.TINT32,
		OXOR_ | gc.TUINT32,
		OXOR_ | gc.TPTR32:
		a = x86.AXORL

	case OXOR_ | gc.TINT64,
		OXOR_ | gc.TUINT64,
		OXOR_ | gc.TPTR64:
		a = x86.AXORQ

	case OLROT_ | gc.TINT8,
		OLROT_ | gc.TUINT8:
		a = x86.AROLB

	case OLROT_ | gc.TINT16,
		OLROT_ | gc.TUINT16:
		a = x86.AROLW

	case OLROT_ | gc.TINT32,
		OLROT_ | gc.TUINT32,
		OLROT_ | gc.TPTR32:
		a = x86.AROLL

	case OLROT_ | gc.TINT64,
		OLROT_ | gc.TUINT64,
		OLROT_ | gc.TPTR64:
		a = x86.AROLQ

	case OLSH_ | gc.TINT8,
		OLSH_ | gc.TUINT8:
		a = x86.ASHLB

	case OLSH_ | gc.TINT16,
		OLSH_ | gc.TUINT16:
		a = x86.ASHLW

	case OLSH_ | gc.TINT32,
		OLSH_ | gc.TUINT32,
		OLSH_ | gc.TPTR32:
		a = x86.ASHLL

	case OLSH_ | gc.TINT64,
		OLSH_ | gc.TUINT64,
		OLSH_ | gc.TPTR64:
		a = x86.ASHLQ

	case ORSH_ | gc.TUINT8:
		a = x86.ASHRB

	case ORSH_ | gc.TUINT16:
		a = x86.ASHRW

	case ORSH_ | gc.TUINT32,
		ORSH_ | gc.TPTR32:
		a = x86.ASHRL

	case ORSH_ | gc.TUINT64,
		ORSH_ | gc.TPTR64:
		a = x86.ASHRQ

	case ORSH_ | gc.TINT8:
		a = x86.ASARB

	case ORSH_ | gc.TINT16:
		a = x86.ASARW

	case ORSH_ | gc.TINT32:
		a = x86.ASARL

	case ORSH_ | gc.TINT64:
		a = x86.ASARQ

	case ORROTC_ | gc.TINT8,
		ORROTC_ | gc.TUINT8:
		a = x86.ARCRB

	case ORROTC_ | gc.TINT16,
		ORROTC_ | gc.TUINT16:
		a = x86.ARCRW

	case ORROTC_ | gc.TINT32,
		ORROTC_ | gc.TUINT32:
		a = x86.ARCRL

	case ORROTC_ | gc.TINT64,
		ORROTC_ | gc.TUINT64:
		a = x86.ARCRQ

	case OHMUL_ | gc.TINT8,
		OMUL_ | gc.TINT8,
		OMUL_ | gc.TUINT8:
		a = x86.AIMULB

	case OHMUL_ | gc.TINT16,
		OMUL_ | gc.TINT16,
		OMUL_ | gc.TUINT16:
		a = x86.AIMULW

	case OHMUL_ | gc.TINT32,
		OMUL_ | gc.TINT32,
		OMUL_ | gc.TUINT32,
		OMUL_ | gc.TPTR32:
		a = x86.AIMULL

	case OHMUL_ | gc.TINT64,
		OMUL_ | gc.TINT64,
		OMUL_ | gc.TUINT64,
		OMUL_ | gc.TPTR64:
		a = x86.AIMULQ

	case OHMUL_ | gc.TUINT8:
		a = x86.AMULB

	case OHMUL_ | gc.TUINT16:
		a = x86.AMULW

	case OHMUL_ | gc.TUINT32,
		OHMUL_ | gc.TPTR32:
		a = x86.AMULL

	case OHMUL_ | gc.TUINT64,
		OHMUL_ | gc.TPTR64:
		a = x86.AMULQ

	case OMUL_ | gc.TFLOAT32:
		a = x86.AMULSS

	case OMUL_ | gc.TFLOAT64:
		a = x86.AMULSD

	case ODIV_ | gc.TINT8,
		OMOD_ | gc.TINT8:
		a = x86.AIDIVB

	case ODIV_ | gc.TUINT8,
		OMOD_ | gc.TUINT8:
		a = x86.ADIVB

	case ODIV_ | gc.TINT16,
		OMOD_ | gc.TINT16:
		a = x86.AIDIVW

	case ODIV_ | gc.TUINT16,
		OMOD_ | gc.TUINT16:
		a = x86.ADIVW

	case ODIV_ | gc.TINT32,
		OMOD_ | gc.TINT32:
		a = x86.AIDIVL

	case ODIV_ | gc.TUINT32,
		ODIV_ | gc.TPTR32,
		OMOD_ | gc.TUINT32,
		OMOD_ | gc.TPTR32:
		a = x86.ADIVL

	case ODIV_ | gc.TINT64,
		OMOD_ | gc.TINT64:
		a = x86.AIDIVQ

	case ODIV_ | gc.TUINT64,
		ODIV_ | gc.TPTR64,
		OMOD_ | gc.TUINT64,
		OMOD_ | gc.TPTR64:
		a = x86.ADIVQ

	case OEXTEND_ | gc.TINT16:
		a = x86.ACWD

	case OEXTEND_ | gc.TINT32:
		a = x86.ACDQ

	case OEXTEND_ | gc.TINT64:
		a = x86.ACQO

	case ODIV_ | gc.TFLOAT32:
		a = x86.ADIVSS

	case ODIV_ | gc.TFLOAT64:
		a = x86.ADIVSD

	case OSQRT_ | gc.TFLOAT64:
		a = x86.ASQRTSD
	}

	return a
}

// jmptoset returns ASETxx for AJxx.
func jmptoset(jmp obj.As) obj.As {
	switch jmp {
	case x86.AJEQ:
		return x86.ASETEQ
	case x86.AJNE:
		return x86.ASETNE
	case x86.AJLT:
		return x86.ASETLT
	case x86.AJCS:
		return x86.ASETCS
	case x86.AJLE:
		return x86.ASETLE
	case x86.AJLS:
		return x86.ASETLS
	case x86.AJGT:
		return x86.ASETGT
	case x86.AJHI:
		return x86.ASETHI
	case x86.AJGE:
		return x86.ASETGE
	case x86.AJCC:
		return x86.ASETCC
	case x86.AJMI:
		return x86.ASETMI
	case x86.AJOC:
		return x86.ASETOC
	case x86.AJOS:
		return x86.ASETOS
	case x86.AJPC:
		return x86.ASETPC
	case x86.AJPL:
		return x86.ASETPL
	case x86.AJPS:
		return x86.ASETPS
	}
	gc.Fatalf("jmptoset: no entry for %v", jmp)
	panic("unreachable")
}

const (
	ODynam   = 1 << 0
	OAddable = 1 << 1
)

var clean [20]gc.Node

var cleani int = 0

func sudoclean() {
	if clean[cleani-1].Op != gc.OEMPTY {
		gc.Regfree(&clean[cleani-1])
	}
	if clean[cleani-2].Op != gc.OEMPTY {
		gc.Regfree(&clean[cleani-2])
	}
	cleani -= 2
}

/*
 * generate code to compute address of n,
 * a reference to a (perhaps nested) field inside
 * an array or struct.
 * return false on failure, true on success.
 * on success, leaves usable address in a.
 *
 * caller is responsible for calling sudoclean
 * after successful sudoaddable,
 * to release the register used for a.
 */
func sudoaddable(as obj.As, n *gc.Node, a *obj.Addr) bool {
	if n.Type == nil {
		return false
	}

	*a = obj.Addr{}

	switch n.Op {
	case gc.OLITERAL:
		if !gc.Isconst(n, gc.CTINT) {
			break
		}
		v := n.Int64()
		if v >= 32000 || v <= -32000 {
			break
		}
		switch as {
		default:
			return false

		case x86.AADDB,
			x86.AADDW,
			x86.AADDL,
			x86.AADDQ,
			x86.ASUBB,
			x86.ASUBW,
			x86.ASUBL,
			x86.ASUBQ,
			x86.AANDB,
			x86.AANDW,
			x86.AANDL,
			x86.AANDQ,
			x86.AORB,
			x86.AORW,
			x86.AORL,
			x86.AORQ,
			x86.AXORB,
			x86.AXORW,
			x86.AXORL,
			x86.AXORQ,
			x86.AINCB,
			x86.AINCW,
			x86.AINCL,
			x86.AINCQ,
			x86.ADECB,
			x86.ADECW,
			x86.ADECL,
			x86.ADECQ,
			x86.AMOVB,
			x86.AMOVW,
			x86.AMOVL,
			x86.AMOVQ:
			break
		}

		cleani += 2
		reg := &clean[cleani-1]
		reg1 := &clean[cleani-2]
		reg.Op = gc.OEMPTY
		reg1.Op = gc.OEMPTY
		gc.Naddr(a, n)
		return true

	case gc.ODOT,
		gc.ODOTPTR:
		cleani += 2
		reg := &clean[cleani-1]
		reg1 := &clean[cleani-2]
		reg.Op = gc.OEMPTY
		reg1.Op = gc.OEMPTY
		var nn *gc.Node
		var oary [10]int64
		o := gc.Dotoffset(n, oary[:], &nn)
		if nn == nil {
			sudoclean()
			return false
		}

		if nn.Addable && o == 1 && oary[0] >= 0 {
			// directly addressable set of DOTs
			n1 := *nn

			n1.Type = n.Type
			n1.Xoffset += oary[0]
			gc.Naddr(a, &n1)
			return true
		}

		gc.Regalloc(reg, gc.Types[gc.Tptr], nil)
		n1 := *reg
		n1.Op = gc.OINDREG
		if oary[0] >= 0 {
			gc.Agen(nn, reg)
			n1.Xoffset = oary[0]
		} else {
			gc.Cgen(nn, reg)
			gc.Cgen_checknil(reg)
			n1.Xoffset = -(oary[0] + 1)
		}

		for i := 1; i < o; i++ {
			if oary[i] >= 0 {
				gc.Fatalf("can't happen")
			}
			gins(movptr, &n1, reg)
			gc.Cgen_checknil(reg)
			n1.Xoffset = -(oary[i] + 1)
		}

		a.Type = obj.TYPE_NONE
		a.Index = x86.REG_NONE
		gc.Fixlargeoffset(&n1)
		gc.Naddr(a, &n1)
		return true

	case gc.OINDEX:
		return false
	}

	return false
}