github.com/sean-/go@v0.0.0-20151219100004-97f854cd7bb6/src/cmd/compile/internal/amd64/gsubr.go

// Derived from Inferno utils/6c/txt.c
// http://code.google.com/p/inferno-os/source/browse/utils/6c/txt.c
//
// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
// Portions Copyright © 1997-1999 Vita Nuova Limited
// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
// Portions Copyright © 2004,2006 Bruce Ellis
// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
// Portions Copyright © 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package amd64

import (
	"cmd/compile/internal/big"
	"cmd/compile/internal/gc"
	"cmd/internal/obj"
	"cmd/internal/obj/x86"
	"fmt"
)

var resvd = []int{
	x86.REG_DI, // for movstring
	x86.REG_SI, // for movstring

	x86.REG_AX, // for divide
	x86.REG_CX, // for shift
	x86.REG_DX, // for divide
	x86.REG_SP, // for stack
}

/*
 * generate
 *	as $c, reg
 */
func gconreg(as int, c int64, reg int) {
	var nr gc.Node

	switch as {
	case x86.AADDL,
		x86.AMOVL,
		x86.ALEAL:
		gc.Nodreg(&nr, gc.Types[gc.TINT32], reg)

	default:
		gc.Nodreg(&nr, gc.Types[gc.TINT64], reg)
	}

	ginscon(as, c, &nr)
}

/*
 * generate
 *	as $c, n
 */
func ginscon(as int, c int64, n2 *gc.Node) {
	var n1 gc.Node

	switch as {
	case x86.AADDL,
		x86.AMOVL,
		x86.ALEAL:
		gc.Nodconst(&n1, gc.Types[gc.TINT32], c)

	default:
		gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
	}

	if as != x86.AMOVQ && (c < -(1<<31) || c >= 1<<31) {
		// cannot have 64-bit immediate in ADD, etc.
		// instead, MOV into register first.
		var ntmp gc.Node
		gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)

		gins(x86.AMOVQ, &n1, &ntmp)
		gins(as, &ntmp, n2)
		gc.Regfree(&ntmp)
		return
	}

	gins(as, &n1, n2)
}

func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
	if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && gc.Smallintconst(n1) && n2.Op != gc.OLITERAL {
		// Reverse comparison to place constant last.
		op = gc.Brrev(op)
		n1, n2 = n2, n1
	}
	// General case.
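	// Each operand is loaded into a register of type t unless it is
	// already directly usable: a non-heap name or OINDREG for n1, or a
	// small integer constant for n2 (both checked below).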
	var r1, r2, g1, g2 gc.Node

	// A special case to make write barriers more efficient.
	// Comparing the first field of a named struct can be done directly.
	base := n1
	if n1.Op == gc.ODOT && n1.Left.Type.Etype == gc.TSTRUCT && n1.Left.Type.Type.Sym == n1.Right.Sym {
		base = n1.Left
	}

	if base.Op == gc.ONAME && base.Class&gc.PHEAP == 0 || n1.Op == gc.OINDREG {
		r1 = *n1
	} else {
		gc.Regalloc(&r1, t, n1)
		gc.Regalloc(&g1, n1.Type, &r1)
		gc.Cgen(n1, &g1)
		gmove(&g1, &r1)
	}
	if n2.Op == gc.OLITERAL && gc.Isint[t.Etype] && gc.Smallintconst(n2) {
		r2 = *n2
	} else {
		gc.Regalloc(&r2, t, n2)
		gc.Regalloc(&g2, n1.Type, &r2)
		gc.Cgen(n2, &g2)
		gmove(&g2, &r2)
	}
	gins(optoas(gc.OCMP, t), &r1, &r2)
	if r1.Op == gc.OREGISTER {
		gc.Regfree(&g1)
		gc.Regfree(&r1)
	}
	if r2.Op == gc.OREGISTER {
		gc.Regfree(&g2)
		gc.Regfree(&r2)
	}
	return gc.Gbranch(optoas(op, t), nil, likely)
}

func ginsboolval(a int, n *gc.Node) {
	gins(jmptoset(a), nil, n)
}

// set up nodes representing 2^63
var (
	bigi         gc.Node
	bigf         gc.Node
	bignodes_did bool
)

func bignodes() {
	if bignodes_did {
		return
	}
	bignodes_did = true

	var i big.Int
	i.SetInt64(1)
	i.Lsh(&i, 63)

	gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 0)
	bigi.SetBigInt(&i)

	bigi.Convconst(&bigf, gc.Types[gc.TFLOAT64])
}

/*
 * generate move:
 *	t = f
 * hard part is conversions.
 */
func gmove(f *gc.Node, t *gc.Node) {
	if gc.Debug['M'] != 0 {
		fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong))
	}

	ft := gc.Simsimtype(f.Type)
	tt := gc.Simsimtype(t.Type)
	cvt := t.Type

	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
		gc.Complexmove(f, t)
		return
	}

	// cannot have two memory operands
	var a int
	if gc.Ismem(f) && gc.Ismem(t) {
		goto hard
	}

	// convert constant to desired type
	if f.Op == gc.OLITERAL {
		var con gc.Node
		f.Convconst(&con, t.Type)
		f = &con
		ft = tt // so big switch will choose a simple mov

		// some constants can't move directly to memory.
		if gc.Ismem(t) {
			// float constants come from memory.
			if gc.Isfloat[tt] {
				goto hard
			}

			// 64-bit immediates are really 32-bit sign-extended
			// unless moving into a register.
			if gc.Isint[tt] {
				if i := con.Int(); int64(int32(i)) != i {
					goto hard
				}
			}
		}
	}

	// value -> value copy, only one memory operand.
	// figure out the instruction to use.
	// break out of switch for one-instruction gins.
	// goto rdst for "destination must be register".
	// goto hard for "convert to cvt type first".
	// otherwise handle and return.
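	// The switch key packs the source simtype into the high 16 bits and
	// the destination simtype into the low 16 bits, so each (from, to)
	// pair selects exactly one case below.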

	switch uint32(ft)<<16 | uint32(tt) {
	default:
		gc.Fatalf("gmove %v -> %v", gc.Tconv(f.Type, obj.FmtLong), gc.Tconv(t.Type, obj.FmtLong))

		/*
		 * integer copy and truncate
		 */
	case gc.TINT8<<16 | gc.TINT8, // same size
		gc.TINT8<<16 | gc.TUINT8,
		gc.TUINT8<<16 | gc.TINT8,
		gc.TUINT8<<16 | gc.TUINT8,
		gc.TINT16<<16 | gc.TINT8,
		// truncate
		gc.TUINT16<<16 | gc.TINT8,
		gc.TINT32<<16 | gc.TINT8,
		gc.TUINT32<<16 | gc.TINT8,
		gc.TINT64<<16 | gc.TINT8,
		gc.TUINT64<<16 | gc.TINT8,
		gc.TINT16<<16 | gc.TUINT8,
		gc.TUINT16<<16 | gc.TUINT8,
		gc.TINT32<<16 | gc.TUINT8,
		gc.TUINT32<<16 | gc.TUINT8,
		gc.TINT64<<16 | gc.TUINT8,
		gc.TUINT64<<16 | gc.TUINT8:
		a = x86.AMOVB

	case gc.TINT16<<16 | gc.TINT16, // same size
		gc.TINT16<<16 | gc.TUINT16,
		gc.TUINT16<<16 | gc.TINT16,
		gc.TUINT16<<16 | gc.TUINT16,
		gc.TINT32<<16 | gc.TINT16,
		// truncate
		gc.TUINT32<<16 | gc.TINT16,
		gc.TINT64<<16 | gc.TINT16,
		gc.TUINT64<<16 | gc.TINT16,
		gc.TINT32<<16 | gc.TUINT16,
		gc.TUINT32<<16 | gc.TUINT16,
		gc.TINT64<<16 | gc.TUINT16,
		gc.TUINT64<<16 | gc.TUINT16:
		a = x86.AMOVW

	case gc.TINT32<<16 | gc.TINT32, // same size
		gc.TINT32<<16 | gc.TUINT32,
		gc.TUINT32<<16 | gc.TINT32,
		gc.TUINT32<<16 | gc.TUINT32:
		a = x86.AMOVL

	case gc.TINT64<<16 | gc.TINT32, // truncate
		gc.TUINT64<<16 | gc.TINT32,
		gc.TINT64<<16 | gc.TUINT32,
		gc.TUINT64<<16 | gc.TUINT32:
		a = x86.AMOVQL

	case gc.TINT64<<16 | gc.TINT64, // same size
		gc.TINT64<<16 | gc.TUINT64,
		gc.TUINT64<<16 | gc.TINT64,
		gc.TUINT64<<16 | gc.TUINT64:
		a = x86.AMOVQ

		/*
		 * integer up-conversions
		 */
	case gc.TINT8<<16 | gc.TINT16, // sign extend int8
		gc.TINT8<<16 | gc.TUINT16:
		a = x86.AMOVBWSX

		goto rdst

	case gc.TINT8<<16 | gc.TINT32,
		gc.TINT8<<16 | gc.TUINT32:
		a = x86.AMOVBLSX
		goto rdst

	case gc.TINT8<<16 | gc.TINT64,
		gc.TINT8<<16 | gc.TUINT64:
		a = x86.AMOVBQSX
		goto rdst

	case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
		gc.TUINT8<<16 | gc.TUINT16:
		a = x86.AMOVBWZX

		goto rdst

	case gc.TUINT8<<16 | gc.TINT32,
		gc.TUINT8<<16 | gc.TUINT32:
		a = x86.AMOVBLZX
		goto rdst

	case gc.TUINT8<<16 | gc.TINT64,
		gc.TUINT8<<16 | gc.TUINT64:
		a = x86.AMOVBQZX
		goto rdst

	case gc.TINT16<<16 | gc.TINT32, // sign extend int16
		gc.TINT16<<16 | gc.TUINT32:
		a = x86.AMOVWLSX

		goto rdst

	case gc.TINT16<<16 | gc.TINT64,
		gc.TINT16<<16 | gc.TUINT64:
		a = x86.AMOVWQSX
		goto rdst

	case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
		gc.TUINT16<<16 | gc.TUINT32:
		a = x86.AMOVWLZX

		goto rdst

	case gc.TUINT16<<16 | gc.TINT64,
		gc.TUINT16<<16 | gc.TUINT64:
		a = x86.AMOVWQZX
		goto rdst

	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
		gc.TINT32<<16 | gc.TUINT64:
		a = x86.AMOVLQSX

		goto rdst

		// AMOVL into a register zeros the top of the register,
		// so this is not always necessary, but if we rely on AMOVL
		// the optimizer is almost certain to screw with us.
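		// Using an explicit AMOVLQZX keeps the zero-extension visible
		// in the instruction stream instead of depending on that
		// implicit behavior.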
	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
		gc.TUINT32<<16 | gc.TUINT64:
		a = x86.AMOVLQZX

		goto rdst

		/*
		 * float to integer
		 */
	case gc.TFLOAT32<<16 | gc.TINT32:
		a = x86.ACVTTSS2SL

		goto rdst

	case gc.TFLOAT64<<16 | gc.TINT32:
		a = x86.ACVTTSD2SL
		goto rdst

	case gc.TFLOAT32<<16 | gc.TINT64:
		a = x86.ACVTTSS2SQ
		goto rdst

	case gc.TFLOAT64<<16 | gc.TINT64:
		a = x86.ACVTTSD2SQ
		goto rdst

		// convert via int32.
	case gc.TFLOAT32<<16 | gc.TINT16,
		gc.TFLOAT32<<16 | gc.TINT8,
		gc.TFLOAT32<<16 | gc.TUINT16,
		gc.TFLOAT32<<16 | gc.TUINT8,
		gc.TFLOAT64<<16 | gc.TINT16,
		gc.TFLOAT64<<16 | gc.TINT8,
		gc.TFLOAT64<<16 | gc.TUINT16,
		gc.TFLOAT64<<16 | gc.TUINT8:
		cvt = gc.Types[gc.TINT32]

		goto hard

		// convert via int64.
	case gc.TFLOAT32<<16 | gc.TUINT32,
		gc.TFLOAT64<<16 | gc.TUINT32:
		cvt = gc.Types[gc.TINT64]

		goto hard

		// algorithm is:
		//	if small enough, use native float64 -> int64 conversion.
		//	otherwise, subtract 2^63, convert, and add it back.
	case gc.TFLOAT32<<16 | gc.TUINT64,
		gc.TFLOAT64<<16 | gc.TUINT64:
		a := x86.ACVTTSS2SQ

		if ft == gc.TFLOAT64 {
			a = x86.ACVTTSD2SQ
		}
		bignodes()
		var r1 gc.Node
		gc.Regalloc(&r1, gc.Types[ft], nil)
		var r2 gc.Node
		gc.Regalloc(&r2, gc.Types[tt], t)
		var r3 gc.Node
		gc.Regalloc(&r3, gc.Types[ft], nil)
		var r4 gc.Node
		gc.Regalloc(&r4, gc.Types[tt], nil)
		gins(optoas(gc.OAS, f.Type), f, &r1)
		gins(optoas(gc.OCMP, f.Type), &bigf, &r1)
		p1 := gc.Gbranch(optoas(gc.OLE, f.Type), nil, +1)
		gins(a, &r1, &r2)
		p2 := gc.Gbranch(obj.AJMP, nil, 0)
		gc.Patch(p1, gc.Pc)
		gins(optoas(gc.OAS, f.Type), &bigf, &r3)
		gins(optoas(gc.OSUB, f.Type), &r3, &r1)
		gins(a, &r1, &r2)
		gins(x86.AMOVQ, &bigi, &r4)
		gins(x86.AXORQ, &r4, &r2)
		gc.Patch(p2, gc.Pc)
		gmove(&r2, t)
		gc.Regfree(&r4)
		gc.Regfree(&r3)
		gc.Regfree(&r2)
		gc.Regfree(&r1)
		return

		/*
		 * integer to float
		 */
	case gc.TINT32<<16 | gc.TFLOAT32:
		a = x86.ACVTSL2SS

		goto rdst

	case gc.TINT32<<16 | gc.TFLOAT64:
		a = x86.ACVTSL2SD
		goto rdst

	case gc.TINT64<<16 | gc.TFLOAT32:
		a = x86.ACVTSQ2SS
		goto rdst

	case gc.TINT64<<16 | gc.TFLOAT64:
		a = x86.ACVTSQ2SD
		goto rdst

		// convert via int32
	case gc.TINT16<<16 | gc.TFLOAT32,
		gc.TINT16<<16 | gc.TFLOAT64,
		gc.TINT8<<16 | gc.TFLOAT32,
		gc.TINT8<<16 | gc.TFLOAT64,
		gc.TUINT16<<16 | gc.TFLOAT32,
		gc.TUINT16<<16 | gc.TFLOAT64,
		gc.TUINT8<<16 | gc.TFLOAT32,
		gc.TUINT8<<16 | gc.TFLOAT64:
		cvt = gc.Types[gc.TINT32]

		goto hard

		// convert via int64.
	case gc.TUINT32<<16 | gc.TFLOAT32,
		gc.TUINT32<<16 | gc.TFLOAT64:
		cvt = gc.Types[gc.TINT64]

		goto hard

		// algorithm is:
		//	if small enough, use native int64 -> float conversion.
		//	otherwise, halve (rounding to odd?), convert, and double.
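		// (ORing the low bit back into the halved value rounds to odd,
		// so the final doubling does not introduce a second rounding error.)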
	case gc.TUINT64<<16 | gc.TFLOAT32,
		gc.TUINT64<<16 | gc.TFLOAT64:
		a := x86.ACVTSQ2SS

		if tt == gc.TFLOAT64 {
			a = x86.ACVTSQ2SD
		}
		var zero gc.Node
		gc.Nodconst(&zero, gc.Types[gc.TUINT64], 0)
		var one gc.Node
		gc.Nodconst(&one, gc.Types[gc.TUINT64], 1)
		var r1 gc.Node
		gc.Regalloc(&r1, f.Type, f)
		var r2 gc.Node
		gc.Regalloc(&r2, t.Type, t)
		var r3 gc.Node
		gc.Regalloc(&r3, f.Type, nil)
		var r4 gc.Node
		gc.Regalloc(&r4, f.Type, nil)
		gmove(f, &r1)
		gins(x86.ACMPQ, &r1, &zero)
		p1 := gc.Gbranch(x86.AJLT, nil, +1)
		gins(a, &r1, &r2)
		p2 := gc.Gbranch(obj.AJMP, nil, 0)
		gc.Patch(p1, gc.Pc)
		gmove(&r1, &r3)
		gins(x86.ASHRQ, &one, &r3)
		gmove(&r1, &r4)
		gins(x86.AANDL, &one, &r4)
		gins(x86.AORQ, &r4, &r3)
		gins(a, &r3, &r2)
		gins(optoas(gc.OADD, t.Type), &r2, &r2)
		gc.Patch(p2, gc.Pc)
		gmove(&r2, t)
		gc.Regfree(&r4)
		gc.Regfree(&r3)
		gc.Regfree(&r2)
		gc.Regfree(&r1)
		return

		/*
		 * float to float
		 */
	case gc.TFLOAT32<<16 | gc.TFLOAT32:
		a = x86.AMOVSS

	case gc.TFLOAT64<<16 | gc.TFLOAT64:
		a = x86.AMOVSD

	case gc.TFLOAT32<<16 | gc.TFLOAT64:
		a = x86.ACVTSS2SD
		goto rdst

	case gc.TFLOAT64<<16 | gc.TFLOAT32:
		a = x86.ACVTSD2SS
		goto rdst
	}

	gins(a, f, t)
	return

	// requires register destination
rdst:
	{
		var r1 gc.Node
		gc.Regalloc(&r1, t.Type, t)

		gins(a, f, &r1)
		gmove(&r1, t)
		gc.Regfree(&r1)
		return
	}

	// requires register intermediate
hard:
	var r1 gc.Node
	gc.Regalloc(&r1, cvt, t)

	gmove(f, &r1)
	gmove(&r1, t)
	gc.Regfree(&r1)
	return
}

func samaddr(f *gc.Node, t *gc.Node) bool {
	if f.Op != t.Op {
		return false
	}

	switch f.Op {
	case gc.OREGISTER:
		if f.Reg != t.Reg {
			break
		}
		return true
	}

	return false
}

/*
 * generate one instruction:
 *	as f, t
 */
func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
	// Node nod;

	// if(f != N && f->op == OINDEX) {
	// 	gc.Regalloc(&nod, &regnode, Z);
	// 	v = constnode.vconst;
	// 	gc.Cgen(f->right, &nod);
	// 	constnode.vconst = v;
	// 	idx.reg = nod.reg;
	// 	gc.Regfree(&nod);
	// }
	// if(t != N && t->op == OINDEX) {
	// 	gc.Regalloc(&nod, &regnode, Z);
	// 	v = constnode.vconst;
	// 	gc.Cgen(t->right, &nod);
	// 	constnode.vconst = v;
	// 	idx.reg = nod.reg;
	// 	gc.Regfree(&nod);
	// }

	if f != nil && f.Op == gc.OADDR && (as == x86.AMOVL || as == x86.AMOVQ) {
		// Turn MOVL $xxx into LEAL xxx.
		// These should be equivalent but most of the backend
		// only expects to see LEAL, because that's what we had
		// historically generated. Various hidden assumptions are baked in by now.
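		// For example, MOVQ $x(SB), AX is emitted as LEAQ x(SB), AX.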
		if as == x86.AMOVL {
			as = x86.ALEAL
		} else {
			as = x86.ALEAQ
		}
		f = f.Left
	}

	switch as {
	case x86.AMOVB,
		x86.AMOVW,
		x86.AMOVL,
		x86.AMOVQ,
		x86.AMOVSS,
		x86.AMOVSD:
		if f != nil && t != nil && samaddr(f, t) {
			return nil
		}

	case x86.ALEAQ:
		if f != nil && gc.Isconst(f, gc.CTNIL) {
			gc.Fatalf("gins LEAQ nil %v", f.Type)
		}
	}

	p := gc.Prog(as)
	gc.Naddr(&p.From, f)
	gc.Naddr(&p.To, t)

	if gc.Debug['g'] != 0 {
		fmt.Printf("%v\n", p)
	}

	w := int32(0)
	switch as {
	case x86.AMOVB:
		w = 1

	case x86.AMOVW:
		w = 2

	case x86.AMOVL:
		w = 4

	case x86.AMOVQ:
		w = 8
	}

	if w != 0 && ((f != nil && p.From.Width < int64(w)) || (t != nil && p.To.Width > int64(w))) {
		gc.Dump("f", f)
		gc.Dump("t", t)
		gc.Fatalf("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
	}

	if p.To.Type == obj.TYPE_ADDR && w > 0 {
		gc.Fatalf("bad use of addr: %v", p)
	}

	return p
}

func ginsnop() {
	// This is actually not the x86 NOP anymore,
	// but at the point where it gets used, AX is dead
	// so it's okay if we lose the high bits.
	var reg gc.Node
	gc.Nodreg(&reg, gc.Types[gc.TINT], x86.REG_AX)
	gins(x86.AXCHGL, &reg, &reg)
}

/*
 * return Axxx for Oxxx on type t.
 */
func optoas(op gc.Op, t *gc.Type) int {
	if t == nil {
		gc.Fatalf("optoas: t is nil")
	}

	// avoid constant conversions in switches below
	const (
		OMINUS_  = uint32(gc.OMINUS) << 16
		OLSH_    = uint32(gc.OLSH) << 16
		ORSH_    = uint32(gc.ORSH) << 16
		OADD_    = uint32(gc.OADD) << 16
		OSUB_    = uint32(gc.OSUB) << 16
		OMUL_    = uint32(gc.OMUL) << 16
		ODIV_    = uint32(gc.ODIV) << 16
		OMOD_    = uint32(gc.OMOD) << 16
		OOR_     = uint32(gc.OOR) << 16
		OAND_    = uint32(gc.OAND) << 16
		OXOR_    = uint32(gc.OXOR) << 16
		OEQ_     = uint32(gc.OEQ) << 16
		ONE_     = uint32(gc.ONE) << 16
		OLT_     = uint32(gc.OLT) << 16
		OLE_     = uint32(gc.OLE) << 16
		OGE_     = uint32(gc.OGE) << 16
		OGT_     = uint32(gc.OGT) << 16
		OCMP_    = uint32(gc.OCMP) << 16
		OPS_     = uint32(gc.OPS) << 16
		OPC_     = uint32(gc.OPC) << 16
		OAS_     = uint32(gc.OAS) << 16
		OHMUL_   = uint32(gc.OHMUL) << 16
		OSQRT_   = uint32(gc.OSQRT) << 16
		OADDR_   = uint32(gc.OADDR) << 16
		OINC_    = uint32(gc.OINC) << 16
		ODEC_    = uint32(gc.ODEC) << 16
		OLROT_   = uint32(gc.OLROT) << 16
		ORROTC_  = uint32(gc.ORROTC) << 16
		OEXTEND_ = uint32(gc.OEXTEND) << 16
	)

	a := obj.AXXX
	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
	default:
		gc.Fatalf("optoas: no entry %v-%v", gc.Oconv(int(op), 0), t)

	case OADDR_ | gc.TPTR32:
		a = x86.ALEAL

	case OADDR_ | gc.TPTR64:
		a = x86.ALEAQ

	case OEQ_ | gc.TBOOL,
		OEQ_ | gc.TINT8,
		OEQ_ | gc.TUINT8,
		OEQ_ | gc.TINT16,
		OEQ_ | gc.TUINT16,
		OEQ_ | gc.TINT32,
		OEQ_ | gc.TUINT32,
		OEQ_ | gc.TINT64,
		OEQ_ | gc.TUINT64,
		OEQ_ | gc.TPTR32,
		OEQ_ | gc.TPTR64,
		OEQ_ | gc.TFLOAT32,
		OEQ_ | gc.TFLOAT64:
		a = x86.AJEQ

	case ONE_ | gc.TBOOL,
		ONE_ | gc.TINT8,
		ONE_ | gc.TUINT8,
		ONE_ | gc.TINT16,
		ONE_ | gc.TUINT16,
		ONE_ | gc.TINT32,
		ONE_ | gc.TUINT32,
		ONE_ | gc.TINT64,
		ONE_ | gc.TUINT64,
		ONE_ | gc.TPTR32,
		ONE_ | gc.TPTR64,
		ONE_ | gc.TFLOAT32,
		ONE_ | gc.TFLOAT64:
		a = x86.AJNE

	case OPS_ | gc.TBOOL,
		OPS_ | gc.TINT8,
		OPS_ | gc.TUINT8,
		OPS_ | gc.TINT16,
		OPS_ | gc.TUINT16,
		OPS_ | gc.TINT32,
		OPS_ | gc.TUINT32,
		OPS_ | gc.TINT64,
		OPS_ | gc.TUINT64,
		OPS_ | gc.TPTR32,
		OPS_ | gc.TPTR64,
		OPS_ | gc.TFLOAT32,
		OPS_ | gc.TFLOAT64:
		a = x86.AJPS

	case OPC_ | gc.TBOOL,
		OPC_ | gc.TINT8,
		OPC_ | gc.TUINT8,
		OPC_ | gc.TINT16,
		OPC_ | gc.TUINT16,
		OPC_ | gc.TINT32,
		OPC_ | gc.TUINT32,
		OPC_ | gc.TINT64,
		OPC_ | gc.TUINT64,
		OPC_ | gc.TPTR32,
		OPC_ | gc.TPTR64,
		OPC_ | gc.TFLOAT32,
		OPC_ | gc.TFLOAT64:
		a = x86.AJPC

	case OLT_ | gc.TINT8,
		OLT_ | gc.TINT16,
		OLT_ | gc.TINT32,
		OLT_ | gc.TINT64:
		a = x86.AJLT

	case OLT_ | gc.TUINT8,
		OLT_ | gc.TUINT16,
		OLT_ | gc.TUINT32,
		OLT_ | gc.TUINT64:
		a = x86.AJCS

	case OLE_ | gc.TINT8,
		OLE_ | gc.TINT16,
		OLE_ | gc.TINT32,
		OLE_ | gc.TINT64:
		a = x86.AJLE

	case OLE_ | gc.TUINT8,
		OLE_ | gc.TUINT16,
		OLE_ | gc.TUINT32,
		OLE_ | gc.TUINT64:
		a = x86.AJLS

	case OGT_ | gc.TINT8,
		OGT_ | gc.TINT16,
		OGT_ | gc.TINT32,
		OGT_ | gc.TINT64:
		a = x86.AJGT

	case OGT_ | gc.TUINT8,
		OGT_ | gc.TUINT16,
		OGT_ | gc.TUINT32,
		OGT_ | gc.TUINT64,
		OLT_ | gc.TFLOAT32,
		OLT_ | gc.TFLOAT64:
		a = x86.AJHI

	case OGE_ | gc.TINT8,
		OGE_ | gc.TINT16,
		OGE_ | gc.TINT32,
		OGE_ | gc.TINT64:
		a = x86.AJGE

	case OGE_ | gc.TUINT8,
		OGE_ | gc.TUINT16,
		OGE_ | gc.TUINT32,
		OGE_ | gc.TUINT64,
		OLE_ | gc.TFLOAT32,
		OLE_ | gc.TFLOAT64:
		a = x86.AJCC

	case OCMP_ | gc.TBOOL,
		OCMP_ | gc.TINT8,
		OCMP_ | gc.TUINT8:
		a = x86.ACMPB

	case OCMP_ | gc.TINT16,
		OCMP_ | gc.TUINT16:
		a = x86.ACMPW

	case OCMP_ | gc.TINT32,
		OCMP_ | gc.TUINT32,
		OCMP_ | gc.TPTR32:
		a = x86.ACMPL

	case OCMP_ | gc.TINT64,
		OCMP_ | gc.TUINT64,
		OCMP_ | gc.TPTR64:
		a = x86.ACMPQ

	case OCMP_ | gc.TFLOAT32:
		a = x86.AUCOMISS

	case OCMP_ | gc.TFLOAT64:
		a = x86.AUCOMISD

	case OAS_ | gc.TBOOL,
		OAS_ | gc.TINT8,
		OAS_ | gc.TUINT8:
		a = x86.AMOVB

	case OAS_ | gc.TINT16,
		OAS_ | gc.TUINT16:
		a = x86.AMOVW

	case OAS_ | gc.TINT32,
		OAS_ | gc.TUINT32,
		OAS_ | gc.TPTR32:
		a = x86.AMOVL

	case OAS_ | gc.TINT64,
		OAS_ | gc.TUINT64,
		OAS_ | gc.TPTR64:
		a = x86.AMOVQ

	case OAS_ | gc.TFLOAT32:
		a = x86.AMOVSS

	case OAS_ | gc.TFLOAT64:
		a = x86.AMOVSD

	case OADD_ | gc.TINT8,
		OADD_ | gc.TUINT8:
		a = x86.AADDB

	case OADD_ | gc.TINT16,
		OADD_ | gc.TUINT16:
		a = x86.AADDW

	case OADD_ | gc.TINT32,
		OADD_ | gc.TUINT32,
		OADD_ | gc.TPTR32:
		a = x86.AADDL

	case OADD_ | gc.TINT64,
		OADD_ | gc.TUINT64,
		OADD_ | gc.TPTR64:
		a = x86.AADDQ

	case OADD_ | gc.TFLOAT32:
		a = x86.AADDSS

	case OADD_ | gc.TFLOAT64:
		a = x86.AADDSD

	case OSUB_ | gc.TINT8,
		OSUB_ | gc.TUINT8:
		a = x86.ASUBB

	case OSUB_ | gc.TINT16,
		OSUB_ | gc.TUINT16:
		a = x86.ASUBW

	case OSUB_ | gc.TINT32,
		OSUB_ | gc.TUINT32,
		OSUB_ | gc.TPTR32:
		a = x86.ASUBL

	case OSUB_ | gc.TINT64,
		OSUB_ | gc.TUINT64,
		OSUB_ | gc.TPTR64:
		a = x86.ASUBQ

	case OSUB_ | gc.TFLOAT32:
		a = x86.ASUBSS

	case OSUB_ | gc.TFLOAT64:
		a = x86.ASUBSD

	case OINC_ | gc.TINT8,
		OINC_ | gc.TUINT8:
		a = x86.AINCB

	case OINC_ | gc.TINT16,
		OINC_ | gc.TUINT16:
		a = x86.AINCW

	case OINC_ | gc.TINT32,
		OINC_ | gc.TUINT32,
		OINC_ | gc.TPTR32:
		a = x86.AINCL

	case OINC_ | gc.TINT64,
		OINC_ | gc.TUINT64,
		OINC_ | gc.TPTR64:
		a = x86.AINCQ

	case ODEC_ | gc.TINT8,
		ODEC_ | gc.TUINT8:
		a = x86.ADECB

	case ODEC_ | gc.TINT16,
		ODEC_ | gc.TUINT16:
		a = x86.ADECW

	case ODEC_ | gc.TINT32,
		ODEC_ | gc.TUINT32,
		ODEC_ | gc.TPTR32:
		a = x86.ADECL

	case ODEC_ | gc.TINT64,
		ODEC_ | gc.TUINT64,
		ODEC_ | gc.TPTR64:
		a = x86.ADECQ

	case OMINUS_ | gc.TINT8,
		OMINUS_ | gc.TUINT8:
		a = x86.ANEGB

	case OMINUS_ | gc.TINT16,
		OMINUS_ | gc.TUINT16:
		a = x86.ANEGW

	case OMINUS_ | gc.TINT32,
		OMINUS_ | gc.TUINT32,
		OMINUS_ | gc.TPTR32:
		a = x86.ANEGL

	case OMINUS_ | gc.TINT64,
		OMINUS_ | gc.TUINT64,
		OMINUS_ | gc.TPTR64:
		a = x86.ANEGQ

	case OAND_ | gc.TBOOL,
		OAND_ | gc.TINT8,
		OAND_ | gc.TUINT8:
		a = x86.AANDB

	case OAND_ | gc.TINT16,
		OAND_ | gc.TUINT16:
		a = x86.AANDW

	case OAND_ | gc.TINT32,
		OAND_ | gc.TUINT32,
		OAND_ | gc.TPTR32:
		a = x86.AANDL

	case OAND_ | gc.TINT64,
		OAND_ | gc.TUINT64,
		OAND_ | gc.TPTR64:
		a = x86.AANDQ

	case OOR_ | gc.TBOOL,
		OOR_ | gc.TINT8,
		OOR_ | gc.TUINT8:
		a = x86.AORB

	case OOR_ | gc.TINT16,
		OOR_ | gc.TUINT16:
		a = x86.AORW

	case OOR_ | gc.TINT32,
		OOR_ | gc.TUINT32,
		OOR_ | gc.TPTR32:
		a = x86.AORL

	case OOR_ | gc.TINT64,
		OOR_ | gc.TUINT64,
		OOR_ | gc.TPTR64:
		a = x86.AORQ

	case OXOR_ | gc.TINT8,
		OXOR_ | gc.TUINT8:
		a = x86.AXORB

	case OXOR_ | gc.TINT16,
		OXOR_ | gc.TUINT16:
		a = x86.AXORW

	case OXOR_ | gc.TINT32,
		OXOR_ | gc.TUINT32,
		OXOR_ | gc.TPTR32:
		a = x86.AXORL

	case OXOR_ | gc.TINT64,
		OXOR_ | gc.TUINT64,
		OXOR_ | gc.TPTR64:
		a = x86.AXORQ

	case OLROT_ | gc.TINT8,
		OLROT_ | gc.TUINT8:
		a = x86.AROLB

	case OLROT_ | gc.TINT16,
		OLROT_ | gc.TUINT16:
		a = x86.AROLW

	case OLROT_ | gc.TINT32,
		OLROT_ | gc.TUINT32,
		OLROT_ | gc.TPTR32:
		a = x86.AROLL

	case OLROT_ | gc.TINT64,
		OLROT_ | gc.TUINT64,
		OLROT_ | gc.TPTR64:
		a = x86.AROLQ

	case OLSH_ | gc.TINT8,
		OLSH_ | gc.TUINT8:
		a = x86.ASHLB

	case OLSH_ | gc.TINT16,
		OLSH_ | gc.TUINT16:
		a = x86.ASHLW

	case OLSH_ | gc.TINT32,
		OLSH_ | gc.TUINT32,
		OLSH_ | gc.TPTR32:
		a = x86.ASHLL

	case OLSH_ | gc.TINT64,
		OLSH_ | gc.TUINT64,
		OLSH_ | gc.TPTR64:
		a = x86.ASHLQ

	case ORSH_ | gc.TUINT8:
		a = x86.ASHRB

	case ORSH_ | gc.TUINT16:
		a = x86.ASHRW

	case ORSH_ | gc.TUINT32,
		ORSH_ | gc.TPTR32:
		a = x86.ASHRL

	case ORSH_ | gc.TUINT64,
		ORSH_ | gc.TPTR64:
		a = x86.ASHRQ

	case ORSH_ | gc.TINT8:
		a = x86.ASARB

	case ORSH_ | gc.TINT16:
		a = x86.ASARW

	case ORSH_ | gc.TINT32:
		a = x86.ASARL

	case ORSH_ | gc.TINT64:
		a = x86.ASARQ

	case ORROTC_ | gc.TINT8,
		ORROTC_ | gc.TUINT8:
		a = x86.ARCRB

	case ORROTC_ | gc.TINT16,
		ORROTC_ | gc.TUINT16:
		a = x86.ARCRW

	case ORROTC_ | gc.TINT32,
		ORROTC_ | gc.TUINT32:
		a = x86.ARCRL

	case ORROTC_ | gc.TINT64,
		ORROTC_ | gc.TUINT64:
		a = x86.ARCRQ

	case OHMUL_ | gc.TINT8,
		OMUL_ | gc.TINT8,
		OMUL_ | gc.TUINT8:
		a = x86.AIMULB

	case OHMUL_ | gc.TINT16,
		OMUL_ | gc.TINT16,
		OMUL_ | gc.TUINT16:
		a = x86.AIMULW

	case OHMUL_ | gc.TINT32,
		OMUL_ | gc.TINT32,
		OMUL_ | gc.TUINT32,
		OMUL_ | gc.TPTR32:
		a = x86.AIMULL

	case OHMUL_ | gc.TINT64,
		OMUL_ | gc.TINT64,
		OMUL_ | gc.TUINT64,
		OMUL_ | gc.TPTR64:
		a = x86.AIMULQ

	case OHMUL_ | gc.TUINT8:
		a = x86.AMULB

	case OHMUL_ | gc.TUINT16:
		a = x86.AMULW

	case OHMUL_ | gc.TUINT32,
		OHMUL_ | gc.TPTR32:
		a = x86.AMULL

	case OHMUL_ | gc.TUINT64,
		OHMUL_ | gc.TPTR64:
		a = x86.AMULQ

	case OMUL_ | gc.TFLOAT32:
		a = x86.AMULSS

	case OMUL_ | gc.TFLOAT64:
		a = x86.AMULSD

	case ODIV_ | gc.TINT8,
		OMOD_ | gc.TINT8:
		a = x86.AIDIVB

	case ODIV_ | gc.TUINT8,
		OMOD_ | gc.TUINT8:
		a = x86.ADIVB

	case ODIV_ | gc.TINT16,
		OMOD_ | gc.TINT16:
		a = x86.AIDIVW

	case ODIV_ | gc.TUINT16,
		OMOD_ | gc.TUINT16:
		a = x86.ADIVW

	case ODIV_ | gc.TINT32,
		OMOD_ | gc.TINT32:
		a = x86.AIDIVL

	case ODIV_ | gc.TUINT32,
		ODIV_ | gc.TPTR32,
		OMOD_ | gc.TUINT32,
		OMOD_ | gc.TPTR32:
		a = x86.ADIVL

	case ODIV_ | gc.TINT64,
		OMOD_ | gc.TINT64:
		a = x86.AIDIVQ

	case ODIV_ | gc.TUINT64,
		ODIV_ | gc.TPTR64,
		OMOD_ | gc.TUINT64,
		OMOD_ | gc.TPTR64:
		a = x86.ADIVQ

	case OEXTEND_ | gc.TINT16:
		a = x86.ACWD

	case OEXTEND_ | gc.TINT32:
		a = x86.ACDQ

	case OEXTEND_ | gc.TINT64:
		a = x86.ACQO

	case ODIV_ | gc.TFLOAT32:
		a = x86.ADIVSS

	case ODIV_ | gc.TFLOAT64:
		a = x86.ADIVSD

	case OSQRT_ | gc.TFLOAT64:
		a = x86.ASQRTSD
	}

	return a
}

// jmptoset returns ASETxx for AJxx.
func jmptoset(jmp int) int {
	switch jmp {
	case x86.AJEQ:
		return x86.ASETEQ
	case x86.AJNE:
		return x86.ASETNE
	case x86.AJLT:
		return x86.ASETLT
	case x86.AJCS:
		return x86.ASETCS
	case x86.AJLE:
		return x86.ASETLE
	case x86.AJLS:
		return x86.ASETLS
	case x86.AJGT:
		return x86.ASETGT
	case x86.AJHI:
		return x86.ASETHI
	case x86.AJGE:
		return x86.ASETGE
	case x86.AJCC:
		return x86.ASETCC
	case x86.AJMI:
		return x86.ASETMI
	case x86.AJOC:
		return x86.ASETOC
	case x86.AJOS:
		return x86.ASETOS
	case x86.AJPC:
		return x86.ASETPC
	case x86.AJPL:
		return x86.ASETPL
	case x86.AJPS:
		return x86.ASETPS
	}
	gc.Fatalf("jmptoset: no entry for %v", gc.Oconv(jmp, 0))
	panic("unreachable")
}

const (
	ODynam   = 1 << 0
	OAddable = 1 << 1
)

var clean [20]gc.Node

var cleani int = 0

func sudoclean() {
	if clean[cleani-1].Op != gc.OEMPTY {
		gc.Regfree(&clean[cleani-1])
	}
	if clean[cleani-2].Op != gc.OEMPTY {
		gc.Regfree(&clean[cleani-2])
	}
	cleani -= 2
}

/*
 * generate code to compute address of n,
 * a reference to a (perhaps nested) field inside
 * an array or struct.
 * return false on failure, true on success.
 * on success, leaves usable address in a.
 *
 * caller is responsible for calling sudoclean
 * after successful sudoaddable,
 * to release the register used for a.
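 *
 * only small integer constants (for the add/sub/and/or/xor/inc/dec/mov
 * opcodes listed below) and ODOT/ODOTPTR field chains are handled;
 * other nodes report failure.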
 */
func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
	if n.Type == nil {
		return false
	}

	*a = obj.Addr{}

	switch n.Op {
	case gc.OLITERAL:
		if !gc.Isconst(n, gc.CTINT) {
			break
		}
		v := n.Int()
		if v >= 32000 || v <= -32000 {
			break
		}
		switch as {
		default:
			return false

		case x86.AADDB,
			x86.AADDW,
			x86.AADDL,
			x86.AADDQ,
			x86.ASUBB,
			x86.ASUBW,
			x86.ASUBL,
			x86.ASUBQ,
			x86.AANDB,
			x86.AANDW,
			x86.AANDL,
			x86.AANDQ,
			x86.AORB,
			x86.AORW,
			x86.AORL,
			x86.AORQ,
			x86.AXORB,
			x86.AXORW,
			x86.AXORL,
			x86.AXORQ,
			x86.AINCB,
			x86.AINCW,
			x86.AINCL,
			x86.AINCQ,
			x86.ADECB,
			x86.ADECW,
			x86.ADECL,
			x86.ADECQ,
			x86.AMOVB,
			x86.AMOVW,
			x86.AMOVL,
			x86.AMOVQ:
			break
		}

		cleani += 2
		reg := &clean[cleani-1]
		reg1 := &clean[cleani-2]
		reg.Op = gc.OEMPTY
		reg1.Op = gc.OEMPTY
		gc.Naddr(a, n)
		return true

	case gc.ODOT,
		gc.ODOTPTR:
		cleani += 2
		reg := &clean[cleani-1]
		reg1 := &clean[cleani-2]
		reg.Op = gc.OEMPTY
		reg1.Op = gc.OEMPTY
		var nn *gc.Node
		var oary [10]int64
		o := gc.Dotoffset(n, oary[:], &nn)
		if nn == nil {
			sudoclean()
			return false
		}

		if nn.Addable && o == 1 && oary[0] >= 0 {
			// directly addressable set of DOTs
			n1 := *nn

			n1.Type = n.Type
			n1.Xoffset += oary[0]
			gc.Naddr(a, &n1)
			return true
		}

		gc.Regalloc(reg, gc.Types[gc.Tptr], nil)
		n1 := *reg
		n1.Op = gc.OINDREG
		if oary[0] >= 0 {
			gc.Agen(nn, reg)
			n1.Xoffset = oary[0]
		} else {
			gc.Cgen(nn, reg)
			gc.Cgen_checknil(reg)
			n1.Xoffset = -(oary[0] + 1)
		}

		for i := 1; i < o; i++ {
			if oary[i] >= 0 {
				gc.Fatalf("can't happen")
			}
			gins(movptr, &n1, reg)
			gc.Cgen_checknil(reg)
			n1.Xoffset = -(oary[i] + 1)
		}

		a.Type = obj.TYPE_NONE
		a.Index = obj.TYPE_NONE
		gc.Fixlargeoffset(&n1)
		gc.Naddr(a, &n1)
		return true

	case gc.OINDEX:
		return false
	}

	return false
}