github.com/q45/go@v0.0.0-20151101211701-a4fb8c13db3f/src/cmd/compile/internal/amd64/gsubr.go (about) 1 // Derived from Inferno utils/6c/txt.c 2 // http://code.google.com/p/inferno-os/source/browse/utils/6c/txt.c 3 // 4 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved. 5 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net) 6 // Portions Copyright © 1997-1999 Vita Nuova Limited 7 // Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com) 8 // Portions Copyright © 2004,2006 Bruce Ellis 9 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net) 10 // Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others 11 // Portions Copyright © 2009 The Go Authors. All rights reserved. 12 // 13 // Permission is hereby granted, free of charge, to any person obtaining a copy 14 // of this software and associated documentation files (the "Software"), to deal 15 // in the Software without restriction, including without limitation the rights 16 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 17 // copies of the Software, and to permit persons to whom the Software is 18 // furnished to do so, subject to the following conditions: 19 // 20 // The above copyright notice and this permission notice shall be included in 21 // all copies or substantial portions of the Software. 22 // 23 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 24 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 25 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 26 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 27 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 28 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 29 // THE SOFTWARE. 
package amd64

import (
	"cmd/compile/internal/big"
	"cmd/compile/internal/gc"
	"cmd/internal/obj"
	"cmd/internal/obj/x86"
	"fmt"
)

// resvd lists registers that the register allocator must never hand out,
// because code generation uses them implicitly (string moves, divide,
// shift counts, and the stack pointer).
var resvd = []int{
	x86.REG_DI, // for movstring
	x86.REG_SI, // for movstring

	x86.REG_AX, // for divide
	x86.REG_CX, // for shift
	x86.REG_DX, // for divide
	x86.REG_SP, // for stack
}

/*
 * generate
 *	as $c, reg
 *
 * The register node is typed int32 for the 32-bit opcodes
 * (ADDL/MOVL/LEAL) and int64 otherwise.
 */
func gconreg(as int, c int64, reg int) {
	var nr gc.Node

	switch as {
	case x86.AADDL,
		x86.AMOVL,
		x86.ALEAL:
		gc.Nodreg(&nr, gc.Types[gc.TINT32], reg)

	default:
		gc.Nodreg(&nr, gc.Types[gc.TINT64], reg)
	}

	ginscon(as, c, &nr)
}

/*
 * generate
 *	as $c, n
 *
 * Constants that do not fit in a 32-bit signed immediate are first
 * loaded into a temporary register with MOVQ (the only instruction
 * here that accepts a full 64-bit immediate).
 */
func ginscon(as int, c int64, n2 *gc.Node) {
	var n1 gc.Node

	switch as {
	case x86.AADDL,
		x86.AMOVL,
		x86.ALEAL:
		gc.Nodconst(&n1, gc.Types[gc.TINT32], c)

	default:
		gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
	}

	if as != x86.AMOVQ && (c < -(1<<31) || c >= 1<<31) {
		// cannot have 64-bit immediate in ADD, etc.
		// instead, MOV into register first.
		var ntmp gc.Node
		gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)

		gins(x86.AMOVQ, &n1, &ntmp)
		gins(as, &ntmp, n2)
		gc.Regfree(&ntmp)
		return
	}

	gins(as, &n1, n2)
}

// ginscmp generates a comparison of n1 and n2 with type t and returns
// the conditional-branch Prog for op, to be patched by the caller.
// Small integer constants are placed last (reversing op if needed) so
// they can be used as immediates in the CMP instruction.
func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
	if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && gc.Smallintconst(n1) && n2.Op != gc.OLITERAL {
		// Reverse comparison to place constant last.
		op = gc.Brrev(op)
		n1, n2 = n2, n1
	}

	// General case.
	// Directly addressable names and register-indirect operands are used
	// in place; anything else is evaluated into registers first.
	var r1, r2, g1, g2 gc.Node
	if n1.Op == gc.ONAME && n1.Class&gc.PHEAP == 0 || n1.Op == gc.OINDREG {
		r1 = *n1
	} else {
		gc.Regalloc(&r1, t, n1)
		gc.Regalloc(&g1, n1.Type, &r1)
		gc.Cgen(n1, &g1)
		gmove(&g1, &r1)
	}
	if n2.Op == gc.OLITERAL && gc.Isint[t.Etype] && gc.Smallintconst(n2) {
		r2 = *n2
	} else {
		gc.Regalloc(&r2, t, n2)
		gc.Regalloc(&g2, n1.Type, &r2)
		gc.Cgen(n2, &g2)
		gc.gmoveHint(&g2, &r2) // NOTE(review): original call is gmove(&g2, &r2)
	}
	gins(optoas(gc.OCMP, t), &r1, &r2)
	if r1.Op == gc.OREGISTER {
		gc.Regfree(&g1)
		gc.Regfree(&r1)
	}
	if r2.Op == gc.OREGISTER {
		gc.Regfree(&g2)
		gc.Regfree(&r2)
	}
	return gc.Gbranch(optoas(op, t), nil, likely)
}

// ginsboolval stores the boolean result of the condition tested by
// jump opcode a into n, using the corresponding SETxx instruction.
func ginsboolval(a int, n *gc.Node) {
	gins(jmptoset(a), nil, n)
}

// set up nodes representing 2^63, used by the float -> uint64
// conversion in gmove. Initialized lazily by bignodes.
var (
	bigi gc.Node
	bigf gc.Node

	bignodes_did bool
)

// bignodes initializes bigi (uint64 constant 2^63) and bigf
// (the same value as a float64 constant). Idempotent.
func bignodes() {
	if bignodes_did {
		return
	}
	bignodes_did = true

	var i big.Int
	i.SetInt64(1)
	i.Lsh(&i, 63)

	gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 0)
	bigi.SetBigInt(&i)

	bigi.Convconst(&bigf, gc.Types[gc.TFLOAT64])
}

/*
 * generate move:
 *	t = f
 * hard part is conversions.
 */
func gmove(f *gc.Node, t *gc.Node) {
	if gc.Debug['M'] != 0 {
		fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong))
	}

	ft := gc.Simsimtype(f.Type)
	tt := gc.Simsimtype(t.Type)
	cvt := t.Type

	if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
		gc.Complexmove(f, t)
		return
	}

	// cannot have two memory operands
	var a int
	if gc.Ismem(f) && gc.Ismem(t) {
		goto hard
	}

	// convert constant to desired type
	if f.Op == gc.OLITERAL {
		var con gc.Node
		f.Convconst(&con, t.Type)
		f = &con
		ft = tt // so big switch will choose a simple mov

		// some constants can't move directly to memory.
		if gc.Ismem(t) {
			// float constants come from memory.
			if gc.Isfloat[tt] {
				goto hard
			}

			// 64-bit immediates are really 32-bit sign-extended
			// unless moving into a register.
			if gc.Isint[tt] {
				if i := con.Int(); int64(int32(i)) != i {
					goto hard
				}
			}
		}
	}

	// value -> value copy, only one memory operand.
	// figure out the instruction to use.
	// break out of switch for one-instruction gins.
	// goto rdst for "destination must be register".
	// goto hard for "convert to cvt type first".
	// otherwise handle and return.

	switch uint32(ft)<<16 | uint32(tt) {
	default:
		gc.Fatalf("gmove %v -> %v", gc.Tconv(f.Type, obj.FmtLong), gc.Tconv(t.Type, obj.FmtLong))

		/*
		 * integer copy and truncate
		 */
	case gc.TINT8<<16 | gc.TINT8, // same size
		gc.TINT8<<16 | gc.TUINT8,
		gc.TUINT8<<16 | gc.TINT8,
		gc.TUINT8<<16 | gc.TUINT8,
		gc.TINT16<<16 | gc.TINT8,
		// truncate
		gc.TUINT16<<16 | gc.TINT8,
		gc.TINT32<<16 | gc.TINT8,
		gc.TUINT32<<16 | gc.TINT8,
		gc.TINT64<<16 | gc.TINT8,
		gc.TUINT64<<16 | gc.TINT8,
		gc.TINT16<<16 | gc.TUINT8,
		gc.TUINT16<<16 | gc.TUINT8,
		gc.TINT32<<16 | gc.TUINT8,
		gc.TUINT32<<16 | gc.TUINT8,
		gc.TINT64<<16 | gc.TUINT8,
		gc.TUINT64<<16 | gc.TUINT8:
		a = x86.AMOVB

	case gc.TINT16<<16 | gc.TINT16, // same size
		gc.TINT16<<16 | gc.TUINT16,
		gc.TUINT16<<16 | gc.TINT16,
		gc.TUINT16<<16 | gc.TUINT16,
		gc.TINT32<<16 | gc.TINT16,
		// truncate
		gc.TUINT32<<16 | gc.TINT16,
		gc.TINT64<<16 | gc.TINT16,
		gc.TUINT64<<16 | gc.TINT16,
		gc.TINT32<<16 | gc.TUINT16,
		gc.TUINT32<<16 | gc.TUINT16,
		gc.TINT64<<16 | gc.TUINT16,
		gc.TUINT64<<16 | gc.TUINT16:
		a = x86.AMOVW

	case gc.TINT32<<16 | gc.TINT32, // same size
		gc.TINT32<<16 | gc.TUINT32,
		gc.TUINT32<<16 | gc.TINT32,
		gc.TUINT32<<16 | gc.TUINT32:
		a = x86.AMOVL

	case gc.TINT64<<16 | gc.TINT32, // truncate
		gc.TUINT64<<16 | gc.TINT32,
		gc.TINT64<<16 | gc.TUINT32,
		gc.TUINT64<<16 | gc.TUINT32:
		a = x86.AMOVQL

	case gc.TINT64<<16 | gc.TINT64, // same size
		gc.TINT64<<16 | gc.TUINT64,
		gc.TUINT64<<16 | gc.TINT64,
		gc.TUINT64<<16 | gc.TUINT64:
		a = x86.AMOVQ

		/*
		 * integer up-conversions
		 */
	case gc.TINT8<<16 | gc.TINT16, // sign extend int8
		gc.TINT8<<16 | gc.TUINT16:
		a = x86.AMOVBWSX
		goto rdst

	case gc.TINT8<<16 | gc.TINT32,
		gc.TINT8<<16 | gc.TUINT32:
		a = x86.AMOVBLSX
		goto rdst

	case gc.TINT8<<16 | gc.TINT64,
		gc.TINT8<<16 | gc.TUINT64:
		a = x86.AMOVBQSX
		goto rdst

	case gc.TUINT8<<16 | gc.TINT16, // zero extend uint8
		gc.TUINT8<<16 | gc.TUINT16:
		a = x86.AMOVBWZX
		goto rdst

	case gc.TUINT8<<16 | gc.TINT32,
		gc.TUINT8<<16 | gc.TUINT32:
		a = x86.AMOVBLZX
		goto rdst

	case gc.TUINT8<<16 | gc.TINT64,
		gc.TUINT8<<16 | gc.TUINT64:
		a = x86.AMOVBQZX
		goto rdst

	case gc.TINT16<<16 | gc.TINT32, // sign extend int16
		gc.TINT16<<16 | gc.TUINT32:
		a = x86.AMOVWLSX
		goto rdst

	case gc.TINT16<<16 | gc.TINT64,
		gc.TINT16<<16 | gc.TUINT64:
		a = x86.AMOVWQSX
		goto rdst

	case gc.TUINT16<<16 | gc.TINT32, // zero extend uint16
		gc.TUINT16<<16 | gc.TUINT32:
		a = x86.AMOVWLZX
		goto rdst

	case gc.TUINT16<<16 | gc.TINT64,
		gc.TUINT16<<16 | gc.TUINT64:
		a = x86.AMOVWQZX
		goto rdst

	case gc.TINT32<<16 | gc.TINT64, // sign extend int32
		gc.TINT32<<16 | gc.TUINT64:
		a = x86.AMOVLQSX
		goto rdst

		// AMOVL into a register zeros the top of the register,
		// so this is not always necessary, but if we rely on AMOVL
		// the optimizer is almost certain to screw with us.
	case gc.TUINT32<<16 | gc.TINT64, // zero extend uint32
		gc.TUINT32<<16 | gc.TUINT64:
		a = x86.AMOVLQZX
		goto rdst

		/*
		 * float to integer
		 */
	case gc.TFLOAT32<<16 | gc.TINT32:
		a = x86.ACVTTSS2SL
		goto rdst

	case gc.TFLOAT64<<16 | gc.TINT32:
		a = x86.ACVTTSD2SL
		goto rdst

	case gc.TFLOAT32<<16 | gc.TINT64:
		a = x86.ACVTTSS2SQ
		goto rdst

	case gc.TFLOAT64<<16 | gc.TINT64:
		a = x86.ACVTTSD2SQ
		goto rdst

		// convert via int32.
	case gc.TFLOAT32<<16 | gc.TINT16,
		gc.TFLOAT32<<16 | gc.TINT8,
		gc.TFLOAT32<<16 | gc.TUINT16,
		gc.TFLOAT32<<16 | gc.TUINT8,
		gc.TFLOAT64<<16 | gc.TINT16,
		gc.TFLOAT64<<16 | gc.TINT8,
		gc.TFLOAT64<<16 | gc.TUINT16,
		gc.TFLOAT64<<16 | gc.TUINT8:
		cvt = gc.Types[gc.TINT32]
		goto hard

		// convert via int64.
	case gc.TFLOAT32<<16 | gc.TUINT32,
		gc.TFLOAT64<<16 | gc.TUINT32:
		cvt = gc.Types[gc.TINT64]
		goto hard

		// algorithm is:
		//	if small enough, use native float64 -> int64 conversion.
		//	otherwise, subtract 2^63, convert, and add it back.
	case gc.TFLOAT32<<16 | gc.TUINT64,
		gc.TFLOAT64<<16 | gc.TUINT64:
		a := x86.ACVTTSS2SQ
		if ft == gc.TFLOAT64 {
			a = x86.ACVTTSD2SQ
		}
		bignodes()
		var r1 gc.Node
		gc.Regalloc(&r1, gc.Types[ft], nil)
		var r2 gc.Node
		gc.Regalloc(&r2, gc.Types[tt], t)
		var r3 gc.Node
		gc.Regalloc(&r3, gc.Types[ft], nil)
		var r4 gc.Node
		gc.Regalloc(&r4, gc.Types[tt], nil)
		gins(optoas(gc.OAS, f.Type), f, &r1)
		gins(optoas(gc.OCMP, f.Type), &bigf, &r1)
		p1 := gc.Gbranch(optoas(gc.OLE, f.Type), nil, +1)
		gins(a, &r1, &r2)
		p2 := gc.Gbranch(obj.AJMP, nil, 0)
		gc.Patch(p1, gc.Pc)
		// large value: subtract 2^63, convert, then flip the sign bit
		// back in with XOR (equivalent to adding 2^63 as uint64).
		gins(optoas(gc.OAS, f.Type), &bigf, &r3)
		gins(optoas(gc.OSUB, f.Type), &r3, &r1)
		gins(a, &r1, &r2)
		gins(x86.AMOVQ, &bigi, &r4)
		gins(x86.AXORQ, &r4, &r2)
		gc.Patch(p2, gc.Pc)
		gmove(&r2, t)
		gc.Regfree(&r4)
		gc.Regfree(&r3)
		gc.Regfree(&r2)
		gc.Regfree(&r1)
		return

		/*
		 * integer to float
		 */
	case gc.TINT32<<16 | gc.TFLOAT32:
		a = x86.ACVTSL2SS
		goto rdst

	case gc.TINT32<<16 | gc.TFLOAT64:
		a = x86.ACVTSL2SD
		goto rdst

	case gc.TINT64<<16 | gc.TFLOAT32:
		a = x86.ACVTSQ2SS
		goto rdst

	case gc.TINT64<<16 | gc.TFLOAT64:
		a = x86.ACVTSQ2SD
		goto rdst

		// convert via int32
	case gc.TINT16<<16 | gc.TFLOAT32,
		gc.TINT16<<16 | gc.TFLOAT64,
		gc.TINT8<<16 | gc.TFLOAT32,
		gc.TINT8<<16 | gc.TFLOAT64,
		gc.TUINT16<<16 | gc.TFLOAT32,
		gc.TUINT16<<16 | gc.TFLOAT64,
		gc.TUINT8<<16 | gc.TFLOAT32,
		gc.TUINT8<<16 | gc.TFLOAT64:
		cvt = gc.Types[gc.TINT32]
		goto hard

		// convert via int64.
	case gc.TUINT32<<16 | gc.TFLOAT32,
		gc.TUINT32<<16 | gc.TFLOAT64:
		cvt = gc.Types[gc.TINT64]
		goto hard

		// algorithm is:
		//	if small enough, use native int64 -> uint64 conversion.
		//	otherwise, halve (rounding to odd?), convert, and double.
	case gc.TUINT64<<16 | gc.TFLOAT32,
		gc.TUINT64<<16 | gc.TFLOAT64:
		a := x86.ACVTSQ2SS
		if tt == gc.TFLOAT64 {
			a = x86.ACVTSQ2SD
		}
		var zero gc.Node
		gc.Nodconst(&zero, gc.Types[gc.TUINT64], 0)
		var one gc.Node
		gc.Nodconst(&one, gc.Types[gc.TUINT64], 1)
		var r1 gc.Node
		gc.Regalloc(&r1, f.Type, f)
		var r2 gc.Node
		gc.Regalloc(&r2, t.Type, t)
		var r3 gc.Node
		gc.Regalloc(&r3, f.Type, nil)
		var r4 gc.Node
		gc.Regalloc(&r4, f.Type, nil)
		gmove(f, &r1)
		gins(x86.ACMPQ, &r1, &zero)
		p1 := gc.Gbranch(x86.AJLT, nil, +1)
		gins(a, &r1, &r2)
		p2 := gc.Gbranch(obj.AJMP, nil, 0)
		gc.Patch(p1, gc.Pc)
		// top bit set: shift right by one (keeping the low bit OR'd in
		// so rounding is preserved), convert, then double.
		gmove(&r1, &r3)
		gins(x86.ASHRQ, &one, &r3)
		gmove(&r1, &r4)
		gins(x86.AANDL, &one, &r4)
		gins(x86.AORQ, &r4, &r3)
		gins(a, &r3, &r2)
		gins(optoas(gc.OADD, t.Type), &r2, &r2)
		gc.Patch(p2, gc.Pc)
		gmove(&r2, t)
		gc.Regfree(&r4)
		gc.Regfree(&r3)
		gc.Regfree(&r2)
		gc.Regfree(&r1)
		return

		/*
		 * float to float
		 */
	case gc.TFLOAT32<<16 | gc.TFLOAT32:
		a = x86.AMOVSS

	case gc.TFLOAT64<<16 | gc.TFLOAT64:
		a = x86.AMOVSD

	case gc.TFLOAT32<<16 | gc.TFLOAT64:
		a = x86.ACVTSS2SD
		goto rdst

	case gc.TFLOAT64<<16 | gc.TFLOAT32:
		a = x86.ACVTSD2SS
		goto rdst
	}

	gins(a, f, t)
	return

	// requires register destination
rdst:
	{
		var r1 gc.Node
		gc.Regalloc(&r1, t.Type, t)

		gins(a, f, &r1)
		gmove(&r1, t)
		gc.Regfree(&r1)
		return
	}

	// requires register intermediate
hard:
	var r1 gc.Node
	gc.Regalloc(&r1, cvt, t)

	gmove(f, &r1)
	gmove(&r1, t)
	gc.Regfree(&r1)
	return
}

// samaddr reports whether f and t refer to the same register.
// Only OREGISTER operands can compare equal; everything else is false.
func samaddr(f *gc.Node, t *gc.Node) bool {
	if f.Op != t.Op {
		return false
	}

	switch f.Op {
	case gc.OREGISTER:
		if f.Reg != t.Reg {
			break
		}
		return true
	}

	return false
}
/*
 * generate one instruction:
 *	as f, t
 */
func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
	//	Node nod;

	//	if(f != N && f->op == OINDEX) {
	//		gc.Regalloc(&nod, &regnode, Z);
	//		v = constnode.vconst;
	//		gc.Cgen(f->right, &nod);
	//		constnode.vconst = v;
	//		idx.reg = nod.reg;
	//		gc.Regfree(&nod);
	//	}
	//	if(t != N && t->op == OINDEX) {
	//		gc.Regalloc(&nod, &regnode, Z);
	//		v = constnode.vconst;
	//		gc.Cgen(t->right, &nod);
	//		constnode.vconst = v;
	//		idx.reg = nod.reg;
	//		gc.Regfree(&nod);
	//	}

	if f != nil && f.Op == gc.OADDR && (as == x86.AMOVL || as == x86.AMOVQ) {
		// Turn MOVL $xxx into LEAL xxx.
		// These should be equivalent but most of the backend
		// only expects to see LEAL, because that's what we had
		// historically generated. Various hidden assumptions are baked in by now.
		if as == x86.AMOVL {
			as = x86.ALEAL
		} else {
			as = x86.ALEAQ
		}
		f = f.Left
	}

	switch as {
	// A register-to-itself move is a no-op; emit nothing.
	case x86.AMOVB,
		x86.AMOVW,
		x86.AMOVL,
		x86.AMOVQ,
		x86.AMOVSS,
		x86.AMOVSD:
		if f != nil && t != nil && samaddr(f, t) {
			return nil
		}

	case x86.ALEAQ:
		if f != nil && gc.Isconst(f, gc.CTNIL) {
			gc.Fatalf("gins LEAQ nil %v", f.Type)
		}
	}

	p := gc.Prog(as)
	gc.Naddr(&p.From, f)
	gc.Naddr(&p.To, t)

	if gc.Debug['g'] != 0 {
		fmt.Printf("%v\n", p)
	}

	// Sanity-check operand widths against the move size implied by the
	// opcode: the source must be at least w bytes and the destination
	// at most w bytes.
	w := int32(0)
	switch as {
	case x86.AMOVB:
		w = 1

	case x86.AMOVW:
		w = 2

	case x86.AMOVL:
		w = 4

	case x86.AMOVQ:
		w = 8
	}

	if w != 0 && ((f != nil && p.From.Width < int64(w)) || (t != nil && p.To.Width > int64(w))) {
		gc.Dump("f", f)
		gc.Dump("t", t)
		gc.Fatalf("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
	}

	if p.To.Type == obj.TYPE_ADDR && w > 0 {
		gc.Fatalf("bad use of addr: %v", p)
	}

	return p
}

// ginsnop emits a no-op instruction.
func ginsnop() {
	// This is actually not the x86 NOP anymore,
	// but at the point where it gets used, AX is dead
	// so it's okay if we lose the high bits.
	var reg gc.Node
	gc.Nodreg(&reg, gc.Types[gc.TINT], x86.REG_AX)
	gins(x86.AXCHGL, &reg, &reg)
}

/*
 * return Axxx for Oxxx on type t.
 */
func optoas(op gc.Op, t *gc.Type) int {
	if t == nil {
		gc.Fatalf("optoas: t is nil")
	}

	// avoid constant conversions in switches below
	const (
		OMINUS_  = uint32(gc.OMINUS) << 16
		OLSH_    = uint32(gc.OLSH) << 16
		ORSH_    = uint32(gc.ORSH) << 16
		OADD_    = uint32(gc.OADD) << 16
		OSUB_    = uint32(gc.OSUB) << 16
		OMUL_    = uint32(gc.OMUL) << 16
		ODIV_    = uint32(gc.ODIV) << 16
		OMOD_    = uint32(gc.OMOD) << 16
		OOR_     = uint32(gc.OOR) << 16
		OAND_    = uint32(gc.OAND) << 16
		OXOR_    = uint32(gc.OXOR) << 16
		OEQ_     = uint32(gc.OEQ) << 16
		ONE_     = uint32(gc.ONE) << 16
		OLT_     = uint32(gc.OLT) << 16
		OLE_     = uint32(gc.OLE) << 16
		OGE_     = uint32(gc.OGE) << 16
		OGT_     = uint32(gc.OGT) << 16
		OCMP_    = uint32(gc.OCMP) << 16
		OPS_     = uint32(gc.OPS) << 16
		OPC_     = uint32(gc.OPC) << 16
		OAS_     = uint32(gc.OAS) << 16
		OHMUL_   = uint32(gc.OHMUL) << 16
		OSQRT_   = uint32(gc.OSQRT) << 16
		OADDR_   = uint32(gc.OADDR) << 16
		OINC_    = uint32(gc.OINC) << 16
		ODEC_    = uint32(gc.ODEC) << 16
		OLROT_   = uint32(gc.OLROT) << 16
		ORROTC_  = uint32(gc.ORROTC) << 16
		OEXTEND_ = uint32(gc.OEXTEND) << 16
	)

	a := obj.AXXX
	switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
	default:
		gc.Fatalf("optoas: no entry %v-%v", gc.Oconv(int(op), 0), t)

	case OADDR_ | gc.TPTR32:
		a = x86.ALEAL

	case OADDR_ | gc.TPTR64:
		a = x86.ALEAQ

	// Conditional branches. Signed comparisons use JLT/JLE/JGT/JGE;
	// unsigned (and float, via the carry flag) use JCS/JLS/JHI/JCC.
	case OEQ_ | gc.TBOOL,
		OEQ_ | gc.TINT8,
		OEQ_ | gc.TUINT8,
		OEQ_ | gc.TINT16,
		OEQ_ | gc.TUINT16,
		OEQ_ | gc.TINT32,
		OEQ_ | gc.TUINT32,
		OEQ_ | gc.TINT64,
		OEQ_ | gc.TUINT64,
		OEQ_ | gc.TPTR32,
		OEQ_ | gc.TPTR64,
		OEQ_ | gc.TFLOAT32,
		OEQ_ | gc.TFLOAT64:
		a = x86.AJEQ

	case ONE_ | gc.TBOOL,
		ONE_ | gc.TINT8,
		ONE_ | gc.TUINT8,
		ONE_ | gc.TINT16,
		ONE_ | gc.TUINT16,
		ONE_ | gc.TINT32,
		ONE_ | gc.TUINT32,
		ONE_ | gc.TINT64,
		ONE_ | gc.TUINT64,
		ONE_ | gc.TPTR32,
		ONE_ | gc.TPTR64,
		ONE_ | gc.TFLOAT32,
		ONE_ | gc.TFLOAT64:
		a = x86.AJNE

	case OPS_ | gc.TBOOL,
		OPS_ | gc.TINT8,
		OPS_ | gc.TUINT8,
		OPS_ | gc.TINT16,
		OPS_ | gc.TUINT16,
		OPS_ | gc.TINT32,
		OPS_ | gc.TUINT32,
		OPS_ | gc.TINT64,
		OPS_ | gc.TUINT64,
		OPS_ | gc.TPTR32,
		OPS_ | gc.TPTR64,
		OPS_ | gc.TFLOAT32,
		OPS_ | gc.TFLOAT64:
		a = x86.AJPS

	case OPC_ | gc.TBOOL,
		OPC_ | gc.TINT8,
		OPC_ | gc.TUINT8,
		OPC_ | gc.TINT16,
		OPC_ | gc.TUINT16,
		OPC_ | gc.TINT32,
		OPC_ | gc.TUINT32,
		OPC_ | gc.TINT64,
		OPC_ | gc.TUINT64,
		OPC_ | gc.TPTR32,
		OPC_ | gc.TPTR64,
		OPC_ | gc.TFLOAT32,
		OPC_ | gc.TFLOAT64:
		a = x86.AJPC

	case OLT_ | gc.TINT8,
		OLT_ | gc.TINT16,
		OLT_ | gc.TINT32,
		OLT_ | gc.TINT64:
		a = x86.AJLT

	case OLT_ | gc.TUINT8,
		OLT_ | gc.TUINT16,
		OLT_ | gc.TUINT32,
		OLT_ | gc.TUINT64:
		a = x86.AJCS

	case OLE_ | gc.TINT8,
		OLE_ | gc.TINT16,
		OLE_ | gc.TINT32,
		OLE_ | gc.TINT64:
		a = x86.AJLE

	case OLE_ | gc.TUINT8,
		OLE_ | gc.TUINT16,
		OLE_ | gc.TUINT32,
		OLE_ | gc.TUINT64:
		a = x86.AJLS

	case OGT_ | gc.TINT8,
		OGT_ | gc.TINT16,
		OGT_ | gc.TINT32,
		OGT_ | gc.TINT64:
		a = x86.AJGT

	case OGT_ | gc.TUINT8,
		OGT_ | gc.TUINT16,
		OGT_ | gc.TUINT32,
		OGT_ | gc.TUINT64,
		OLT_ | gc.TFLOAT32,
		OLT_ | gc.TFLOAT64:
		a = x86.AJHI

	case OGE_ | gc.TINT8,
		OGE_ | gc.TINT16,
		OGE_ | gc.TINT32,
		OGE_ | gc.TINT64:
		a = x86.AJGE

	case OGE_ | gc.TUINT8,
		OGE_ | gc.TUINT16,
		OGE_ | gc.TUINT32,
		OGE_ | gc.TUINT64,
		OLE_ | gc.TFLOAT32,
		OLE_ | gc.TFLOAT64:
		a = x86.AJCC

	// Compare instructions, by operand width.
	case OCMP_ | gc.TBOOL,
		OCMP_ | gc.TINT8,
		OCMP_ | gc.TUINT8:
		a = x86.ACMPB

	case OCMP_ | gc.TINT16,
		OCMP_ | gc.TUINT16:
		a = x86.ACMPW

	case OCMP_ | gc.TINT32,
		OCMP_ | gc.TUINT32,
		OCMP_ | gc.TPTR32:
		a = x86.ACMPL

	case OCMP_ | gc.TINT64,
		OCMP_ | gc.TUINT64,
		OCMP_ | gc.TPTR64:
		a = x86.ACMPQ

	case OCMP_ | gc.TFLOAT32:
		a = x86.AUCOMISS

	case OCMP_ | gc.TFLOAT64:
		a = x86.AUCOMISD

	// Moves (assignment), by operand width.
	case OAS_ | gc.TBOOL,
		OAS_ | gc.TINT8,
		OAS_ | gc.TUINT8:
		a = x86.AMOVB

	case OAS_ | gc.TINT16,
		OAS_ | gc.TUINT16:
		a = x86.AMOVW

	case OAS_ | gc.TINT32,
		OAS_ | gc.TUINT32,
		OAS_ | gc.TPTR32:
		a = x86.AMOVL

	case OAS_ | gc.TINT64,
		OAS_ | gc.TUINT64,
		OAS_ | gc.TPTR64:
		a = x86.AMOVQ

	case OAS_ | gc.TFLOAT32:
		a = x86.AMOVSS

	case OAS_ | gc.TFLOAT64:
		a = x86.AMOVSD

	case OADD_ | gc.TINT8,
		OADD_ | gc.TUINT8:
		a = x86.AADDB

	case OADD_ | gc.TINT16,
		OADD_ | gc.TUINT16:
		a = x86.AADDW

	case OADD_ | gc.TINT32,
		OADD_ | gc.TUINT32,
		OADD_ | gc.TPTR32:
		a = x86.AADDL

	case OADD_ | gc.TINT64,
		OADD_ | gc.TUINT64,
		OADD_ | gc.TPTR64:
		a = x86.AADDQ

	case OADD_ | gc.TFLOAT32:
		a = x86.AADDSS

	case OADD_ | gc.TFLOAT64:
		a = x86.AADDSD

	case OSUB_ | gc.TINT8,
		OSUB_ | gc.TUINT8:
		a = x86.ASUBB

	case OSUB_ | gc.TINT16,
		OSUB_ | gc.TUINT16:
		a = x86.ASUBW

	case OSUB_ | gc.TINT32,
		OSUB_ | gc.TUINT32,
		OSUB_ | gc.TPTR32:
		a = x86.ASUBL

	case OSUB_ | gc.TINT64,
		OSUB_ | gc.TUINT64,
		OSUB_ | gc.TPTR64:
		a = x86.ASUBQ

	case OSUB_ | gc.TFLOAT32:
		a = x86.ASUBSS

	case OSUB_ | gc.TFLOAT64:
		a = x86.ASUBSD

	case OINC_ | gc.TINT8,
		OINC_ | gc.TUINT8:
		a = x86.AINCB

	case OINC_ | gc.TINT16,
		OINC_ | gc.TUINT16:
		a = x86.AINCW

	case OINC_ | gc.TINT32,
		OINC_ | gc.TUINT32,
		OINC_ | gc.TPTR32:
		a = x86.AINCL

	case OINC_ | gc.TINT64,
		OINC_ | gc.TUINT64,
		OINC_ | gc.TPTR64:
		a = x86.AINCQ

	case ODEC_ | gc.TINT8,
		ODEC_ | gc.TUINT8:
		a = x86.ADECB

	case ODEC_ | gc.TINT16,
		ODEC_ | gc.TUINT16:
		a = x86.ADECW

	case ODEC_ | gc.TINT32,
		ODEC_ | gc.TUINT32,
		ODEC_ | gc.TPTR32:
		a = x86.ADECL

	case ODEC_ | gc.TINT64,
		ODEC_ | gc.TUINT64,
		ODEC_ | gc.TPTR64:
		a = x86.ADECQ

	case OMINUS_ | gc.TINT8,
		OMINUS_ | gc.TUINT8:
		a = x86.ANEGB

	case OMINUS_ | gc.TINT16,
		OMINUS_ | gc.TUINT16:
		a = x86.ANEGW

	case OMINUS_ | gc.TINT32,
		OMINUS_ | gc.TUINT32,
		OMINUS_ | gc.TPTR32:
		a = x86.ANEGL

	case OMINUS_ | gc.TINT64,
		OMINUS_ | gc.TUINT64,
		OMINUS_ | gc.TPTR64:
		a = x86.ANEGQ

	case OAND_ | gc.TBOOL,
		OAND_ | gc.TINT8,
		OAND_ | gc.TUINT8:
		a = x86.AANDB

	case OAND_ | gc.TINT16,
		OAND_ | gc.TUINT16:
		a = x86.AANDW

	case OAND_ | gc.TINT32,
		OAND_ | gc.TUINT32,
		OAND_ | gc.TPTR32:
		a = x86.AANDL

	case OAND_ | gc.TINT64,
		OAND_ | gc.TUINT64,
		OAND_ | gc.TPTR64:
		a = x86.AANDQ

	case OOR_ | gc.TBOOL,
		OOR_ | gc.TINT8,
		OOR_ | gc.TUINT8:
		a = x86.AORB

	case OOR_ | gc.TINT16,
		OOR_ | gc.TUINT16:
		a = x86.AORW

	case OOR_ | gc.TINT32,
		OOR_ | gc.TUINT32,
		OOR_ | gc.TPTR32:
		a = x86.AORL

	case OOR_ | gc.TINT64,
		OOR_ | gc.TUINT64,
		OOR_ | gc.TPTR64:
		a = x86.AORQ

	case OXOR_ | gc.TINT8,
		OXOR_ | gc.TUINT8:
		a = x86.AXORB

	case OXOR_ | gc.TINT16,
		OXOR_ | gc.TUINT16:
		a = x86.AXORW

	case OXOR_ | gc.TINT32,
		OXOR_ | gc.TUINT32,
		OXOR_ | gc.TPTR32:
		a = x86.AXORL

	case OXOR_ | gc.TINT64,
		OXOR_ | gc.TUINT64,
		OXOR_ | gc.TPTR64:
		a = x86.AXORQ

	case OLROT_ | gc.TINT8,
		OLROT_ | gc.TUINT8:
		a = x86.AROLB

	case OLROT_ | gc.TINT16,
		OLROT_ | gc.TUINT16:
		a = x86.AROLW

	case OLROT_ | gc.TINT32,
		OLROT_ | gc.TUINT32,
		OLROT_ | gc.TPTR32:
		a = x86.AROLL

	case OLROT_ | gc.TINT64,
		OLROT_ | gc.TUINT64,
		OLROT_ | gc.TPTR64:
		a = x86.AROLQ

	case OLSH_ | gc.TINT8,
		OLSH_ | gc.TUINT8:
		a = x86.ASHLB

	case OLSH_ | gc.TINT16,
		OLSH_ | gc.TUINT16:
		a = x86.ASHLW

	case OLSH_ | gc.TINT32,
		OLSH_ | gc.TUINT32,
		OLSH_ | gc.TPTR32:
		a = x86.ASHLL

	case OLSH_ | gc.TINT64,
		OLSH_ | gc.TUINT64,
		OLSH_ | gc.TPTR64:
		a = x86.ASHLQ

	// Right shift: logical (SHR) for unsigned, arithmetic (SAR) for signed.
	case ORSH_ | gc.TUINT8:
		a = x86.ASHRB

	case ORSH_ | gc.TUINT16:
		a = x86.ASHRW

	case ORSH_ | gc.TUINT32,
		ORSH_ | gc.TPTR32:
		a = x86.ASHRL

	case ORSH_ | gc.TUINT64,
		ORSH_ | gc.TPTR64:
		a = x86.ASHRQ

	case ORSH_ | gc.TINT8:
		a = x86.ASARB

	case ORSH_ | gc.TINT16:
		a = x86.ASARW

	case ORSH_ | gc.TINT32:
		a = x86.ASARL

	case ORSH_ | gc.TINT64:
		a = x86.ASARQ

	case ORROTC_ | gc.TINT8,
		ORROTC_ | gc.TUINT8:
		a = x86.ARCRB

	case ORROTC_ | gc.TINT16,
		ORROTC_ | gc.TUINT16:
		a = x86.ARCRW

	case ORROTC_ | gc.TINT32,
		ORROTC_ | gc.TUINT32:
		a = x86.ARCRL

	case ORROTC_ | gc.TINT64,
		ORROTC_ | gc.TUINT64:
		a = x86.ARCRQ

	// Multiply: IMUL for signed (and all plain OMUL), MUL for
	// unsigned high-multiply.
	case OHMUL_ | gc.TINT8,
		OMUL_ | gc.TINT8,
		OMUL_ | gc.TUINT8:
		a = x86.AIMULB

	case OHMUL_ | gc.TINT16,
		OMUL_ | gc.TINT16,
		OMUL_ | gc.TUINT16:
		a = x86.AIMULW

	case OHMUL_ | gc.TINT32,
		OMUL_ | gc.TINT32,
		OMUL_ | gc.TUINT32,
		OMUL_ | gc.TPTR32:
		a = x86.AIMULL

	case OHMUL_ | gc.TINT64,
		OMUL_ | gc.TINT64,
		OMUL_ | gc.TUINT64,
		OMUL_ | gc.TPTR64:
		a = x86.AIMULQ

	case OHMUL_ | gc.TUINT8:
		a = x86.AMULB

	case OHMUL_ | gc.TUINT16:
		a = x86.AMULW

	case OHMUL_ | gc.TUINT32,
		OHMUL_ | gc.TPTR32:
		a = x86.AMULL

	case OHMUL_ | gc.TUINT64,
		OHMUL_ | gc.TPTR64:
		a = x86.AMULQ

	case OMUL_ | gc.TFLOAT32:
		a = x86.AMULSS

	case OMUL_ | gc.TFLOAT64:
		a = x86.AMULSD

	// Divide/modulo share the same instruction (quotient in AX,
	// remainder in DX): IDIV signed, DIV unsigned.
	case ODIV_ | gc.TINT8,
		OMOD_ | gc.TINT8:
		a = x86.AIDIVB

	case ODIV_ | gc.TUINT8,
		OMOD_ | gc.TUINT8:
		a = x86.ADIVB

	case ODIV_ | gc.TINT16,
		OMOD_ | gc.TINT16:
		a = x86.AIDIVW

	case ODIV_ | gc.TUINT16,
		OMOD_ | gc.TUINT16:
		a = x86.ADIVW

	case ODIV_ | gc.TINT32,
		OMOD_ | gc.TINT32:
		a = x86.AIDIVL

	case ODIV_ | gc.TUINT32,
		ODIV_ | gc.TPTR32,
		OMOD_ | gc.TUINT32,
		OMOD_ | gc.TPTR32:
		a = x86.ADIVL

	case ODIV_ | gc.TINT64,
		OMOD_ | gc.TINT64:
		a = x86.AIDIVQ

	case ODIV_ | gc.TUINT64,
		ODIV_ | gc.TPTR64,
		OMOD_ | gc.TUINT64,
		OMOD_ | gc.TPTR64:
		a = x86.ADIVQ

	// Sign-extend AX into DX ahead of a signed divide.
	case OEXTEND_ | gc.TINT16:
		a = x86.ACWD

	case OEXTEND_ | gc.TINT32:
		a = x86.ACDQ

	case OEXTEND_ | gc.TINT64:
		a = x86.ACQO

	case ODIV_ | gc.TFLOAT32:
		a = x86.ADIVSS

	case ODIV_ | gc.TFLOAT64:
		a = x86.ADIVSD

	case OSQRT_ | gc.TFLOAT64:
		a = x86.ASQRTSD
	}

	return a
}
// jmptoset returns ASETxx for AJxx.
func jmptoset(jmp int) int {
	switch jmp {
	case x86.AJEQ:
		return x86.ASETEQ
	case x86.AJNE:
		return x86.ASETNE
	case x86.AJLT:
		return x86.ASETLT
	case x86.AJCS:
		return x86.ASETCS
	case x86.AJLE:
		return x86.ASETLE
	case x86.AJLS:
		return x86.ASETLS
	case x86.AJGT:
		return x86.ASETGT
	case x86.AJHI:
		return x86.ASETHI
	case x86.AJGE:
		return x86.ASETGE
	case x86.AJCC:
		return x86.ASETCC
	case x86.AJMI:
		return x86.ASETMI
	case x86.AJOC:
		return x86.ASETOC
	case x86.AJOS:
		return x86.ASETOS
	case x86.AJPC:
		return x86.ASETPC
	case x86.AJPL:
		return x86.ASETPL
	case x86.AJPS:
		return x86.ASETPS
	}
	gc.Fatalf("jmptoset: no entry for %v", gc.Oconv(jmp, 0))
	panic("unreachable")
}

const (
	ODynam   = 1 << 0
	OAddable = 1 << 1
)

// clean/cleani form a small stack of register nodes allocated by
// sudoaddable; sudoclean pops and frees the top pair.
var clean [20]gc.Node

var cleani int = 0

// sudoclean releases the (up to two) registers reserved by the most
// recent successful sudoaddable call.
func sudoclean() {
	if clean[cleani-1].Op != gc.OEMPTY {
		gc.Regfree(&clean[cleani-1])
	}
	if clean[cleani-2].Op != gc.OEMPTY {
		gc.Regfree(&clean[cleani-2])
	}
	cleani -= 2
}

/*
 * generate code to compute address of n,
 * a reference to a (perhaps nested) field inside
 * an array or struct.
 * return 0 on failure, 1 on success.
 * on success, leaves usable address in a.
 *
 * caller is responsible for calling sudoclean
 * after successful sudoaddable,
 * to release the register used for a.
 */
func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
	if n.Type == nil {
		return false
	}

	*a = obj.Addr{}

	switch n.Op {
	case gc.OLITERAL:
		if !gc.Isconst(n, gc.CTINT) {
			break
		}
		// Only small integer constants usable as immediates.
		v := n.Int()
		if v >= 32000 || v <= -32000 {
			break
		}
		// Only for opcodes known to accept an immediate operand.
		switch as {
		default:
			return false

		case x86.AADDB,
			x86.AADDW,
			x86.AADDL,
			x86.AADDQ,
			x86.ASUBB,
			x86.ASUBW,
			x86.ASUBL,
			x86.ASUBQ,
			x86.AANDB,
			x86.AANDW,
			x86.AANDL,
			x86.AANDQ,
			x86.AORB,
			x86.AORW,
			x86.AORL,
			x86.AORQ,
			x86.AXORB,
			x86.AXORW,
			x86.AXORL,
			x86.AXORQ,
			x86.AINCB,
			x86.AINCW,
			x86.AINCL,
			x86.AINCQ,
			x86.ADECB,
			x86.ADECW,
			x86.ADECL,
			x86.ADECQ,
			x86.AMOVB,
			x86.AMOVW,
			x86.AMOVL,
			x86.AMOVQ:
			break
		}

		// Push an empty pair so the caller's sudoclean is balanced.
		cleani += 2
		reg := &clean[cleani-1]
		reg1 := &clean[cleani-2]
		reg.Op = gc.OEMPTY
		reg1.Op = gc.OEMPTY
		gc.Naddr(a, n)
		return true

	case gc.ODOT,
		gc.ODOTPTR:
		cleani += 2
		reg := &clean[cleani-1]
		reg1 := &clean[cleani-2]
		reg.Op = gc.OEMPTY
		reg1.Op = gc.OEMPTY
		var nn *gc.Node
		var oary [10]int64
		o := gc.Dotoffset(n, oary[:], &nn)
		if nn == nil {
			sudoclean()
			return false
		}

		if nn.Addable && o == 1 && oary[0] >= 0 {
			// directly addressable set of DOTs
			n1 := *nn

			n1.Type = n.Type
			n1.Xoffset += oary[0]
			gc.Naddr(a, &n1)
			return true
		}

		// Otherwise compute the base address into a register and walk
		// the chain of (possibly indirect) field offsets.
		// A negative oary entry encodes an indirection at offset
		// -(oary[i]+1), which requires a nil check before dereferencing.
		gc.Regalloc(reg, gc.Types[gc.Tptr], nil)
		n1 := *reg
		n1.Op = gc.OINDREG
		if oary[0] >= 0 {
			gc.Agen(nn, reg)
			n1.Xoffset = oary[0]
		} else {
			gc.Cgen(nn, reg)
			gc.Cgen_checknil(reg)
			n1.Xoffset = -(oary[0] + 1)
		}

		for i := 1; i < o; i++ {
			if oary[i] >= 0 {
				gc.Fatalf("can't happen")
			}
			gins(movptr, &n1, reg)
			gc.Cgen_checknil(reg)
			n1.Xoffset = -(oary[i] + 1)
		}

		a.Type = obj.TYPE_NONE
		a.Index = obj.TYPE_NONE
		gc.Fixlargeoffset(&n1)
		gc.Naddr(a, &n1)
		return true

	case gc.OINDEX:
		return false
	}

	return false
}