github.com/goproxy0/go@v0.0.0-20171111080102-49cc0c489d2c/src/cmd/compile/internal/gc/range.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gc

import (
	"cmd/compile/internal/types"
	"cmd/internal/objabi"
	"cmd/internal/sys"
	"unicode/utf8"
)

// range
func typecheckrange(n *Node) {
	var toomany bool
	var why string
	var t1 *types.Type
	var t2 *types.Type
	var v1 *Node
	var v2 *Node
	var ls []*Node

	// Typechecking order is important here:
	// 0. first typecheck range expression (slice/map/chan),
	//	it is evaluated only once and so logically it is not part of the loop.
	// 1. typecheck produced values,
	//	this part can declare new vars and so it must be typechecked before body,
	//	because body can contain a closure that captures the vars.
	// 2. decldepth++ to denote loop body.
	// 3. typecheck body.
	// 4. decldepth--.

	n.Right = typecheck(n.Right, Erv)

	t := n.Right.Type
	if t == nil {
		goto out
	}
	// delicate little dance. see typecheckas2
	ls = n.List.Slice()
	for i1, n1 := range ls {
		if n1.Name == nil || n1.Name.Defn != n {
			ls[i1] = typecheck(ls[i1], Erv|Easgn)
		}
	}

	if t.IsPtr() && t.Elem().IsArray() {
		t = t.Elem()
	}
	n.Type = t

	toomany = false
	switch t.Etype {
	default:
		yyerrorl(n.Pos, "cannot range over %L", n.Right)
		goto out

	case TARRAY, TSLICE:
		t1 = types.Types[TINT]
		t2 = t.Elem()

	case TMAP:
		t1 = t.Key()
		t2 = t.Val()

	case TCHAN:
		if !t.ChanDir().CanRecv() {
			yyerrorl(n.Pos, "invalid operation: range %v (receive from send-only type %v)", n.Right, n.Right.Type)
			goto out
		}

		t1 = t.Elem()
		t2 = nil
		if n.List.Len() == 2 {
			toomany = true
		}

	case TSTRING:
		t1 = types.Types[TINT]
		t2 = types.Runetype
	}

	if n.List.Len() > 2 || toomany {
		yyerrorl(n.Pos, "too many variables in range")
	}

	v1 = nil
	if n.List.Len() != 0 {
		v1 = n.List.First()
	}
	v2 = nil
	if n.List.Len() > 1 {
		v2 = n.List.Second()
	}

	// this is not only an optimization but also a requirement in the spec.
	// "if the second iteration variable is the blank identifier, the range
	// clause is equivalent to the same clause with only the first variable
	// present."
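	// For example (an illustrative sketch, not code from this file),
	//
	//	for i, _ := range s { ... }
	//
	// must be typechecked exactly like
	//
	//	for i := range s { ... }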
	if isblank(v2) {
		if v1 != nil {
			n.List.Set1(v1)
		}
		v2 = nil
	}

	if v1 != nil {
		if v1.Name != nil && v1.Name.Defn == n {
			v1.Type = t1
		} else if v1.Type != nil && assignop(t1, v1.Type, &why) == 0 {
			yyerrorl(n.Pos, "cannot assign type %v to %L in range%s", t1, v1, why)
		}
		checkassign(n, v1)
	}

	if v2 != nil {
		if v2.Name != nil && v2.Name.Defn == n {
			v2.Type = t2
		} else if v2.Type != nil && assignop(t2, v2.Type, &why) == 0 {
			yyerrorl(n.Pos, "cannot assign type %v to %L in range%s", t2, v2, why)
		}
		checkassign(n, v2)
	}

	// second half of dance
out:
	n.SetTypecheck(1)
	ls = n.List.Slice()
	for i1, n1 := range ls {
		if n1.Typecheck() == 0 {
			ls[i1] = typecheck(ls[i1], Erv|Easgn)
		}
	}

	decldepth++
	typecheckslice(n.Nbody.Slice(), Etop)
	decldepth--
}

func cheapComputableIndex(width int64) bool {
	switch thearch.LinkArch.Family {
	// MIPS does not have R+R addressing
	// Arm64 may lack ability to generate this code in our assembler,
	// but the architecture supports it.
	case sys.PPC64, sys.S390X:
		return width == 1
	case sys.AMD64, sys.I386, sys.ARM64, sys.ARM:
		switch width {
		case 1, 2, 4, 8:
			return true
		}
	}
	return false
}

// walkrange transforms various forms of ORANGE into
// simpler forms. The result must be assigned back to n.
// Node n may also be modified in place, and may also be
// the returned node.
func walkrange(n *Node) *Node {
	// variable name conventions:
	//	ohv1, hv1, hv2: hidden (old) val 1, 2
	//	ha, hit: hidden aggregate, iterator
	//	hn, hp: hidden len, pointer
	//	hb: hidden bool
	//	a, v1, v2: not hidden aggregate, val 1, 2

	t := n.Type

	a := n.Right
	lno := setlineno(a)
	n.Right = nil

	var v1, v2 *Node
	l := n.List.Len()
	if l > 0 {
		v1 = n.List.First()
	}

	if l > 1 {
		v2 = n.List.Second()
	}

	if isblank(v2) {
		v2 = nil
	}

	if isblank(v1) && v2 == nil {
		v1 = nil
	}

	if v1 == nil && v2 != nil {
		Fatalf("walkrange: v2 != nil while v1 == nil")
	}

	// n.List has no meaning anymore, clear it
	// to avoid erroneous processing by racewalk.
	n.List.Set(nil)

	var ifGuard *Node

	translatedLoopOp := OFOR

	var body []*Node
	var init []*Node
	switch t.Etype {
	default:
		Fatalf("walkrange")

	case TARRAY, TSLICE:
		if memclrrange(n, v1, v2, a) {
			lineno = lno
			return n
		}

		// orderstmt arranged for a copy of the array/slice variable if needed.
		ha := a

		hv1 := temp(types.Types[TINT])
		hn := temp(types.Types[TINT])

		init = append(init, nod(OAS, hv1, nil))
		init = append(init, nod(OAS, hn, nod(OLEN, ha, nil)))

		n.Left = nod(OLT, hv1, hn)
		n.Right = nod(OAS, hv1, nod(OADD, hv1, nodintconst(1)))

		// for range ha { body }
		if v1 == nil {
			break
		}

		// for v1 := range ha { body }
		if v2 == nil {
			body = []*Node{nod(OAS, v1, hv1)}
			break
		}

		// for v1, v2 := range ha { body }
		if cheapComputableIndex(n.Type.Elem().Width) {
			// v1, v2 = hv1, ha[hv1]
			tmp := nod(OINDEX, ha, hv1)
			tmp.SetBounded(true)
			// Use OAS2 to correctly handle assignments
			// of the form "v1, a[v1] := range".
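			// Why OAS2 matters, as an illustrative sketch (not code from
			// this file): in
			//
			//	for v1, a[v1] = range ha { ... }
			//
			// the iteration values are assigned as one tuple assignment, so
			// a[v1] must be indexed with the value v1 held before the new
			// index is stored. Two separate OAS nodes would update v1 first
			// and then index a with the new value.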
			a := nod(OAS2, nil, nil)
			a.List.Set2(v1, v2)
			a.Rlist.Set2(hv1, tmp)
			body = []*Node{a}
			break
		}

		if objabi.Preemptibleloops_enabled != 0 {
			// Doing this transformation makes a bounds check removal less trivial; see #20711
			// TODO enhance the preemption check insertion so that this transformation is not necessary.
			ifGuard = nod(OIF, nil, nil)
			ifGuard.Left = nod(OLT, hv1, hn)
			translatedLoopOp = OFORUNTIL
		}

		hp := temp(types.NewPtr(n.Type.Elem()))
		tmp := nod(OINDEX, ha, nodintconst(0))
		tmp.SetBounded(true)
		init = append(init, nod(OAS, hp, nod(OADDR, tmp, nil)))

		// Use OAS2 to correctly handle assignments
		// of the form "v1, a[v1] := range".
		a := nod(OAS2, nil, nil)
		a.List.Set2(v1, v2)
		a.Rlist.Set2(hv1, nod(OIND, hp, nil))
		body = append(body, a)

		// Advance pointer as part of increment.
		// We used to advance the pointer before executing the loop body,
		// but doing so would make the pointer point past the end of the
		// array during the final iteration, possibly causing another unrelated
		// piece of memory not to be garbage collected until the loop finished.
		// Advancing during the increment ensures that the pointer p only points
		// past the end of the array during the final "p++; i++; if(i >= len(x)) break;",
		// after which p is dead, so it cannot confuse the collector.
		tmp = nod(OADD, hp, nodintconst(t.Elem().Width))

		tmp.Type = hp.Type
		tmp.SetTypecheck(1)
		tmp.Right.Type = types.Types[types.Tptr]
		tmp.Right.SetTypecheck(1)
		a = nod(OAS, hp, tmp)
		a = typecheck(a, Etop)
		n.Right.Ninit.Set1(a)

	case TMAP:
		// orderstmt allocated the iterator for us.
		// we only use a once, so no copy needed.
		ha := a

		hit := prealloc[n]
		th := hit.Type
		n.Left = nil
		keysym := th.Field(0).Sym // depends on layout of iterator struct. See reflect.go:hiter
		valsym := th.Field(1).Sym // ditto

		fn := syslook("mapiterinit")

		fn = substArgTypes(fn, t.Key(), t.Val(), th)
		init = append(init, mkcall1(fn, nil, nil, typename(t), ha, nod(OADDR, hit, nil)))
		n.Left = nod(ONE, nodSym(ODOT, hit, keysym), nodnil())

		fn = syslook("mapiternext")
		fn = substArgTypes(fn, th)
		n.Right = mkcall1(fn, nil, nil, nod(OADDR, hit, nil))

		key := nodSym(ODOT, hit, keysym)
		key = nod(OIND, key, nil)
		if v1 == nil {
			body = nil
		} else if v2 == nil {
			body = []*Node{nod(OAS, v1, key)}
		} else {
			val := nodSym(ODOT, hit, valsym)
			val = nod(OIND, val, nil)
			a := nod(OAS2, nil, nil)
			a.List.Set2(v1, v2)
			a.Rlist.Set2(key, val)
			body = []*Node{a}
		}

	case TCHAN:
		// orderstmt arranged for a copy of the channel variable.
		ha := a

		n.Left = nil

		hv1 := temp(t.Elem())
		hv1.SetTypecheck(1)
		if types.Haspointers(t.Elem()) {
			init = append(init, nod(OAS, hv1, nil))
		}
		hb := temp(types.Types[TBOOL])

		n.Left = nod(ONE, hb, nodbool(false))
		a := nod(OAS2RECV, nil, nil)
		a.SetTypecheck(1)
		a.List.Set2(hv1, hb)
		a.Rlist.Set1(nod(ORECV, ha, nil))
		n.Left.Ninit.Set1(a)
		if v1 == nil {
			body = nil
		} else {
			body = []*Node{nod(OAS, v1, hv1)}
		}
		// Zero hv1. This prevents hv1 from being the sole, inaccessible
		// reference to an otherwise GC-able value during the next channel receive.
		// See issue 15281.
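		// (An illustrative sketch, not code from this file: in
		// "for v1 = range ha { ... }", once the body is done with v1, the
		// hidden hv1 would otherwise keep the last received value reachable
		// while the loop blocks in the next receive.)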
		body = append(body, nod(OAS, hv1, nil))

	case TSTRING:
		// Transform string range statements like "for v1, v2 = range a" into
		//
		//	ha := a
		//	for hv1 := 0; hv1 < len(ha); {
		//		hv1t := hv1
		//		hv2 := rune(ha[hv1])
		//		if hv2 < utf8.RuneSelf {
		//			hv1++
		//		} else {
		//			hv2, hv1 = decoderune(ha, hv1)
		//		}
		//		v1, v2 = hv1t, hv2
		//		// original body
		//	}

		// orderstmt arranged for a copy of the string variable.
		ha := a

		hv1 := temp(types.Types[TINT])
		hv1t := temp(types.Types[TINT])
		hv2 := temp(types.Runetype)

		// hv1 := 0
		init = append(init, nod(OAS, hv1, nil))

		// hv1 < len(ha)
		n.Left = nod(OLT, hv1, nod(OLEN, ha, nil))

		if v1 != nil {
			// hv1t = hv1
			body = append(body, nod(OAS, hv1t, hv1))
		}

		// hv2 := rune(ha[hv1])
		nind := nod(OINDEX, ha, hv1)
		nind.SetBounded(true)
		body = append(body, nod(OAS, hv2, conv(nind, types.Runetype)))

		// if hv2 < utf8.RuneSelf
		nif := nod(OIF, nil, nil)
		nif.Left = nod(OLT, hv2, nodintconst(utf8.RuneSelf))

		// hv1++
		nif.Nbody.Set1(nod(OAS, hv1, nod(OADD, hv1, nodintconst(1))))

		// } else {
		eif := nod(OAS2, nil, nil)
		nif.Rlist.Set1(eif)

		// hv2, hv1 = decoderune(ha, hv1)
		eif.List.Set2(hv2, hv1)
		fn := syslook("decoderune")
		eif.Rlist.Set1(mkcall1(fn, fn.Type.Results(), nil, ha, hv1))

		body = append(body, nif)

		if v1 != nil {
			if v2 != nil {
				// v1, v2 = hv1t, hv2
				a := nod(OAS2, nil, nil)
				a.List.Set2(v1, v2)
				a.Rlist.Set2(hv1t, hv2)
				body = append(body, a)
			} else {
				// v1 = hv1t
				body = append(body, nod(OAS, v1, hv1t))
			}
		}
	}

	n.Op = translatedLoopOp
	typecheckslice(init, Etop)

	if ifGuard != nil {
		ifGuard.Ninit.Append(init...)
		typecheckslice(ifGuard.Left.Ninit.Slice(), Etop)
		ifGuard.Left = typecheck(ifGuard.Left, Erv)
	} else {
		n.Ninit.Append(init...)
	}

	typecheckslice(n.Left.Ninit.Slice(), Etop)

	n.Left = typecheck(n.Left, Erv)
	n.Right = typecheck(n.Right, Etop)
	typecheckslice(body, Etop)
	n.Nbody.Prepend(body...)

	if ifGuard != nil {
		ifGuard.Nbody.Set1(n)
		n = ifGuard
	}

	n = walkstmt(n)

	lineno = lno
	return n
}

// Lower n into runtime·memclr if possible, for
// fast zeroing of slices and arrays (issue 5373).
// Look for instances of
//
//	for i := range a {
//		a[i] = zero
//	}
//
// in which the evaluation of a is side-effect-free.
//
// Parameters are as in walkrange: "for v1, v2 = range a".
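// On a match, the loop is rewritten into roughly the following form (a sketch
// of what the function body below builds; hp and hn are hidden temporaries):
//
//	if len(a) != 0 {
//		hp = &a[0]
//		hn = len(a) * sizeof(elem(a))
//		memclr{NoHeap,Has}Pointers(hp, hn) // chosen by whether elem(a) has pointers
//		i = len(a) - 1                     // leave the index with the value the loop would produce
//	}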
func memclrrange(n, v1, v2, a *Node) bool {
	if Debug['N'] != 0 || instrumenting {
		return false
	}
	if v1 == nil || v2 != nil {
		return false
	}
	if n.Nbody.Len() == 0 || n.Nbody.First() == nil || n.Nbody.Len() > 1 {
		return false
	}
	stmt := n.Nbody.First() // only stmt in body
	if stmt.Op != OAS || stmt.Left.Op != OINDEX {
		return false
	}
	if !samesafeexpr(stmt.Left.Left, a) || !samesafeexpr(stmt.Left.Right, v1) {
		return false
	}
	elemsize := n.Type.Elem().Width
	if elemsize <= 0 || !iszero(stmt.Right) {
		return false
	}

	// Convert to
	//	if len(a) != 0 {
	//		hp = &a[0]
	//		hn = len(a)*sizeof(elem(a))
	//		memclr{NoHeap,Has}Pointers(hp, hn)
	//		i = len(a) - 1
	//	}
	n.Op = OIF

	n.Nbody.Set(nil)
	n.Left = nod(ONE, nod(OLEN, a, nil), nodintconst(0))

	// hp = &a[0]
	hp := temp(types.Types[TUNSAFEPTR])

	tmp := nod(OINDEX, a, nodintconst(0))
	tmp.SetBounded(true)
	tmp = nod(OADDR, tmp, nil)
	tmp = nod(OCONVNOP, tmp, nil)
	tmp.Type = types.Types[TUNSAFEPTR]
	n.Nbody.Append(nod(OAS, hp, tmp))

	// hn = len(a) * sizeof(elem(a))
	hn := temp(types.Types[TUINTPTR])

	tmp = nod(OLEN, a, nil)
	tmp = nod(OMUL, tmp, nodintconst(elemsize))
	tmp = conv(tmp, types.Types[TUINTPTR])
	n.Nbody.Append(nod(OAS, hn, tmp))

	var fn *Node
	if types.Haspointers(a.Type.Elem()) {
		// memclrHasPointers(hp, hn)
		fn = mkcall("memclrHasPointers", nil, nil, hp, hn)
	} else {
		// memclrNoHeapPointers(hp, hn)
		fn = mkcall("memclrNoHeapPointers", nil, nil, hp, hn)
	}

	n.Nbody.Append(fn)

	// i = len(a) - 1
	v1 = nod(OAS, v1, nod(OSUB, nod(OLEN, a, nil), nodintconst(1)))

	n.Nbody.Append(v1)

	n.Left = typecheck(n.Left, Erv)
	typecheckslice(n.Nbody.Slice(), Etop)
	n = walkstmt(n)
	return true
}