github.com/q45/go@v0.0.0-20151101211701-a4fb8c13db3f/src/cmd/compile/internal/mips64/cgen.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ppc64

import (
	"cmd/compile/internal/gc"
	"cmd/internal/obj"
	"cmd/internal/obj/ppc64"
)

func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
	// determine alignment.
	// want to avoid unaligned access, so have to use
	// smaller operations for less aligned types.
	// for example moving [4]byte must use 4 MOVB not 1 MOVW.
	align := int(n.Type.Align)

	var op int
	switch align {
	default:
		gc.Fatalf("sgen: invalid alignment %d for %v", align, n.Type)

	case 1:
		op = ppc64.AMOVBU

	case 2:
		op = ppc64.AMOVHU

	case 4:
		op = ppc64.AMOVWZU // there is no lwau, only lwaux

	case 8:
		op = ppc64.AMOVDU
	}

	if w%int64(align) != 0 {
		gc.Fatalf("sgen: unaligned size %d (align=%d) for %v", w, align, n.Type)
	}
	c := int32(w / int64(align))

	// if we are copying forward on the stack and
	// the src and dst overlap, then reverse direction
	dir := align

	if osrc < odst && int64(odst) < int64(osrc)+w {
		dir = -dir
	}

	var dst gc.Node
	var src gc.Node
	if n.Ullman >= res.Ullman {
		gc.Agenr(n, &dst, res) // temporarily use dst
		gc.Regalloc(&src, gc.Types[gc.Tptr], nil)
		gins(ppc64.AMOVD, &dst, &src)
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		gc.Agen(res, &dst)
	} else {
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		gc.Agenr(res, &dst, res)
		gc.Agenr(n, &src, nil)
	}

	var tmp gc.Node
	gc.Regalloc(&tmp, gc.Types[gc.Tptr], nil)

	// set up end marker
	var nend gc.Node

	// move src and dest to the end of block if necessary
	if dir < 0 {
		if c >= 4 {
			gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
			gins(ppc64.AMOVD, &src, &nend)
		}

		p := gins(ppc64.AADD, nil, &src)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = w

		p = gins(ppc64.AADD, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = w
	} else {
		p := gins(ppc64.AADD, nil, &src)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(-dir)

		p = gins(ppc64.AADD, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(-dir)

		if c >= 4 {
			gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
			p := gins(ppc64.AMOVD, &src, &nend)
			p.From.Type = obj.TYPE_ADDR
			p.From.Offset = w
		}
	}

	// move
	// TODO: enable duffcopy for larger copies.
	if c >= 4 {
		p := gins(op, &src, &tmp)
		p.From.Type = obj.TYPE_MEM
		p.From.Offset = int64(dir)
		ploop := p

		p = gins(op, &tmp, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = int64(dir)

		p = gins(ppc64.ACMP, &src, &nend)

		gc.Patch(gc.Gbranch(ppc64.ABNE, nil, 0), ploop)
		gc.Regfree(&nend)
	} else {
		// TODO(austin): Instead of generating ADD $-8,R8; ADD
		// $-8,R7; n*(MOVDU 8(R8),R9; MOVDU R9,8(R7);) just
		// generate the offsets directly and eliminate the
		// ADDs. That will produce shorter, more
		// pipeline-able code.
		for ; c > 0; c-- {
			p := gins(op, &src, &tmp)
			p.From.Type = obj.TYPE_MEM
			p.From.Offset = int64(dir)

			p = gins(op, &tmp, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = int64(dir)
		}
	}

	gc.Regfree(&dst)
	gc.Regfree(&src)
	gc.Regfree(&tmp)
}
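
// A rough sketch of what the c >= 4 path of blockcopy emits for a
// forward, 8-byte-aligned 32-byte copy (c=4, dir=8). The register names
// are illustrative assumptions, not what regalloc actually assigns:
//
//	ADD   $-8, Rsrc          // back src/dst up one element so the
//	ADD   $-8, Rdst          // pre-increment moves start at offset 0
//	MOVD  $32(Rsrc), Rend    // Rend = address of the last element
//	loop:
//	MOVDU 8(Rsrc), Rtmp      // bump Rsrc by 8, then load
//	MOVDU Rtmp, 8(Rdst)      // bump Rdst by 8, then store
//	CMP   Rsrc, Rend
//	BNE   loop               // done once Rsrc reaches the last element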
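
// The dir computation near the top of blockcopy is the memmove rule:
// when the destination starts inside the source range, a forward copy
// would overwrite source bytes before reading them, so the copy must run
// backward. A minimal standalone sketch of the same check (hypothetical
// helper for illustration only, not part of the compiler), copying w
// bytes between two regions of one shared buffer:
func overlapCopySketch(buf []byte, odst, osrc, w int64) {
	if osrc < odst && odst < osrc+w {
		// dst overlaps the tail of src: copy backward so each byte
		// is read before a forward pass would have clobbered it.
		for i := w - 1; i >= 0; i-- {
			buf[odst+i] = buf[osrc+i]
		}
		return
	}
	// No overlap hazard in this direction: copy forward.
	for i := int64(0); i < w; i++ {
		buf[odst+i] = buf[osrc+i]
	}
}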