github.com/4ad/go@v0.0.0-20161219182952-69a12818b605/src/cmd/compile/internal/sparc64/cgen.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sparc64

import (
	"cmd/compile/internal/gc"
	"cmd/internal/obj"
	"cmd/internal/obj/sparc64"
)

// blockcopy generates code to copy w bytes from n to res.
// osrc and odst are the stack offsets of the source and destination,
// used to detect overlap and pick the copy direction.
func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
	// determine alignment.
	// want to avoid unaligned access, so have to use
	// smaller operations for less aligned types.
	// for example moving [4]byte must use 4 MOVB not 1 MOVW.
	align := int(n.Type.Align)

	var op obj.As
	switch align {
	default:
		gc.Fatalf("sgen: invalid alignment %d for %v", align, n.Type)

	case 1:
		op = sparc64.AMOVB

	case 2:
		op = sparc64.AMOVH

	case 4:
		op = sparc64.AMOVW

	case 8:
		op = sparc64.AMOVD
	}

	if w%int64(align) != 0 {
		gc.Fatalf("sgen: unaligned size %d (align=%d) for %v", w, align, n.Type)
	}
	c := int32(w / int64(align))

	if osrc%int64(align) != 0 || odst%int64(align) != 0 {
		gc.Fatalf("sgen: unaligned offset src %d or dst %d (align %d)", osrc, odst, align)
	}

	// if we are copying forward on the stack and
	// the src and dst overlap, then reverse direction
	dir := align

	if osrc < odst && odst < osrc+w {
		dir = -dir
	}

	var dst gc.Node
	var src gc.Node
	if n.Ullman >= res.Ullman {
		gc.Agenr(n, &dst, res) // temporarily use dst
		gc.Regalloc(&src, gc.Types[gc.Tptr], nil)
		gins(sparc64.AMOVD, &dst, &src)
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		gc.Agen(res, &dst)
	} else {
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		gc.Agenr(res, &dst, res)
		gc.Agenr(n, &src, nil)
	}

	var tmp, tmp1 gc.Node
	gc.Regalloc(&tmp, gc.Types[gc.Tptr], nil)
	gc.Regalloc(&tmp1, gc.Types[gc.Tptr], nil)

	// set up end marker
	var nend gc.Node

	// move src and dest to the end of block if necessary
	if dir < 0 {
		if c >= 4 {
			gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
			gins(sparc64.AMOVD, &src, &nend)
		}

		p := gins(sparc64.AADD, nil, &src)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = w

		p = gins(sparc64.AADD, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = w
	} else {
		p := gins(sparc64.AADD, nil, &src)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(-dir)

		p = gins(sparc64.AADD, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(-dir)

		if c >= 4 {
			gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
			p := gins(sparc64.AMOVD, &src, &nend)
			p.From.Type = obj.TYPE_ADDR
			p.From.Offset = w
		}
	}

	// move
	// TODO: enable duffcopy for larger copies.
	if c >= 4 {
		// TODO(aram): instead of manually updating both src and dst, update
		// only the index register and change the comparison.
		ginscon(sparc64.AMOVD, int64(dir), &tmp1)

		p := gins(op, &src, &tmp)
		p.From.Type = obj.TYPE_MEM
		p.From.Index = tmp1.Reg
		p.From.Scale = 1
		ploop := p

		p = gins(sparc64.AADD, &tmp1, &src)

		p = gins(op, &tmp, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Index = tmp1.Reg
		p.To.Scale = 1

		p = gins(sparc64.AADD, &tmp1, &dst)

		p = gcmp(sparc64.ACMP, &src, &nend)

		gc.Patch(gc.Gbranch(sparc64.ABNED, nil, 0), ploop)
		gc.Regfree(&nend)
	} else {
		// TODO(aram): instead of manually updating both src and dst, update
		// only the index register.
		ginscon(sparc64.AMOVD, int64(dir), &tmp1)
		var p *obj.Prog
		for ; c > 0; c-- {
			p = gins(op, &src, &tmp)
			p.From.Type = obj.TYPE_MEM
			p.From.Index = tmp1.Reg
			p.From.Scale = 1

			p = gins(sparc64.AADD, &tmp1, &src)

			p = gins(op, &tmp, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Index = tmp1.Reg
			p.To.Scale = 1

			p = gins(sparc64.AADD, &tmp1, &dst)
		}
	}

	gc.Regfree(&dst)
	gc.Regfree(&src)
	gc.Regfree(&tmp)
	gc.Regfree(&tmp1)
}