// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package mips64

import (
	"cmd/compile/internal/gc"
	"cmd/internal/obj"
	"cmd/internal/obj/mips"
)

// blockcopy emits MIPS64 instructions that copy w bytes from the memory
// named by n to the memory named by res. osrc and odst are the byte
// offsets of the source and destination (used to detect overlap on the
// stack); when the regions overlap with src below dst, the copy runs
// backward so data is not clobbered mid-copy.
func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
	// determine alignment.
	// want to avoid unaligned access, so have to use
	// smaller operations for less aligned types.
	// for example moving [4]byte must use 4 MOVB not 1 MOVW.
	align := int(n.Type.Align)

	// Pick the widest move instruction the alignment allows.
	var op int
	switch align {
	default:
		gc.Fatalf("sgen: invalid alignment %d for %v", align, n.Type)

	case 1:
		op = mips.AMOVB

	case 2:
		op = mips.AMOVH

	case 4:
		op = mips.AMOVW

	case 8:
		op = mips.AMOVV
	}

	if w%int64(align) != 0 {
		gc.Fatalf("sgen: unaligned size %d (align=%d) for %v", w, align, n.Type)
	}
	c := int32(w / int64(align)) // number of individual moves of width align

	// if we are copying forward on the stack and
	// the src and dst overlap, then reverse direction
	dir := align

	if osrc < odst && int64(odst) < int64(osrc)+w {
		dir = -dir
	}

	// Materialize the source and destination addresses in registers.
	// The side with the larger Ullman number is evaluated first
	// (presumably to minimize register pressure — standard sgen order).
	var dst gc.Node
	var src gc.Node
	if n.Ullman >= res.Ullman {
		gc.Agenr(n, &dst, res) // temporarily use dst
		gc.Regalloc(&src, gc.Types[gc.Tptr], nil)
		gins(mips.AMOVV, &dst, &src)
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		gc.Agen(res, &dst)
	} else {
		if res.Op == gc.ONAME {
			gc.Gvardef(res)
		}
		gc.Agenr(res, &dst, res)
		gc.Agenr(n, &src, nil)
	}

	// Scratch register each element is moved through.
	var tmp gc.Node
	gc.Regalloc(&tmp, gc.Types[gc.Tptr], nil)

	// set up end marker
	var nend gc.Node

	// move src and dest to the end of block if necessary
	if dir < 0 {
		// Backward copy: nend records the original (lowest) src
		// address, then src and dst are advanced past the end so the
		// loop's negative offsets walk back down toward nend.
		if c >= 4 {
			gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
			gins(mips.AMOVV, &src, &nend)
		}

		p := gins(mips.AADDV, nil, &src)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = w

		p = gins(mips.AADDV, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = w
	} else {
		// Forward copy: pre-decrement src and dst by one element so
		// the loop's load/store at offset dir lands on the first
		// element; nend is the (pre-decremented) src address plus w,
		// i.e. the address of the last element to load.
		p := gins(mips.AADDV, nil, &src)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(-dir)

		p = gins(mips.AADDV, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(-dir)

		if c >= 4 {
			gc.Regalloc(&nend, gc.Types[gc.Tptr], nil)
			p := gins(mips.AMOVV, &src, &nend)
			p.From.Type = obj.TYPE_ADDR
			p.From.Offset = w
		}
	}

	// move
	// TODO: enable duffcopy for larger copies.
	if c >= 4 {
		// Loop form for 4 or more elements:
		//   ploop: load  dir(src) -> tmp ; src += dir
		//          store tmp -> dir(dst) ; dst += dir
		//          BNE src, nend -> ploop
		p := gins(op, &src, &tmp)
		p.From.Type = obj.TYPE_MEM
		p.From.Offset = int64(dir)
		ploop := p

		p = gins(mips.AADDV, nil, &src)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(dir)

		p = gins(op, &tmp, &dst)
		p.To.Type = obj.TYPE_MEM
		p.To.Offset = int64(dir)

		p = gins(mips.AADDV, nil, &dst)
		p.From.Type = obj.TYPE_CONST
		p.From.Offset = int64(dir)

		gc.Patch(ginsbranch(mips.ABNE, nil, &src, &nend, 0), ploop)
		gc.Regfree(&nend)
	} else {
		// Fewer than 4 elements: emit the load/advance/store/advance
		// sequence fully unrolled, once per element.
		// TODO: Instead of generating ADDV $-8,R8; ADDV
		// $-8,R7; n*(MOVV 8(R8),R9; ADDV $8,R8; MOVV R9,8(R7);
		// ADDV $8,R7;) just generate the offsets directly and
		// eliminate the ADDs. That will produce shorter, more
		// pipeline-able code.
		var p *obj.Prog
		for ; c > 0; c-- {
			p = gins(op, &src, &tmp)
			p.From.Type = obj.TYPE_MEM
			p.From.Offset = int64(dir)

			p = gins(mips.AADDV, nil, &src)
			p.From.Type = obj.TYPE_CONST
			p.From.Offset = int64(dir)

			p = gins(op, &tmp, &dst)
			p.To.Type = obj.TYPE_MEM
			p.To.Offset = int64(dir)

			p = gins(mips.AADDV, nil, &dst)
			p.From.Type = obj.TYPE_CONST
			p.From.Offset = int64(dir)
		}
	}

	gc.Regfree(&dst)
	gc.Regfree(&src)
	gc.Regfree(&tmp)
}