github.com/bgentry/go@v0.0.0-20150121062915-6cf5a733d54d/src/runtime/vlop_arm.s

// Inferno's libkern/vlop-arm.s
// http://code.google.com/p/inferno-os/source/browse/libkern/vlop-arm.s
//
//         Copyright © 1994-1999 Lucent Technologies Inc.  All rights reserved.
//         Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com).  All rights reserved.
//         Portions Copyright 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

#include "go_asm.h"
#include "go_tls.h"
#include "textflag.h"

arg=0

/* replaced use of R10 by R11 because the former can be the data segment base register */

TEXT _mulv(SB), NOSPLIT, $0
	MOVW	l0+0(FP), R2	/* l0 */
	MOVW	h0+4(FP), R11	/* h0 */
	MOVW	l1+8(FP), R4	/* l1 */
	MOVW	h1+12(FP), R5	/* h1 */
	MULLU	R4, R2, (R7,R6)
	MUL	R11, R4, R8
	ADD	R8, R7
	MUL	R2, R5, R8
	ADD	R8, R7
	MOVW	R6, ret_lo+16(FP)
	MOVW	R7, ret_hi+20(FP)
	RET
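
// For reference, a rough Go model of the 64x64->64 multiply above
// (an illustrative sketch, not part of the runtime): the low result
// word is the low half of l0*l1, and the high word is the high half
// of l0*l1 plus the two cross terms; the h0*h1 term only affects
// bits 64 and up, so it is dropped.
//
//	func mulv(l0, h0, l1, h1 uint32) (lo, hi uint32) {
//		p := uint64(l0) * uint64(l1)       // MULLU: full 32x32->64 product
//		lo = uint32(p)
//		hi = uint32(p>>32) + h0*l1 + l0*h1 // MUL keeps only the low 32 bits, as above
//		return
//	}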

// trampoline for _sfloat2. passes LR as arg0 and
// saves registers R0-R13 and CPSR on the stack. R0-R12 and CPSR flags can
// be changed by _sfloat2.
TEXT _sfloat(SB), NOSPLIT, $68-0 // 4 arg + 14*4 saved regs + cpsr + return value
	MOVW	R14, 4(R13)
	MOVW	R0, 8(R13)
	MOVW	$12(R13), R0
	MOVM.IA.W	[R1-R12], (R0)
	MOVW	$72(R13), R1 // correct for frame size
	MOVW	R1, 60(R13)
	WORD	$0xe10f1000 // mrs r1, cpsr
	MOVW	R1, 64(R13)
	// Disable preemption of this goroutine during _sfloat2 by
	// m->locks++ and m->locks-- around the call.
	// Rescheduling this goroutine may cause the loss of the
	// contents of the software floating point registers in
	// m->freghi, m->freglo, m->fflag, if the goroutine is moved
	// to a different m or another goroutine runs on this m.
	// Rescheduling at ordinary function calls is okay because
	// all registers are caller save, but _sfloat2 and the things
	// that it runs are simulating the execution of individual
	// program instructions, and those instructions do not expect
	// the floating point registers to be lost.
	// An alternative would be to move the software floating point
	// registers into G, but they do not need to be kept at the
	// usual places a goroutine reschedules (at function calls),
	// so it would be a waste of 132 bytes per G.
	MOVW	g_m(g), R8
	MOVW	m_locks(R8), R1
	ADD	$1, R1
	MOVW	R1, m_locks(R8)
	MOVW	$1, R1
	MOVW	R1, m_softfloat(R8)
	BL	runtime·_sfloat2(SB)
	MOVW	68(R13), R0
	MOVW	g_m(g), R8
	MOVW	m_locks(R8), R1
	SUB	$1, R1
	MOVW	R1, m_locks(R8)
	MOVW	$0, R1
	MOVW	R1, m_softfloat(R8)
	MOVW	R0, 0(R13)
	MOVW	64(R13), R1
	WORD	$0xe128f001	// msr cpsr_f, r1
	MOVW	$12(R13), R0
	// Restore R1-R12, R0.
	MOVM.IA.W	(R0), [R1-R12]
	MOVW	8(R13), R0
	RET

// trampoline for _sfloat2 panic.
// _sfloat2 instructs _sfloat to return here.
// We need to push a fake saved LR onto the stack,
// load the signal fault address into LR, and jump
// to the real sigpanic.
// This simulates what sighandler does for a memory fault.
TEXT runtime·_sfloatpanic(SB),NOSPLIT,$-4
	MOVW	$0, R0
	MOVW.W	R0, -4(R13)
	MOVW	g_sigpc(g), LR
	B	runtime·sigpanic(SB)

// func udiv(n, d uint32) (q, r uint32)
// Reference:
// Sloss, Andrew, et al.; ARM System Developer's Guide: Designing and Optimizing System Software
// Morgan Kaufmann; 1st edition (April 8, 2004), ISBN 978-1558608740
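
// For orientation, a rough Go model of the approach used below (an
// illustrative sketch assuming d != 0; d == 0 and d == 1 are handled
// separately in the code): form an approximate reciprocal of d scaled
// by 2^32, multiply by n for a quotient estimate, then fix up the
// remainder with a few conditional steps. The assembly builds its
// reciprocal from an 8-bit fast_udiv_tab estimate refined by
// Newton-Raphson iterations rather than the exact division used here.
//
//	func udivModel(n, d uint32) (q, r uint32) {
//		recip := ^uint32(0) / d // ~2^32/d, a slight underestimate
//		q = uint32((uint64(n) * uint64(recip)) >> 32)
//		r = n - q*d // the estimate never overshoots, so no wraparound
//		for r >= d { // at most a few correction steps
//			q++
//			r -= d
//		}
//		return
//	}
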
q = 0 // input d, output q
r = 1 // input n, output r
s = 2 // three temporary variables
M = 3
a = 11
// Be careful: R(a) == R11 will be used by the linker for synthesized instructions.
TEXT udiv<>(SB),NOSPLIT,$-4
	CLZ 	R(q), R(s) // find normalizing shift
	MOVW.S	R(q)<<R(s), R(a)
	MOVW	$fast_udiv_tab<>-64(SB), R(M)
	ADD.NE	R(a)>>25, R(M), R(a) // index by most significant 7 bits of divisor
	MOVBU.NE	(R(a)), R(a)

	SUB.S	$7, R(s)
	RSB 	$0, R(q), R(M) // M = -q
	MOVW.PL	R(a)<<R(s), R(q)

	// 1st Newton iteration
	MUL.PL	R(M), R(q), R(a) // a = -q*d
	BMI 	udiv_by_large_d
	MULAWT	R(a), R(q), R(q), R(q) // q approx q-(q*q*d>>32)
	TEQ 	R(M)->1, R(M) // check for d=0 or d=1

	// 2nd Newton iteration
	MUL.NE	R(M), R(q), R(a)
	MOVW.NE	$0, R(s)
	MULAL.NE R(q), R(a), (R(q),R(s))
	BEQ 	udiv_by_0_or_1

	// q now accurate enough for a remainder r, 0<=r<3*d
	MULLU	R(q), R(r), (R(q),R(s)) // q = (r * q) >> 32
	ADD 	R(M), R(r), R(r) // r = n - d
	MULA	R(M), R(q), R(r), R(r) // r = n - (q+1)*d

	// since 0 <= n-q*d < 3*d; thus -d <= r < 2*d
	CMN 	R(M), R(r) // t = r-d
	SUB.CS	R(M), R(r), R(r) // if (t<-d || t>=0) r=r+d
	ADD.CC	$1, R(q)
	ADD.PL	R(M)<<1, R(r)
	ADD.PL	$2, R(q)
	RET

udiv_by_large_d:
	// at this point we know d>=2^(31-6)=2^25
	SUB 	$4, R(a), R(a)
	RSB 	$0, R(s), R(s)
	MOVW	R(a)>>R(s), R(q)
	MULLU	R(q), R(r), (R(q),R(s))
	MULA	R(M), R(q), R(r), R(r)

	// q now accurate enough for a remainder r, 0<=r<4*d
	CMN 	R(r)>>1, R(M) // if(r/2 >= d)
	ADD.CS	R(M)<<1, R(r)
	ADD.CS	$2, R(q)
	CMN 	R(r), R(M)
	ADD.CS	R(M), R(r)
	ADD.CS	$1, R(q)
	RET

udiv_by_0_or_1:
	// carry set if d==1, carry clear if d==0
	BCC udiv_by_0
	MOVW	R(r), R(q)
	MOVW	$0, R(r)
	RET

udiv_by_0:
	// The ARM toolchain expects it can emit references to DIV and MOD
	// instructions. The linker rewrites each pseudo-instruction into
	// a sequence that pushes two values onto the stack and then calls
	// _divu, _modu, _div, or _mod (below), all of which have a 16-byte
	// frame plus the saved LR. The traceback routine knows the expanded
	// stack frame size at the pseudo-instruction call site, but it
	// doesn't know that the frame has a non-standard layout. In particular,
	// it expects to find a saved LR in the bottom word of the frame.
	// Unwind the stack back to the pseudo-instruction call site, copy the
	// saved LR where the traceback routine will look for it, and make it
	// appear that panicdivide was called from that PC.
	MOVW	0(R13), LR
	ADD	$20, R13
	MOVW	8(R13), R1 // actual saved LR
	MOVW	R1, 0(R13) // expected here for traceback
	B 	runtime·panicdivide(SB)

// var tab [64]byte
// tab[0] = 255; for i := 1; i <= 63; i++ { tab[i] = (1<<14)/(64+i) }
// laid out here as little-endian uint32s
DATA fast_udiv_tab<>+0x00(SB)/4, $0xf4f8fcff
DATA fast_udiv_tab<>+0x04(SB)/4, $0xe6eaedf0
DATA fast_udiv_tab<>+0x08(SB)/4, $0xdadde0e3
DATA fast_udiv_tab<>+0x0c(SB)/4, $0xcfd2d4d7
DATA fast_udiv_tab<>+0x10(SB)/4, $0xc5c7cacc
DATA fast_udiv_tab<>+0x14(SB)/4, $0xbcbec0c3
DATA fast_udiv_tab<>+0x18(SB)/4, $0xb4b6b8ba
DATA fast_udiv_tab<>+0x1c(SB)/4, $0xacaeb0b2
DATA fast_udiv_tab<>+0x20(SB)/4, $0xa5a7a8aa
DATA fast_udiv_tab<>+0x24(SB)/4, $0x9fa0a2a3
DATA fast_udiv_tab<>+0x28(SB)/4, $0x999a9c9d
DATA fast_udiv_tab<>+0x2c(SB)/4, $0x93949697
DATA fast_udiv_tab<>+0x30(SB)/4, $0x8e8f9092
DATA fast_udiv_tab<>+0x34(SB)/4, $0x898a8c8d
DATA fast_udiv_tab<>+0x38(SB)/4, $0x85868788
DATA fast_udiv_tab<>+0x3c(SB)/4, $0x81828384
GLOBL fast_udiv_tab<>(SB), RODATA, $64
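
// For reference, a small standalone Go program (illustrative only, not
// part of the build) that regenerates the table bytes and their
// little-endian packing as laid out above:
//
//	package main
//
//	import "fmt"
//
//	func main() {
//		var tab [64]byte
//		tab[0] = 255
//		for i := 1; i <= 63; i++ {
//			tab[i] = byte((1 << 14) / (64 + i))
//		}
//		for i := 0; i < 64; i += 4 {
//			// byte i is the least significant byte of each word
//			fmt.Printf("DATA fast_udiv_tab<>+0x%02x(SB)/4, $0x%02x%02x%02x%02x\n",
//				i, tab[i+3], tab[i+2], tab[i+1], tab[i])
//		}
//	}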

// The linker passes the numerator in R(TMP) and also
// expects the result in R(TMP).
TMP = 11

TEXT _divu(SB), NOSPLIT, $16
	MOVW	R(q), 4(R13)
	MOVW	R(r), 8(R13)
	MOVW	R(s), 12(R13)
	MOVW	R(M), 16(R13)

	MOVW	R(TMP), R(r)		/* numerator */
	MOVW	0(FP), R(q) 		/* denominator */
	BL  	udiv<>(SB)
	MOVW	R(q), R(TMP)
	MOVW	4(R13), R(q)
	MOVW	8(R13), R(r)
	MOVW	12(R13), R(s)
	MOVW	16(R13), R(M)
	RET

TEXT _modu(SB), NOSPLIT, $16
	MOVW	R(q), 4(R13)
	MOVW	R(r), 8(R13)
	MOVW	R(s), 12(R13)
	MOVW	R(M), 16(R13)

	MOVW	R(TMP), R(r)		/* numerator */
	MOVW	0(FP), R(q) 		/* denominator */
	BL  	udiv<>(SB)
	MOVW	R(r), R(TMP)
	MOVW	4(R13), R(q)
	MOVW	8(R13), R(r)
	MOVW	12(R13), R(s)
	MOVW	16(R13), R(M)
	RET

TEXT _div(SB),NOSPLIT,$16
	MOVW	R(q), 4(R13)
	MOVW	R(r), 8(R13)
	MOVW	R(s), 12(R13)
	MOVW	R(M), 16(R13)
	MOVW	R(TMP), R(r)		/* numerator */
	MOVW	0(FP), R(q) 		/* denominator */
	CMP 	$0, R(r)
	BGE 	d1
	RSB 	$0, R(r), R(r)
	CMP 	$0, R(q)
	BGE 	d2
	RSB 	$0, R(q), R(q)
d0:
	BL  	udiv<>(SB)  		/* none/both neg */
	MOVW	R(q), R(TMP)
	B		out1
d1:
	CMP 	$0, R(q)
	BGE 	d0
	RSB 	$0, R(q), R(q)
d2:
	BL  	udiv<>(SB)  		/* one neg */
	RSB		$0, R(q), R(TMP)
out1:
	MOVW	4(R13), R(q)
	MOVW	8(R13), R(r)
	MOVW	12(R13), R(s)
	MOVW	16(R13), R(M)
	RET

TEXT _mod(SB),NOSPLIT,$16
	MOVW	R(q), 4(R13)
	MOVW	R(r), 8(R13)
	MOVW	R(s), 12(R13)
	MOVW	R(M), 16(R13)
	MOVW	R(TMP), R(r)		/* numerator */
	MOVW	0(FP), R(q) 		/* denominator */
	CMP 	$0, R(q)
	RSB.LT	$0, R(q), R(q)
	CMP 	$0, R(r)
	BGE 	m1
	RSB 	$0, R(r), R(r)
	BL  	udiv<>(SB)  		/* neg numerator */
	RSB 	$0, R(r), R(TMP)
	B   	out
m1:
	BL  	udiv<>(SB)  		/* pos numerator */
	MOVW	R(r), R(TMP)
out:
	MOVW	4(R13), R(q)
	MOVW	8(R13), R(r)
	MOVW	12(R13), R(s)
	MOVW	16(R13), R(M)
	RET
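
// The sign handling in _div and _mod above matches Go's integer
// division: the quotient truncates toward zero and the remainder
// takes the sign of the numerator. For example:
//
//	 7 / 2 == 3     7 % 2 == 1
//	-7 / 2 == -3   -7 % 2 == -1
//	 7 / -2 == -3   7 % -2 == 1
//	-7 / -2 == 3   -7 % -2 == -1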

// _mul64by32 and _div64by32 not implemented on arm
TEXT runtime·_mul64by32(SB), NOSPLIT, $0
	MOVW	$0, R0
	MOVW	(R0), R1 // crash

TEXT runtime·_div64by32(SB), NOSPLIT, $0
	MOVW	$0, R0
	MOVW	(R0), R1 // crash