github.com/freddyisaac/sicortex-golang@v0.0.0-20231019035217-e03519e66f60/src/runtime/vlop_arm.s

// Inferno's libkern/vlop-arm.s
// https://bitbucket.org/inferno-os/inferno-os/src/default/libkern/vlop-arm.s
//
//         Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
//         Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com).  All rights reserved.
//         Portions Copyright 2009 The Go Authors. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

#include "go_asm.h"
#include "go_tls.h"
#include "funcdata.h"
#include "textflag.h"

/* replaced use of R10 by R11 because the former can be the data segment base register */

TEXT _mulv(SB), NOSPLIT, $0
	MOVW	l0+0(FP), R2	/* l0 */
	MOVW	h0+4(FP), R11	/* h0 */
	MOVW	l1+8(FP), R4	/* l1 */
	MOVW	h1+12(FP), R5	/* h1 */
	MULLU	R4, R2, (R7,R6)
	MUL	R11, R4, R8
	ADD	R8, R7
	MUL	R2, R5, R8
	ADD	R8, R7
	MOVW	R6, ret_lo+16(FP)
	MOVW	R7, ret_hi+20(FP)
	RET

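// For reference, the 64-bit multiply above corresponds to this Go sketch
// (an illustrative model, not part of the runtime): the low 32x32 product
// supplies the low word, and the two cross products are added into the high
// word; the h0*h1 term only affects bits >= 64 and is dropped.
//
//	func mulv(l0, h0, l1, h1 uint32) (retlo, rethi uint32) {
//		lo := uint64(l0) * uint64(l1)          // MULLU: full product of the low words
//		retlo = uint32(lo)                     // low 32 bits of the result
//		rethi = uint32(lo>>32) + h0*l1 + l0*h1 // cross terms, truncated to 32 bits
//		return
//	}
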
// trampoline for _sfloat2. passes LR as arg0 and
// saves registers R0-R13 and CPSR on the stack. R0-R12 and CPSR flags can
// be changed by _sfloat2.
TEXT _sfloat(SB), NOSPLIT, $68-0 // 4 arg + 14*4 saved regs + cpsr + return value
	MOVW	R14, 4(R13)
	MOVW	R0, 8(R13)
	MOVW	$12(R13), R0
	MOVM.IA.W	[R1-R12], (R0)
	MOVW	$72(R13), R1 // correct for frame size
	MOVW	R1, 60(R13)
	WORD	$0xe10f1000 // mrs r1, cpsr
	MOVW	R1, 64(R13)
	// Disable preemption of this goroutine during _sfloat2 by
	// m->locks++ and m->locks-- around the call.
	// Rescheduling this goroutine may cause the loss of the
	// contents of the software floating point registers in
	// m->freghi, m->freglo, m->fflag, if the goroutine is moved
	// to a different m or another goroutine runs on this m.
	// Rescheduling at ordinary function calls is okay because
	// all registers are caller save, but _sfloat2 and the things
	// that it runs are simulating the execution of individual
	// program instructions, and those instructions do not expect
	// the floating point registers to be lost.
	// An alternative would be to move the software floating point
	// registers into G, but they do not need to be kept at the
	// usual places a goroutine reschedules (at function calls),
	// so it would be a waste of 132 bytes per G.
	MOVW	g_m(g), R8
	MOVW	m_locks(R8), R1
	ADD	$1, R1
	MOVW	R1, m_locks(R8)
	MOVW	$1, R1
	MOVW	R1, m_softfloat(R8)
	BL	runtime·_sfloat2(SB)
	MOVW	68(R13), R0
	MOVW	g_m(g), R8
	MOVW	m_locks(R8), R1
	SUB	$1, R1
	MOVW	R1, m_locks(R8)
	MOVW	$0, R1
	MOVW	R1, m_softfloat(R8)
	MOVW	R0, 0(R13)
	MOVW	64(R13), R1
	WORD	$0xe128f001	// msr cpsr_f, r1
	MOVW	$12(R13), R0
	// Restore R1-R12, R0.
	MOVM.IA.W	(R0), [R1-R12]
	MOVW	8(R13), R0
	RET

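// In Go terms, the bracket around the BL runtime·_sfloat2(SB) call above is
// roughly the following (a sketch only; the real code is the assembly above,
// which also passes the saved PC and register block to _sfloat2):
//
//	mp := getg().m
//	mp.locks++       // no rescheduling: freglo/freghi/fflag live in m
//	mp.softfloat = 1
//	// ... run the soft-float interpreter runtime·_sfloat2 ...
//	mp.locks--
//	mp.softfloat = 0
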
// trampoline for _sfloat2 panic.
// _sfloat2 instructs _sfloat to return here.
// We need to push a fake saved LR onto the stack,
// load the signal fault address into LR, and jump
// to the real sigpanic.
// This simulates what sighandler does for a memory fault.
TEXT runtime·_sfloatpanic(SB),NOSPLIT,$-4
	MOVW	$0, R0
	MOVW.W	R0, -4(R13)
	MOVW	g_sigpc(g), LR
	B	runtime·sigpanic(SB)

// func udiv(n, d uint32) (q, r uint32)
// the compiler knows the register usage of this function
// Reference:
// Sloss, Andrew et al.; ARM System Developer's Guide: Designing and Optimizing System Software
// Morgan Kaufmann; 1st edition (April 8, 2004), ISBN 978-1558608740
#define Rq	R0 // input d, output q
#define Rr	R1 // input n, output r
#define Rs	R2 // three temporary variables
#define RM	R3
#define Ra	R11

// Be careful: Ra == R11 will be used by the linker for synthesized instructions.
TEXT udiv(SB),NOSPLIT,$-4
	CLZ 	Rq, Rs // find normalizing shift
	MOVW.S	Rq<<Rs, Ra
	MOVW	$fast_udiv_tab<>-64(SB), RM
	ADD.NE	Ra>>25, RM, Ra // index by most significant 7 bits of divisor
	MOVBU.NE	(Ra), Ra

	SUB.S	$7, Rs
	RSB 	$0, Rq, RM // M = -q
	MOVW.PL	Ra<<Rs, Rq

	// 1st Newton iteration
	MUL.PL	RM, Rq, Ra // a = -q*d
	BMI 	udiv_by_large_d
	MULAWT	Ra, Rq, Rq, Rq // q approx q-(q*q*d>>32)
	TEQ 	RM->1, RM // check for d=0 or d=1

	// 2nd Newton iteration
	MUL.NE	RM, Rq, Ra
	MOVW.NE	$0, Rs
	MULAL.NE Rq, Ra, (Rq,Rs)
	BEQ 	udiv_by_0_or_1

	// q now accurate enough for a remainder r, 0<=r<3*d
	MULLU	Rq, Rr, (Rq,Rs) // q = (r * q) >> 32
	ADD 	RM, Rr, Rr // r = n - d
	MULA	RM, Rq, Rr, Rr // r = n - (q+1)*d

	// since 0 <= n-q*d < 3*d; thus -d <= r < 2*d
	CMN 	RM, Rr // t = r-d
	SUB.CS	RM, Rr, Rr // if (t<-d || t>=0) r=r+d
	ADD.CC	$1, Rq
	ADD.PL	RM<<1, Rr
	ADD.PL	$2, Rq
	RET

udiv_by_large_d:
	// at this point we know d>=2^(31-6)=2^25
	SUB 	$4, Ra, Ra
	RSB 	$0, Rs, Rs
	MOVW	Ra>>Rs, Rq
	MULLU	Rq, Rr, (Rq,Rs)
	MULA	RM, Rq, Rr, Rr

	// q now accurate enough for a remainder r, 0<=r<4*d
	CMN 	Rr>>1, RM // if(r/2 >= d)
	ADD.CS	RM<<1, Rr
	ADD.CS	$2, Rq
	CMN 	Rr, RM
	ADD.CS	RM, Rr
	ADD.CS	$1, Rq
	RET

udiv_by_0_or_1:
	// carry set if d==1, carry clear if d==0
	BCC udiv_by_0
	MOVW	Rr, Rq
	MOVW	$0, Rr
	RET

udiv_by_0:
	MOVW	$runtime·panicdivide(SB), R11
	B	(R11)

// var tab [64]byte
// tab[0] = 255; for i := 1; i <= 63; i++ { tab[i] = (1<<14)/(64+i) }
// laid out here as little-endian uint32s
DATA fast_udiv_tab<>+0x00(SB)/4, $0xf4f8fcff
DATA fast_udiv_tab<>+0x04(SB)/4, $0xe6eaedf0
DATA fast_udiv_tab<>+0x08(SB)/4, $0xdadde0e3
DATA fast_udiv_tab<>+0x0c(SB)/4, $0xcfd2d4d7
DATA fast_udiv_tab<>+0x10(SB)/4, $0xc5c7cacc
DATA fast_udiv_tab<>+0x14(SB)/4, $0xbcbec0c3
DATA fast_udiv_tab<>+0x18(SB)/4, $0xb4b6b8ba
DATA fast_udiv_tab<>+0x1c(SB)/4, $0xacaeb0b2
DATA fast_udiv_tab<>+0x20(SB)/4, $0xa5a7a8aa
DATA fast_udiv_tab<>+0x24(SB)/4, $0x9fa0a2a3
DATA fast_udiv_tab<>+0x28(SB)/4, $0x999a9c9d
DATA fast_udiv_tab<>+0x2c(SB)/4, $0x93949697
DATA fast_udiv_tab<>+0x30(SB)/4, $0x8e8f9092
DATA fast_udiv_tab<>+0x34(SB)/4, $0x898a8c8d
DATA fast_udiv_tab<>+0x38(SB)/4, $0x85868788
DATA fast_udiv_tab<>+0x3c(SB)/4, $0x81828384
GLOBL fast_udiv_tab<>(SB), RODATA, $64

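// The table entries are 8-bit reciprocal estimates: for a divisor whose
// normalized top 7 bits are t (64 <= t < 128), tab[t-64] ~= (1<<14)/t, which
// udiv sharpens with the two Newton iterations above before the remainder
// fix-up. A small generator (a standalone sketch, not built with the
// runtime) that reproduces the little-endian uint32 layout of the DATA
// directives:
//
//	package main
//
//	import "fmt"
//
//	func main() {
//		var tab [64]byte
//		tab[0] = 255 // (1<<14)/64 == 256 does not fit in a byte
//		for i := 1; i <= 63; i++ {
//			tab[i] = byte((1 << 14) / (64 + i))
//		}
//		for off := 0; off < 64; off += 4 {
//			w := uint32(tab[off]) | uint32(tab[off+1])<<8 |
//				uint32(tab[off+2])<<16 | uint32(tab[off+3])<<24
//			fmt.Printf("DATA fast_udiv_tab<>+0x%02x(SB)/4, $0x%08x\n", off, w)
//		}
//	}
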
// The linker will pass numerator in R8
#define Rn R8
// The linker expects the result in RTMP
#define RTMP R11

TEXT _divu(SB), NOSPLIT, $16-0
	// It's not strictly true that there are no local pointers.
	// It could be that the saved registers Rq, Rr, Rs, and RM
	// contain pointers. However, the only way this can matter
	// is if the stack grows (which it can't, udiv is nosplit)
	// or if a fault happens and more frames are added to
	// the stack due to deferred functions.
	// In the latter case, the stack can grow arbitrarily,
	// and garbage collection can happen, and those
	// operations care about pointers, but in that case
	// the calling frame is dead, and so are the saved
	// registers. So we can claim there are no pointers here.
	NO_LOCAL_POINTERS
	MOVW	Rq, 4(R13)
	MOVW	Rr, 8(R13)
	MOVW	Rs, 12(R13)
	MOVW	RM, 16(R13)

	MOVW	Rn, Rr			/* numerator */
	MOVW	g_m(g), Rq
	MOVW	m_divmod(Rq), Rq	/* denominator */
	BL  	udiv(SB)
	MOVW	Rq, RTMP
	MOVW	4(R13), Rq
	MOVW	8(R13), Rr
	MOVW	12(R13), Rs
	MOVW	16(R13), RM
	RET

TEXT _modu(SB), NOSPLIT, $16-0
	NO_LOCAL_POINTERS
	MOVW	Rq, 4(R13)
	MOVW	Rr, 8(R13)
	MOVW	Rs, 12(R13)
	MOVW	RM, 16(R13)

	MOVW	Rn, Rr			/* numerator */
	MOVW	g_m(g), Rq
	MOVW	m_divmod(Rq), Rq	/* denominator */
	BL  	udiv(SB)
	MOVW	Rr, RTMP
	MOVW	4(R13), Rq
	MOVW	8(R13), Rr
	MOVW	12(R13), Rs
	MOVW	16(R13), RM
	RET

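// In Go terms, _divu and _modu just select one half of udiv's result; the
// numerator arrives in R8 and the denominator in m->divmod, with the answer
// left in RTMP. A sketch of the contract using the udiv signature above
// (illustrative only, not the actual register-based calling convention):
//
//	func divuModel(n, d uint32) uint32 { q, _ := udiv(n, d); return q }
//	func moduModel(n, d uint32) uint32 { _, r := udiv(n, d); return r }
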
TEXT _div(SB),NOSPLIT,$16-0
	NO_LOCAL_POINTERS
	MOVW	Rq, 4(R13)
	MOVW	Rr, 8(R13)
	MOVW	Rs, 12(R13)
	MOVW	RM, 16(R13)
	MOVW	Rn, Rr			/* numerator */
	MOVW	g_m(g), Rq
	MOVW	m_divmod(Rq), Rq	/* denominator */
	CMP 	$0, Rr
	BGE 	d1
	RSB 	$0, Rr, Rr
	CMP 	$0, Rq
	BGE 	d2
	RSB 	$0, Rq, Rq
d0:
	BL  	udiv(SB)  		/* none/both neg */
	MOVW	Rq, RTMP
	B	out1
d1:
	CMP 	$0, Rq
	BGE 	d0
	RSB 	$0, Rq, Rq
d2:
	BL  	udiv(SB)  		/* one neg */
	RSB	$0, Rq, RTMP
out1:
	MOVW	4(R13), Rq
	MOVW	8(R13), Rr
	MOVW	12(R13), Rs
	MOVW	16(R13), RM
	RET

TEXT _mod(SB),NOSPLIT,$16-0
	NO_LOCAL_POINTERS
	MOVW	Rq, 4(R13)
	MOVW	Rr, 8(R13)
	MOVW	Rs, 12(R13)
	MOVW	RM, 16(R13)
	MOVW	Rn, Rr			/* numerator */
	MOVW	g_m(g), Rq
	MOVW	m_divmod(Rq), Rq	/* denominator */
	CMP 	$0, Rq
	RSB.LT	$0, Rq, Rq
	CMP 	$0, Rr
	BGE 	m1
	RSB 	$0, Rr, Rr
	BL  	udiv(SB)  		/* neg numerator */
	RSB 	$0, Rr, RTMP
	B   	out
m1:
	BL  	udiv(SB)  		/* pos numerator */
	MOVW	Rr, RTMP
out:
	MOVW	4(R13), Rq
	MOVW	8(R13), Rr
	MOVW	12(R13), Rs
	MOVW	16(R13), RM
	RET

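// The sign handling in _div and _mod above implements truncated division:
// the quotient is negative exactly when one operand is negative, and the
// remainder takes the sign of the numerator (the divisor's sign is ignored).
// An illustrative Go model built on the udiv signature given earlier in this
// file (a sketch of the semantics only, not the register-based convention):
//
//	func divModel(n, d int32) int32 {
//		q, _ := udiv(absu(n), absu(d))
//		if (n < 0) != (d < 0) {
//			return -int32(q)
//		}
//		return int32(q)
//	}
//
//	func modModel(n, d int32) int32 {
//		_, r := udiv(absu(n), absu(d))
//		if n < 0 {
//			return -int32(r)
//		}
//		return int32(r)
//	}
//
//	// absu returns |x| as a uint32; it is a helper for this sketch only.
//	func absu(x int32) uint32 {
//		if x < 0 {
//			return uint32(-x)
//		}
//		return uint32(x)
//	}
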
// _mul64by32 and _div64by32 not implemented on arm
TEXT runtime·_mul64by32(SB), NOSPLIT, $0
	MOVW	$0, R0
	MOVW	(R0), R1 // crash

TEXT runtime·_div64by32(SB), NOSPLIT, $0
	MOVW	$0, R0
	MOVW	(R0), R1 // crash