github.com/prattmic/llgo-embedded@v0.0.0-20150820070356-41cfecea0e1e/third_party/gofrontend/libffi/src/aarch64/sysv.S (about)

     1  /* Copyright (c) 2009, 2010, 2011, 2012 ARM Ltd.
     2  
     3  Permission is hereby granted, free of charge, to any person obtaining
     4  a copy of this software and associated documentation files (the
     5  ``Software''), to deal in the Software without restriction, including
     6  without limitation the rights to use, copy, modify, merge, publish,
     7  distribute, sublicense, and/or sell copies of the Software, and to
     8  permit persons to whom the Software is furnished to do so, subject to
     9  the following conditions:
    10  
    11  The above copyright notice and this permission notice shall be
    12  included in all copies or substantial portions of the Software.
    13  
    14  THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND,
    15  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
    16  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
    17  IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
    18  CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
    19  TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
    20  SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.  */
    21  
    22  #define LIBFFI_ASM
    23  #include <fficonfig.h>
    24  #include <ffi.h>
    25  #include <ffi_cfi.h>
    26  #include "internal.h"
    27  
        /* CNAME(x) applies the platform's user-label prefix (e.g. a leading
           underscore on Mach-O) to a global symbol when <machine/asm.h> is
           not available to do it for us.  */
    28  #ifdef HAVE_MACHINE_ASM_H
    29  #include <machine/asm.h>
    30  #else
    31  #ifdef __USER_LABEL_PREFIX__
    32  #define CONCAT1(a, b) CONCAT2(a, b)
    33  #define CONCAT2(a, b) a ## b
    34  
    35  /* Use the right prefix for global labels.  */
    36  #define CNAME(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
    37  #else
    38  #define CNAME(x) x
    39  #endif
    40  #endif
    41  
        /* BE(X): byte offset for picking a sub-word value out of a 64-bit
           slot -- X on big-endian AArch64, 0 on little-endian.  Used by the
           UINT8/16/32 and SINT8/16/32 loads in the closure return path.  */
    42  #ifdef __AARCH64EB__
    43  # define BE(X)	X
    44  #else
    45  # define BE(X)	0
    46  #endif
    47  
    48  	.text
        /* On AArch64 ELF, .align is power-of-two: 2^4 = 16-byte alignment.  */
    49  	.align 4
    50  
    51  /* ffi_call_SYSV
    52     extern void ffi_call_SYSV (void *stack, void *frame,
    53  			      void (*fn)(void), void *rvalue,
    54  			      int flags, void *closure);
    55  
    56     Therefore on entry we have:
    57  
    58     x0 stack
    59     x1 frame
    60     x2 fn
    61     x3 rvalue
    62     x4 flags
    63     x5 closure
    64  */
    65  
    66  	cfi_startproc
        /* ffi_call_SYSV (see prototype comment above):
             x0 = prepared argument context/stack (vector regs, core regs,
                  then stacked arguments), x1 = 32-byte frame allocated by
                  our caller, x2 = target fn, x3 = rvalue buffer,
                  x4 = flags, x5 = closure static chain.
           Clobbers x9 (saved fn) and, under FFI_GO_CLOSURES, x18.  */
    67  CNAME(ffi_call_SYSV):
    68  	/* Use a stack frame allocated by our caller.  */
    69  	cfi_def_cfa(x1, 32);
    70  	stp	x29, x30, [x1]
    71  	mov	x29, x1
    72  	mov	sp, x0
    73  	cfi_def_cfa_register(x29)
    74  	cfi_rel_offset (x29, 0)
    75  	cfi_rel_offset (x30, 8)
    76  
    77  	mov	x9, x2			/* save fn */
        /* x8 is the AAPCS64 indirect-result location register.  */
    78  	mov	x8, x3			/* install structure return */
    79  #ifdef FFI_GO_CLOSURES
        /* NOTE(review): x18 is the reserved platform register on some OSes
           (Darwin, Windows); this static-chain use assumes a target where
           x18 is free -- confirm per platform.  */
    80  	mov	x18, x5			/* install static chain */
    81  #endif
    82  	stp	x3, x4, [x29, #16]	/* save rvalue and flags */
    83  
    84  	/* Load the vector argument passing registers, if necessary.  */
    85  	tbz	w4, #AARCH64_FLAG_ARG_V_BIT, 1f
    86  	ldp     q0, q1, [sp, #0]
    87  	ldp     q2, q3, [sp, #32]
    88  	ldp     q4, q5, [sp, #64]
    89  	ldp     q6, q7, [sp, #96]
    90  1:
    91  	/* Load the core argument passing registers, including
    92  	   the structure return pointer.  */
    93  	ldp     x0, x1, [sp, #16*N_V_ARG_REG + 0]
    94  	ldp     x2, x3, [sp, #16*N_V_ARG_REG + 16]
    95  	ldp     x4, x5, [sp, #16*N_V_ARG_REG + 32]
    96  	ldp     x6, x7, [sp, #16*N_V_ARG_REG + 48]
    97  
    98  	/* Deallocate the context, leaving the stacked arguments.  */
    99  	add	sp, sp, #CALL_CONTEXT_SIZE
   100  
   101  	blr     x9			/* call fn */
   102  
   103  	ldp	x3, x4, [x29, #16]	/* reload rvalue and flags */
   104  
   105  	/* Partially deconstruct the stack frame.  */
   106  	mov     sp, x29
   107  	cfi_def_cfa_register (sp)
   108  	ldp     x29, x30, [x29]
   109  
   110  	/* Save the return value as directed.  */
        /* Computed goto: x5 = 0f + (flags & AARCH64_RET_MASK) * 8, i.e.
           one 8-byte (two-insn) table entry per AARCH64_RET_* code.  */
   111  	adr	x5, 0f
   112  	and	w4, w4, #AARCH64_RET_MASK
   113  	add	x5, x5, x4, lsl #3
   114  	br	x5
   115  
   116  	/* Note that each table entry is 2 insns, and thus 8 bytes.
   117  	   For integer data, note that we're storing into ffi_arg
   118  	   and therefore we want to extend to 64 bits; these types
   119  	   have two consecutive entries allocated for them.  */
   120  	.align	4
   121  0:	ret				/* VOID */
   122  	nop
   123  1:	str	x0, [x3]		/* INT64 */
   124  	ret
   125  2:	stp	x0, x1, [x3]		/* INT128 */
   126  	ret
   127  3:	brk	#1000			/* UNUSED */
   128  	ret
   129  4:	brk	#1000			/* UNUSED */
   130  	ret
   131  5:	brk	#1000			/* UNUSED */
   132  	ret
   133  6:	brk	#1000			/* UNUSED */
   134  	ret
   135  7:	brk	#1000			/* UNUSED */
   136  	ret
   137  8:	st4	{ v0.s-v3.s }[0], [x3]	/* S4 */
   138  	ret
   139  9:	st3	{ v0.s-v2.s }[0], [x3]	/* S3 */
   140  	ret
   141  10:	stp	s0, s1, [x3]		/* S2 */
   142  	ret
   143  11:	str	s0, [x3]		/* S1 */
   144  	ret
   145  12:	st4	{ v0.d-v3.d }[0], [x3]	/* D4 */
   146  	ret
   147  13:	st3	{ v0.d-v2.d }[0], [x3]	/* D3 */
   148  	ret
   149  14:	stp	d0, d1, [x3]		/* D2 */
   150  	ret
   151  15:	str	d0, [x3]		/* D1 */
   152  	ret
        /* Q4 and Q3 entries deliberately fall through (str + nop) into the
           Q2 entry so all four/three quad registers get stored.  */
   153  16:	str	q3, [x3, #48]		/* Q4 */
   154  	nop
   155  17:	str	q2, [x3, #32]		/* Q3 */
   156  	nop
   157  18:	stp	q0, q1, [x3]		/* Q2 */
   158  	ret
   159  19:	str	q0, [x3]		/* Q1 */
   160  	ret
        /* Each extend+store below falls through into the following
           (reserved) slot's ret -- the two-entry scheme described above.  */
   161  20:	uxtb	w0, w0			/* UINT8 */
   162  	str	x0, [x3]
   163  21:	ret				/* reserved */
   164  	nop
   165  22:	uxth	w0, w0			/* UINT16 */
   166  	str	x0, [x3]
   167  23:	ret				/* reserved */
   168  	nop
   169  24:	mov	w0, w0			/* UINT32 */
   170  	str	x0, [x3]
   171  25:	ret				/* reserved */
   172  	nop
   173  26:	sxtb	x0, w0			/* SINT8 */
   174  	str	x0, [x3]
   175  27:	ret				/* reserved */
   176  	nop
   177  28:	sxth	x0, w0			/* SINT16 */
   178  	str	x0, [x3]
   179  29:	ret				/* reserved */
   180  	nop
   181  30:	sxtw	x0, w0			/* SINT32 */
   182  	str	x0, [x3]
   183  31:	ret				/* reserved */
   184  	nop
   185  
   186  	cfi_endproc
   187  
   188  	.globl	CNAME(ffi_call_SYSV)
   189  #ifdef __ELF__
   190  	.type	CNAME(ffi_call_SYSV), #function
   191  	.hidden	CNAME(ffi_call_SYSV)
   192  	.size CNAME(ffi_call_SYSV), .-CNAME(ffi_call_SYSV)
   193  #endif
   194  
   195  /* ffi_closure_SYSV
   196  
   197     Closure invocation glue. This is the low level code invoked directly by
   198     the closure trampoline to setup and call a closure.
   199  
   200     On entry x17 points to a struct ffi_closure and x16 has been
   201     clobbered; all other registers are preserved.
   202  
   203     We allocate a call context and save the argument passing registers,
   204     then invoke the generic C ffi_closure_SYSV_inner() function to do all
   205     the real work, on return we load the result passing registers back from
   206     the call context.
   207  */
   208  
        /* Closure frame: 16 bytes for saved x29/x30, the register-save call
           context, and a 64-byte return-value scratch area.  */
   209  #define ffi_closure_SYSV_FS (8*2 + CALL_CONTEXT_SIZE + 64)
   210  
   211  	.align 4
        /* Closure entry used when vector registers may carry arguments:
           allocate the frame, save q0-q7, then join the common path at the
           0: label inside CNAME(ffi_closure_SYSV) below.  */
   212  CNAME(ffi_closure_SYSV_V):
   213  	cfi_startproc
   214  	stp     x29, x30, [sp, #-ffi_closure_SYSV_FS]!
   215  	cfi_adjust_cfa_offset (ffi_closure_SYSV_FS)
   216  	cfi_rel_offset (x29, 0)
   217  	cfi_rel_offset (x30, 8)
   218  
   219  	/* Save the argument passing vector registers.  */
   220  	stp     q0, q1, [sp, #16 + 0]
   221  	stp     q2, q3, [sp, #16 + 32]
   222  	stp     q4, q5, [sp, #16 + 64]
   223  	stp     q6, q7, [sp, #16 + 96]
   224  	b	0f
   225  	cfi_endproc
   226  
   227  	.globl	CNAME(ffi_closure_SYSV_V)
   228  #ifdef __ELF__
   229  	.type	CNAME(ffi_closure_SYSV_V), #function
   230  	.hidden	CNAME(ffi_closure_SYSV_V)
   231  	.size	CNAME(ffi_closure_SYSV_V), . - CNAME(ffi_closure_SYSV_V)
   232  #endif
   233  
   234  	.align	4
   235  	cfi_startproc
        /* Closure entry for closures with no vector-register arguments
           (x17 = the struct ffi_closure, installed by the trampoline;
           ffi_closure_SYSV_V joins at the 0: label after saving q0-q7).  */
   236  CNAME(ffi_closure_SYSV):
   237  	stp     x29, x30, [sp, #-ffi_closure_SYSV_FS]!
   238  	cfi_adjust_cfa_offset (ffi_closure_SYSV_FS)
   239  	cfi_rel_offset (x29, 0)
   240  	cfi_rel_offset (x30, 8)
   241  0:
   242  	mov     x29, sp
   243  
   244  	/* Save the argument passing core registers.  */
   245  	stp     x0, x1, [sp, #16 + 16*N_V_ARG_REG + 0]
   246  	stp     x2, x3, [sp, #16 + 16*N_V_ARG_REG + 16]
   247  	stp     x4, x5, [sp, #16 + 16*N_V_ARG_REG + 32]
   248  	stp     x6, x7, [sp, #16 + 16*N_V_ARG_REG + 48]
   249  
   250  	/* Load ffi_closure_inner arguments.  */
   251  	ldp	x0, x1, [x17, #FFI_TRAMPOLINE_SIZE]	/* load cif, fn */
   252  	ldr	x2, [x17, #FFI_TRAMPOLINE_SIZE+16]	/* load user_data */
   253  .Ldo_closure:
   254  	add	x3, sp, #16				/* load context */
   255  	add	x4, sp, #ffi_closure_SYSV_FS		/* load stack */
   256  	add	x5, sp, #16+CALL_CONTEXT_SIZE		/* load rvalue */
   257  	mov	x6, x8					/* load struct_rval */
   258  	bl      CNAME(ffi_closure_SYSV_inner)
   259  
   260  	/* Load the return value as directed.  */
        /* w0 = AARCH64_RET_* code returned by the inner function; x3 =
           the rvalue scratch area it filled in.  Each table entry is
           8 bytes (two insns): branch to 0f + code*8.  */
   261  	adr	x1, 0f
   262  	and	w0, w0, #AARCH64_RET_MASK
   263  	add	x1, x1, x0, lsl #3
   264  	add	x3, sp, #16+CALL_CONTEXT_SIZE
   265  	br	x1
   266  
   267  	/* Note that each table entry is 2 insns, and thus 8 bytes.  */
   268  	.align	4
   269  0:	b	99f			/* VOID */
   270  	nop
   271  1:	ldr	x0, [x3]		/* INT64 */
   272  	b	99f
   273  2:	ldp	x0, x1, [x3]		/* INT128 */
   274  	b	99f
   275  3:	brk	#1000			/* UNUSED */
   276  	nop
   277  4:	brk	#1000			/* UNUSED */
   278  	nop
   279  5:	brk	#1000			/* UNUSED */
   280  	nop
   281  6:	brk	#1000			/* UNUSED */
   282  	nop
   283  7:	brk	#1000			/* UNUSED */
   284  	nop
        /* S4/S3 and D4/D3 entries fall through (ldr + nop) into the
           S2/S1 and D2/D1 entries below to load the remaining regs.  */
   285  8:	ldr	s3, [x3, #12]		/* S4 */
   286  	nop
        /* BUG FIX: was "ldr s2, [x2, #8]".  x2 holds user_data at this
           point, not the return-value area; every other entry in this
           table reads the result from x3 (set at line 264 above).  The
           stray x2 corrupted S3 -- and, via the S4 fallthrough -- S4
           homogeneous float returns.  Matches the upstream libffi fix.  */
   287  9:	ldr	s2, [x3, #8]		/* S3 */
   288  	nop
   289  10:	ldp	s0, s1, [x3]		/* S2 */
   290  	b	99f
   291  11:	ldr	s0, [x3]		/* S1 */
   292  	b	99f
   293  12:	ldr	d3, [x3, #24]		/* D4 */
   294  	nop
   295  13:	ldr	d2, [x3, #16]		/* D3 */
   296  	nop
   297  14:	ldp	d0, d1, [x3]		/* D2 */
   298  	b	99f
   299  15:	ldr	d0, [x3]		/* D1 */
   300  	b	99f
   301  16:	ldr	q3, [x3, #48]		/* Q4 */
   302  	nop
   303  17:	ldr	q2, [x3, #32]		/* Q3 */
   304  	nop
   305  18:	ldp	q0, q1, [x3]		/* Q2 */
   306  	b	99f
   307  19:	ldr	q0, [x3]		/* Q1 */
   308  	b	99f
   309  20:	ldrb	w0, [x3, #BE(7)]	/* UINT8 */
   310  	b	99f
   311  21:	brk	#1000			/* reserved */
   312  	nop
   313  22:	ldrh	w0, [x3, #BE(6)]	/* UINT16 */
   314  	b	99f
   315  23:	brk	#1000			/* reserved */
   316  	nop
   317  24:	ldr	w0, [x3, #BE(4)]	/* UINT32 */
   318  	b	99f
   319  25:	brk	#1000			/* reserved */
   320  	nop
   321  26:	ldrsb	x0, [x3, #BE(7)]	/* SINT8 */
   322  	b	99f
   323  27:	brk	#1000			/* reserved */
   324  	nop
   325  28:	ldrsh	x0, [x3, #BE(6)]	/* SINT16 */
   326  	b	99f
   327  29:	brk	#1000			/* reserved */
   328  	nop
        /* SINT32 falls through entry 31 (empty) straight into 99:.  */
   329  30:	ldrsw	x0, [x3, #BE(4)]	/* SINT32 */
   330  	nop
   331  31:					/* reserved */
   332  99:	ldp     x29, x30, [sp], #ffi_closure_SYSV_FS
   333  	cfi_adjust_cfa_offset (-ffi_closure_SYSV_FS)
   334  	cfi_restore (x29)
   335  	cfi_restore (x30)
   336  	ret
   337  	cfi_endproc
   338  
   339  	.globl	CNAME(ffi_closure_SYSV)
   340  #ifdef __ELF__
   341  	.type	CNAME(ffi_closure_SYSV), #function
   342  	.hidden	CNAME(ffi_closure_SYSV)
   343  	.size	CNAME(ffi_closure_SYSV), . - CNAME(ffi_closure_SYSV)
   344  #endif
   345  
   346  #ifdef FFI_GO_CLOSURES
   347  	.align 4
        /* Go-closure entry with possible vector-register arguments: save
           q0-q7, then join at the 0: label inside
           CNAME(ffi_go_closure_SYSV) below.  On entry x18 holds the Go
           static chain (pointer to the ffi_go_closure).  */
   348  CNAME(ffi_go_closure_SYSV_V):
   349  	cfi_startproc
   350  	stp     x29, x30, [sp, #-ffi_closure_SYSV_FS]!
   351  	cfi_adjust_cfa_offset (ffi_closure_SYSV_FS)
   352  	cfi_rel_offset (x29, 0)
   353  	cfi_rel_offset (x30, 8)
   354  
   355  	/* Save the argument passing vector registers.  */
   356  	stp     q0, q1, [sp, #16 + 0]
   357  	stp     q2, q3, [sp, #16 + 32]
   358  	stp     q4, q5, [sp, #16 + 64]
   359  	stp     q6, q7, [sp, #16 + 96]
   360  	b	0f
   361  	cfi_endproc
   362  
   363  	.globl	CNAME(ffi_go_closure_SYSV_V)
   364  #ifdef __ELF__
   365  	.type	CNAME(ffi_go_closure_SYSV_V), #function
   366  	.hidden	CNAME(ffi_go_closure_SYSV_V)
   367  	.size	CNAME(ffi_go_closure_SYSV_V), . - CNAME(ffi_go_closure_SYSV_V)
   368  #endif
   369  
   370  	.align	4
   371  	cfi_startproc
        /* Go-closure entry, no vector arguments.  x18 points at the
           ffi_go_closure; cif and fn sit at offset 8 within it, and the
           ffi_go_closure itself doubles as user_data.  After loading the
           inner-call arguments this tail-joins .Ldo_closure inside
           CNAME(ffi_closure_SYSV), sharing its return dispatch.  */
   372  CNAME(ffi_go_closure_SYSV):
   373  	stp     x29, x30, [sp, #-ffi_closure_SYSV_FS]!
   374  	cfi_adjust_cfa_offset (ffi_closure_SYSV_FS)
   375  	cfi_rel_offset (x29, 0)
   376  	cfi_rel_offset (x30, 8)
   377  0:
   378  	mov     x29, sp
   379  
   380  	/* Save the argument passing core registers.  */
   381  	stp     x0, x1, [sp, #16 + 16*N_V_ARG_REG + 0]
   382  	stp     x2, x3, [sp, #16 + 16*N_V_ARG_REG + 16]
   383  	stp     x4, x5, [sp, #16 + 16*N_V_ARG_REG + 32]
   384  	stp     x6, x7, [sp, #16 + 16*N_V_ARG_REG + 48]
   385  
   386  	/* Load ffi_closure_inner arguments.  */
   387  	ldp	x0, x1, [x18, #8]			/* load cif, fn */
   388  	mov	x2, x18					/* load user_data */
   389  	b	.Ldo_closure
   390  	cfi_endproc
   391  
   392  	.globl	CNAME(ffi_go_closure_SYSV)
   393  #ifdef __ELF__
   394  	.type	CNAME(ffi_go_closure_SYSV), #function
   395  	.hidden	CNAME(ffi_go_closure_SYSV)
   396  	.size	CNAME(ffi_go_closure_SYSV), . - CNAME(ffi_go_closure_SYSV)
   397  #endif
   398  #endif /* FFI_GO_CLOSURES */
   399  
   400  #if defined __ELF__ && defined __linux__
        /* Empty .note.GNU-stack section marks the stack non-executable
           so the GNU linker does not request an executable stack.  */
   401  	.section .note.GNU-stack,"",%progbits
   402  #endif
   403