github.com/aergoio/aergo@v1.3.1/libtool/src/gmp-6.1.2/mpn/arm64/copyd.asm

dnl  ARM64 mpn_copyd.

dnl  Copyright 2013 Free Software Foundation, Inc.

dnl  This file is part of the GNU MP Library.
dnl
dnl  The GNU MP Library is free software; you can redistribute it and/or modify
dnl  it under the terms of either:
dnl
dnl    * the GNU Lesser General Public License as published by the Free
dnl      Software Foundation; either version 3 of the License, or (at your
dnl      option) any later version.
dnl
dnl  or
dnl
dnl    * the GNU General Public License as published by the Free Software
dnl      Foundation; either version 2 of the License, or (at your option) any
dnl      later version.
dnl
dnl  or both in parallel, as here.
dnl
dnl  The GNU MP Library is distributed in the hope that it will be useful, but
dnl  WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
dnl  or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
dnl  for more details.
dnl
dnl  You should have received copies of the GNU General Public License and the
dnl  GNU Lesser General Public License along with the GNU MP Library.  If not,
dnl  see https://www.gnu.org/licenses/.

include(`../config.m4')

C	     cycles/limb
C Cortex-A53	 ?
C Cortex-A57	 ?

changecom(@&*$)

define(`rp', `x0')
define(`up', `x1')
define(`n',  `x2')

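C In C terms, the routine behaves roughly like the sketch below: a limb copy
C that runs from the top of the operands downward, which is what makes it
C safe for overlapping operands with rp >= up.
C
C	void
C	mpn_copyd (mp_limb_t *rp, const mp_limb_t *up, mp_size_t n)
C	{
C	  mp_size_t i;
C	  for (i = n - 1; i >= 0; i--)
C	    rp[i] = up[i];
C	}
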
ASM_START()
PROLOGUE(mpn_copyd)
	add	rp, rp, n, lsl #3
	add	up, up, n, lsl #3
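C rp and up now point just past the high limb of each operand (limbs are
C 8 bytes, hence the lsl #3); all copying proceeds downward from here.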

	cmp	n, #3
	b.le	L(bc)
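C Operands of at most 3 limbs are handled entirely by the tail code at L(bc).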

C Copy until rp is 128-bit aligned
	tbz	rp, #3, L(al2)
	sub	up, up, #8
	ld1	{v22.1d}, [up]
	sub	n, n, #1
	sub	rp, rp, #8
	st1	{v22.1d}, [rp]

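C At L(al2), rp is 16-byte aligned and at least 3 limbs remain.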
L(al2):	sub	up, up, #16
	ld1	{v26.2d}, [up]
	subs	n, n, #6
	sub	rp, rp, #16			C offset rp for loop
	b.lt	L(end)

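C Pre-offset up and load x12 with -16 so the post-indexed ld1/st1 in the
C loop step both pointers downward by 16 bytes per access.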
	sub	up, up, #16			C offset up for loop
	mov	x12, #-16

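C Main loop: 4 limbs per iteration, with v22 and v26 alternating so that one
C 2-limb load always stays in flight ahead of its store.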
	ALIGN(16)
L(top):	ld1	{v22.2d}, [up], x12
	st1	{v26.2d}, [rp], x12
	ld1	{v26.2d}, [up], x12
	st1	{v22.2d}, [rp], x12
	subs	n, n, #4
	b.ge	L(top)

	add	up, up, #16			C undo up offset

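C Flush the 2 limbs still buffered in v26 (loaded at L(al2) or by the loop).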
L(end):	st1	{v26.2d}, [rp]

C Copy the last 0-3 limbs.  Note that rp is aligned after the loop, but not
C necessarily when we branch straight to L(bc) for small n.
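C Bit 1 of n selects a 2-limb copy and bit 0 a final 1-limb copy, which
C together cover the 0-3 leftover limbs.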
L(bc):	tbz	n, #1, L(tl1)
	sub	up, up, #16
	ld1	{v22.2d}, [up]
	sub	rp, rp, #16
	st1	{v22.2d}, [rp]
L(tl1):	tbz	n, #0, L(tl2)
	sub	up, up, #8
	ld1	{v22.1d}, [up]
	sub	rp, rp, #8
	st1	{v22.1d}, [rp]
L(tl2):	ret
EPILOGUE()