github.com/hxx258456/ccgo@v0.0.5-0.20230213014102-48b35f46f66f/sm4/gcm_sm4ni_arm64.s

#include "textflag.h"

#define B0 V0
#define B1 V1
#define B2 V2
#define B3 V3
#define B4 V4
#define B5 V5
#define B6 V6
#define B7 V7

#define ACC0 V8
#define ACC1 V9
#define ACCM V10

#define T0 V11
#define T1 V12
#define T2 V13
#define T3 V14

#define POLY V15
#define ZERO V16
#define INC V17
#define CTR V18

#define K0 V19
#define K1 V20
#define K2 V21
#define K3 V22
#define K4 V23
#define K5 V24
#define K6 V25
#define K7 V26

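// reduce folds the middle Karatsuba term in ACCM into the double-width
// product held in ACC0/ACC1 and reduces it modulo the GHASH polynomial
// encoded in POLY, leaving the 128-bit result in ACC0.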
#define reduce() \
	VEOR	ACC0.B16, ACCM.B16, ACCM.B16     \
	VEOR	ACC1.B16, ACCM.B16, ACCM.B16     \
	VEXT	$8, ZERO.B16, ACCM.B16, T0.B16   \
	VEXT	$8, ACCM.B16, ZERO.B16, ACCM.B16 \
	VEOR	ACCM.B16, ACC0.B16, ACC0.B16     \
	VEOR	T0.B16, ACC1.B16, ACC1.B16       \
	VPMULL	POLY.D1, ACC0.D1, T0.Q1          \
	VEXT	$8, ACC0.B16, ACC0.B16, ACC0.B16 \
	VEOR	T0.B16, ACC0.B16, ACC0.B16       \
	VPMULL	POLY.D1, ACC0.D1, T0.Q1          \
	VEOR	T0.B16, ACC1.B16, ACC1.B16       \
	VEXT	$8, ACC1.B16, ACC1.B16, ACC1.B16 \
	VEOR	ACC1.B16, ACC0.B16, ACC0.B16     \

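// mulRound multiplies block X by the next precomputed power of H (loaded
// from pTbl together with its Karatsuba precomputation) and accumulates the
// low, high and middle partial products into ACC1, ACC0 and ACCM.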
#define mulRound(X) \
	VLD1.P	32(pTbl), [T1.B16, T2.B16] \
	VREV64	X.B16, X.B16               \
	VEXT	$8, X.B16, X.B16, T0.B16   \
	VEOR	X.B16, T0.B16, T0.B16      \
	VPMULL	X.D1, T1.D1, T3.Q1         \
	VEOR	T3.B16, ACC1.B16, ACC1.B16 \
	VPMULL2	X.D2, T1.D2, T3.Q1         \
	VEOR	T3.B16, ACC0.B16, ACC0.B16 \
	VPMULL	T0.D1, T2.D1, T3.Q1        \
	VEOR	T3.B16, ACCM.B16, ACCM.B16

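// sm4eEnc1block runs the 32 SM4 rounds on the block in B0 (V0) using the
// round keys preloaded in K0..K7 (V19..V26). SM4E has no mnemonic in the Go
// assembler, so the instruction is emitted as raw opcodes via WORD.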
#define sm4eEnc1block() \
	WORD $0x6086c0ce         \ //SM4E V0.4S, V19.4S
	WORD $0x8086c0ce         \ //SM4E V0.4S, V20.4S
	WORD $0xa086c0ce         \ //SM4E V0.4S, V21.4S
	WORD $0xc086c0ce         \ //SM4E V0.4S, V22.4S
	WORD $0xe086c0ce         \ //SM4E V0.4S, V23.4S
	WORD $0x0087c0ce         \ //SM4E V0.4S, V24.4S
	WORD $0x2087c0ce         \ //SM4E V0.4S, V25.4S
	WORD $0x4087c0ce           //SM4E V0.4S, V26.4S

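// sm4eEnc8blocks encrypts the eight blocks in B0..B7 (V0..V7) with the same
// round keys, again emitting SM4E as raw opcodes.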
#define sm4eEnc8blocks() \
	sm4eEnc1block()         \
	WORD $0x6186c0ce         \ //SM4E V1.4S, V19.4S
	WORD $0x8186c0ce         \ //SM4E V1.4S, V20.4S
	WORD $0xa186c0ce         \ //SM4E V1.4S, V21.4S
	WORD $0xc186c0ce         \ //SM4E V1.4S, V22.4S
	WORD $0xe186c0ce         \ //SM4E V1.4S, V23.4S
	WORD $0x0187c0ce         \ //SM4E V1.4S, V24.4S
	WORD $0x2187c0ce         \ //SM4E V1.4S, V25.4S
	WORD $0x4187c0ce         \ //SM4E V1.4S, V26.4S
	WORD $0x6286c0ce         \ //SM4E V2.4S, V19.4S
	WORD $0x8286c0ce         \ //SM4E V2.4S, V20.4S
	WORD $0xa286c0ce         \ //SM4E V2.4S, V21.4S
	WORD $0xc286c0ce         \ //SM4E V2.4S, V22.4S
	WORD $0xe286c0ce         \ //SM4E V2.4S, V23.4S
	WORD $0x0287c0ce         \ //SM4E V2.4S, V24.4S
	WORD $0x2287c0ce         \ //SM4E V2.4S, V25.4S
	WORD $0x4287c0ce         \ //SM4E V2.4S, V26.4S
	WORD $0x6386c0ce         \ //SM4E V3.4S, V19.4S
	WORD $0x8386c0ce         \ //SM4E V3.4S, V20.4S
	WORD $0xa386c0ce         \ //SM4E V3.4S, V21.4S
	WORD $0xc386c0ce         \ //SM4E V3.4S, V22.4S
	WORD $0xe386c0ce         \ //SM4E V3.4S, V23.4S
	WORD $0x0387c0ce         \ //SM4E V3.4S, V24.4S
	WORD $0x2387c0ce         \ //SM4E V3.4S, V25.4S
	WORD $0x4387c0ce         \ //SM4E V3.4S, V26.4S
	WORD $0x6486c0ce         \ //SM4E V4.4S, V19.4S
	WORD $0x8486c0ce         \ //SM4E V4.4S, V20.4S
	WORD $0xa486c0ce         \ //SM4E V4.4S, V21.4S
	WORD $0xc486c0ce         \ //SM4E V4.4S, V22.4S
	WORD $0xe486c0ce         \ //SM4E V4.4S, V23.4S
	WORD $0x0487c0ce         \ //SM4E V4.4S, V24.4S
	WORD $0x2487c0ce         \ //SM4E V4.4S, V25.4S
	WORD $0x4487c0ce         \ //SM4E V4.4S, V26.4S
	WORD $0x6586c0ce         \ //SM4E V5.4S, V19.4S
	WORD $0x8586c0ce         \ //SM4E V5.4S, V20.4S
	WORD $0xa586c0ce         \ //SM4E V5.4S, V21.4S
	WORD $0xc586c0ce         \ //SM4E V5.4S, V22.4S
	WORD $0xe586c0ce         \ //SM4E V5.4S, V23.4S
	WORD $0x0587c0ce         \ //SM4E V5.4S, V24.4S
	WORD $0x2587c0ce         \ //SM4E V5.4S, V25.4S
	WORD $0x4587c0ce         \ //SM4E V5.4S, V26.4S
	WORD $0x6686c0ce         \ //SM4E V6.4S, V19.4S
	WORD $0x8686c0ce         \ //SM4E V6.4S, V20.4S
	WORD $0xa686c0ce         \ //SM4E V6.4S, V21.4S
	WORD $0xc686c0ce         \ //SM4E V6.4S, V22.4S
	WORD $0xe686c0ce         \ //SM4E V6.4S, V23.4S
	WORD $0x0687c0ce         \ //SM4E V6.4S, V24.4S
	WORD $0x2687c0ce         \ //SM4E V6.4S, V25.4S
	WORD $0x4687c0ce         \ //SM4E V6.4S, V26.4S
	WORD $0x6786c0ce         \ //SM4E V7.4S, V19.4S
	WORD $0x8786c0ce         \ //SM4E V7.4S, V20.4S
	WORD $0xa786c0ce         \ //SM4E V7.4S, V21.4S
	WORD $0xc786c0ce         \ //SM4E V7.4S, V22.4S
	WORD $0xe786c0ce         \ //SM4E V7.4S, V23.4S
	WORD $0x0787c0ce         \ //SM4E V7.4S, V24.4S
	WORD $0x2787c0ce         \ //SM4E V7.4S, V25.4S
	WORD $0x4787c0ce           //SM4E V7.4S, V26.4S

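// gcmSm4niEnc encrypts src into dst in counter mode using SM4E and folds the
// produced ciphertext into the running GHASH tag at T, processing eight
// blocks per iteration where possible.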
// func gcmSm4niEnc(productTable *[256]byte, dst, src []byte, ctr, T *[16]byte, rk []uint32)
TEXT ·gcmSm4niEnc(SB),NOSPLIT,$0
#define pTbl R0
#define dstPtr R1
#define ctrPtr R2
#define srcPtr R3
#define rk R4
#define tPtr R5
#define srcPtrLen R6
#define aluCTR R7
#define aluTMP R8
#define H0 R9
#define H1 R10
#define pTblSave R11
#define rkSave R12
	MOVD	productTable+0(FP), pTbl
	MOVD	dst+8(FP), dstPtr
	MOVD	src_base+32(FP), srcPtr
	MOVD	src_len+40(FP), srcPtrLen
	MOVD	ctr+56(FP), ctrPtr
	MOVD	T+64(FP), tPtr
	MOVD	rk_base+72(FP), rk

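	// Build the GHASH reduction constant: POLY = 0xC2<<56 in D[0], 1 in D[1].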
	MOVD	$0xC2, H1
	LSL	$56, H1
	MOVD	$1, H0
	VMOV	H1, POLY.D[0]
	VMOV	H0, POLY.D[1]
	VEOR	ZERO.B16, ZERO.B16, ZERO.B16

	MOVD	pTbl, pTblSave
	// Current tag, after AAD
	VLD1	(tPtr), [ACC0.B16]
	VEOR	ACC1.B16, ACC1.B16, ACC1.B16
	VEOR	ACCM.B16, ACCM.B16, ACCM.B16
	// Prepare initial counter, and the increment vector
	VLD1	(ctrPtr), [CTR.B16]
	VEOR	INC.B16, INC.B16, INC.B16
	MOVD	$1, H0
	VMOV	H0, INC.S[3]
	VREV32	CTR.B16, CTR.B16
	VADD	CTR.S4, INC.S4, CTR.S4

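	// CTR is kept with its 32-bit words byte-reversed so the counter can be
	// advanced with a plain vector add of INC (1 in lane S[3]).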
	// If fewer than 8 blocks (128 bytes) remain, the BLT below skips to the
	// single-block loop.
	CMP	$128, srcPtrLen

	MOVD	rk, H0
	// The 32 SM4 round keys are kept in K0 .. K7 (four keys per register).
	VLD1.P	64(H0), [K0.S4, K1.S4, K2.S4, K3.S4]
	VLD1.P	64(H0), [K4.S4, K5.S4, K6.S4, K7.S4]

	BLT	startSingles
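// octetsLoop handles 8 blocks (128 bytes) per iteration: derive 8 counters,
// encrypt them with SM4E, XOR against the plaintext, then hash the resulting
// ciphertext into the tag.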
octetsLoop:
		SUB	$128, srcPtrLen
		// Prepare 8 counters
		VMOV	CTR.B16, B0.B16
		VADD	B0.S4, INC.S4, B1.S4
		VADD	B1.S4, INC.S4, B2.S4
		VADD	B2.S4, INC.S4, B3.S4
		VADD	B3.S4, INC.S4, B4.S4
		VADD	B4.S4, INC.S4, B5.S4
		VADD	B5.S4, INC.S4, B6.S4
		VADD	B6.S4, INC.S4, B7.S4
		VADD	B7.S4, INC.S4, CTR.S4

		sm4eEnc8blocks()
		VREV32 B0.B16, B0.B16
		VREV32 B1.B16, B1.B16
		VREV32 B2.B16, B2.B16
		VREV32 B3.B16, B3.B16
		VREV32 B4.B16, B4.B16
		VREV32 B5.B16, B5.B16
		VREV32 B6.B16, B6.B16
		VREV32 B7.B16, B7.B16

		// XOR plaintext and store ciphertext
		VLD1.P	32(srcPtr), [T1.B16, T2.B16]
		VEOR	B0.B16, T1.B16, B0.B16
		VEOR	B1.B16, T2.B16, B1.B16
		VST1.P  [B0.B16, B1.B16], 32(dstPtr)
		VLD1.P	32(srcPtr), [T1.B16, T2.B16]
		VEOR	B2.B16, T1.B16, B2.B16
		VEOR	B3.B16, T2.B16, B3.B16
		VST1.P  [B2.B16, B3.B16], 32(dstPtr)
		VLD1.P	32(srcPtr), [T1.B16, T2.B16]
		VEOR	B4.B16, T1.B16, B4.B16
		VEOR	B5.B16, T2.B16, B5.B16
		VST1.P  [B4.B16, B5.B16], 32(dstPtr)
		VLD1.P	32(srcPtr), [T1.B16, T2.B16]
		VEOR	B6.B16, T1.B16, B6.B16
		VEOR	B7.B16, T2.B16, B7.B16
		VST1.P  [B6.B16, B7.B16], 32(dstPtr)

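		// Hash the 8 ciphertext blocks: the first block is multiplied
		// explicitly, the remaining seven via mulRound, with a single
		// reduction for the whole batch.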
		VLD1.P	32(pTbl), [T1.B16, T2.B16]
		VREV64	B0.B16, B0.B16
		VEOR	ACC0.B16, B0.B16, B0.B16
		VEXT	$8, B0.B16, B0.B16, T0.B16
		VEOR	B0.B16, T0.B16, T0.B16
		VPMULL	B0.D1, T1.D1, ACC1.Q1
		VPMULL2	B0.D2, T1.D2, ACC0.Q1
		VPMULL	T0.D1, T2.D1, ACCM.Q1

		mulRound(B1)
		mulRound(B2)
		mulRound(B3)
		mulRound(B4)
		mulRound(B5)
		mulRound(B6)
		mulRound(B7)
		MOVD	pTblSave, pTbl
		reduce()

		CMP	$128, srcPtrLen
		BGE	octetsLoop

startSingles:
	CBZ	srcPtrLen, done
	ADD	$14*16, pTbl
	// Preload H and its Karatsuba precomp
	VLD1.P	(pTbl), [T1.B16, T2.B16]

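// singlesLoop encrypts and hashes one 16-byte block per iteration.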
singlesLoop:
		CMP	$16, srcPtrLen
		BLT	tail
		SUB	$16, srcPtrLen

		VMOV	CTR.B16, B0.B16
		VADD	CTR.S4, INC.S4, CTR.S4
		sm4eEnc1block()
		VREV32 B0.B16, B0.B16

singlesLast:
		VLD1.P	16(srcPtr), [T0.B16]
		VEOR	T0.B16, B0.B16, B0.B16

encReduce:
		VST1.P	[B0.B16], 16(dstPtr)

		VREV64	B0.B16, B0.B16
		VEOR	ACC0.B16, B0.B16, B0.B16

		VEXT	$8, B0.B16, B0.B16, T0.B16
		VEOR	B0.B16, T0.B16, T0.B16
		VPMULL	B0.D1, T1.D1, ACC1.Q1
		VPMULL2	B0.D2, T1.D2, ACC0.Q1
		VPMULL	T0.D1, T2.D1, ACCM.Q1

		reduce()

	B	singlesLoop
tail:
	CBZ	srcPtrLen, done

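	// Assemble the final partial plaintext block in T0 and a matching byte
	// mask in T3, reading backwards from the end of src in 8/4/2/1-byte steps.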
	VEOR	T0.B16, T0.B16, T0.B16
	VEOR	T3.B16, T3.B16, T3.B16
	MOVD	$0, H1
	SUB	$1, H1
	ADD	srcPtrLen, srcPtr

	TBZ	$3, srcPtrLen, ld4
	MOVD.W	-8(srcPtr), H0
	VMOV	H0, T0.D[0]
	VMOV	H1, T3.D[0]

ld4:
	TBZ	$2, srcPtrLen, ld2
	MOVW.W	-4(srcPtr), H0
	VEXT	$12, T0.B16, ZERO.B16, T0.B16
	VEXT	$12, T3.B16, ZERO.B16, T3.B16
	VMOV	H0, T0.S[0]
	VMOV	H1, T3.S[0]
ld2:
	TBZ	$1, srcPtrLen, ld1
	MOVH.W	-2(srcPtr), H0
	VEXT	$14, T0.B16, ZERO.B16, T0.B16
	VEXT	$14, T3.B16, ZERO.B16, T3.B16
	VMOV	H0, T0.H[0]
	VMOV	H1, T3.H[0]
ld1:
	TBZ	$0, srcPtrLen, ld0
	MOVB.W	-1(srcPtr), H0
	VEXT	$15, T0.B16, ZERO.B16, T0.B16
	VEXT	$15, T3.B16, ZERO.B16, T3.B16
	VMOV	H0, T0.B[0]
	VMOV	H1, T3.B[0]
ld0:
	MOVD	ZR, srcPtrLen
	VMOV	CTR.B16, B0.B16
	sm4eEnc1block()
	VREV32 B0.B16, B0.B16

tailLast:
	VEOR	T0.B16, B0.B16, B0.B16
	VAND	T3.B16, B0.B16, B0.B16
	B	encReduce

done:
	VST1	[ACC0.B16], (tPtr)
	RET

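// gcmSm4niDec decrypts src into dst; GHASH is computed over the ciphertext
// (the input), so each block is hashed alongside its decryption. Register
// aliases are shared with gcmSm4niEnc above.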
// func gcmSm4niDec(productTable *[256]byte, dst, src []byte, ctr, T *[16]byte, rk []uint32)
TEXT ·gcmSm4niDec(SB),NOSPLIT,$0
	MOVD	productTable+0(FP), pTbl
	MOVD	dst+8(FP), dstPtr
	MOVD	src_base+32(FP), srcPtr
	MOVD	src_len+40(FP), srcPtrLen
	MOVD	ctr+56(FP), ctrPtr
	MOVD	T+64(FP), tPtr
	MOVD	rk_base+72(FP), rk

	MOVD	$0xC2, H1
	LSL	$56, H1
	MOVD	$1, H0
	VMOV	H1, POLY.D[0]
	VMOV	H0, POLY.D[1]
	VEOR	ZERO.B16, ZERO.B16, ZERO.B16

	MOVD	pTbl, pTblSave
	MOVD	rk, rkSave
	// Current tag, after AAD
	VLD1	(tPtr), [ACC0.B16]
	VEOR	ACC1.B16, ACC1.B16, ACC1.B16
	VEOR	ACCM.B16, ACCM.B16, ACCM.B16
	// Prepare initial counter, and the increment vector
	VLD1	(ctrPtr), [CTR.B16]
	VEOR	INC.B16, INC.B16, INC.B16
	MOVD	$1, H0
	VMOV	H0, INC.S[3]
	VREV32	CTR.B16, CTR.B16
	VADD	CTR.S4, INC.S4, CTR.S4

	// If fewer than 8 blocks (128 bytes) remain, the BLT below skips to the
	// single-block loop.
	CMP	$128, srcPtrLen

	MOVD	rk, H0
	// The 32 SM4 round keys are kept in K0 .. K7 (four keys per register).
	VLD1.P	64(H0), [K0.S4, K1.S4, K2.S4, K3.S4]
	VLD1.P	64(H0), [K4.S4, K5.S4, K6.S4, K7.S4]

	BLT	startSingles
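// octetsLoop (decrypt): generate 8 keystream blocks, XOR them with the
// ciphertext, and fold the ciphertext blocks into the GHASH state.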
octetsLoop:
		SUB	$128, srcPtrLen

		VMOV	CTR.B16, B0.B16
		VADD	B0.S4, INC.S4, B1.S4
		VADD	B1.S4, INC.S4, B2.S4
		VADD	B2.S4, INC.S4, B3.S4
		VADD	B3.S4, INC.S4, B4.S4
		VADD	B4.S4, INC.S4, B5.S4
		VADD	B5.S4, INC.S4, B6.S4
		VADD	B6.S4, INC.S4, B7.S4
		VADD	B7.S4, INC.S4, CTR.S4

		sm4eEnc8blocks()
		VREV32 B0.B16, T1.B16
		VREV32 B1.B16, T2.B16
		VREV32 B2.B16, B2.B16
		VREV32 B3.B16, B3.B16
		VREV32 B4.B16, B4.B16
		VREV32 B5.B16, B5.B16
		VREV32 B6.B16, B6.B16
		VREV32 B7.B16, B7.B16

		VLD1.P	32(srcPtr), [B0.B16, B1.B16]
		VEOR	B0.B16, T1.B16, T1.B16
		VEOR	B1.B16, T2.B16, T2.B16
		VST1.P  [T1.B16, T2.B16], 32(dstPtr)

		VLD1.P	32(pTbl), [T1.B16, T2.B16]
		VREV64	B0.B16, B0.B16
		VEOR	ACC0.B16, B0.B16, B0.B16
		VEXT	$8, B0.B16, B0.B16, T0.B16
		VEOR	B0.B16, T0.B16, T0.B16
		VPMULL	B0.D1, T1.D1, ACC1.Q1
		VPMULL2	B0.D2, T1.D2, ACC0.Q1
		VPMULL	T0.D1, T2.D1, ACCM.Q1
		mulRound(B1)

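		// The remaining ciphertext pairs are loaded into B0/B1, XORed with
		// the keystream kept in B2..B7, and hashed via mulRound.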
		VLD1.P	32(srcPtr), [B0.B16, B1.B16]
		VEOR	B2.B16, B0.B16, T1.B16
		VEOR	B3.B16, B1.B16, T2.B16
		VST1.P  [T1.B16, T2.B16], 32(dstPtr)
		mulRound(B0)
		mulRound(B1)

		VLD1.P	32(srcPtr), [B0.B16, B1.B16]
		VEOR	B4.B16, B0.B16, T1.B16
		VEOR	B5.B16, B1.B16, T2.B16
		VST1.P  [T1.B16, T2.B16], 32(dstPtr)
		mulRound(B0)
		mulRound(B1)

		VLD1.P	32(srcPtr), [B0.B16, B1.B16]
		VEOR	B6.B16, B0.B16, T1.B16
		VEOR	B7.B16, B1.B16, T2.B16
		VST1.P  [T1.B16, T2.B16], 32(dstPtr)
		mulRound(B0)
		mulRound(B1)

		MOVD	pTblSave, pTbl
		reduce()

		CMP	$128, srcPtrLen
		BGE	octetsLoop

startSingles:
	CBZ	srcPtrLen, done
	ADD	$14*16, pTbl
	// Preload H and its Karatsuba precomp
	VLD1.P	(pTbl), [T1.B16, T2.B16]

singlesLoop:
		CMP	$16, srcPtrLen
		BLT	tail
		SUB	$16, srcPtrLen

		VLD1.P	16(srcPtr), [T0.B16]
		VREV64	T0.B16, B5.B16

		VMOV	CTR.B16, B0.B16
		VADD	CTR.S4, INC.S4, CTR.S4
		sm4eEnc1block()
		VREV32 B0.B16, B0.B16

singlesLast:
		VEOR	T0.B16, B0.B16, B0.B16
		VST1.P	[B0.B16], 16(dstPtr)

		VEOR	ACC0.B16, B5.B16, B5.B16
		VEXT	$8, B5.B16, B5.B16, T0.B16
		VEOR	B5.B16, T0.B16, T0.B16
		VPMULL	B5.D1, T1.D1, ACC1.Q1
		VPMULL2	B5.D2, T1.D2, ACC0.Q1
		VPMULL	T0.D1, T2.D1, ACCM.Q1
		reduce()

	B	singlesLoop
tail:
	CBZ	srcPtrLen, done
	VMOV	CTR.B16, B0.B16
	VADD	CTR.S4, INC.S4, CTR.S4
	sm4eEnc1block()
	VREV32 B0.B16, B0.B16
tailLast:
	// Assuming it is safe to load a full block past the end of src, since the
	// authentication tag follows the ciphertext in the caller's buffer.
	// B5 holds the final (partial) ciphertext block.
	VLD1	(srcPtr), [B5.B16]

	VEOR	B5.B16, B0.B16, B0.B16

	VEOR	T3.B16, T3.B16, T3.B16
	MOVD	$0, H1
	SUB	$1, H1

	TBZ	$3, srcPtrLen, ld4 // If bit 3 of srcPtrLen is clear, skip the 8-byte store
	VMOV	B0.D[0], H0
	MOVD.P	H0, 8(dstPtr)
	VMOV	H1, T3.D[0]
	VEXT	$8, ZERO.B16, B0.B16, B0.B16
ld4:
	TBZ	$2, srcPtrLen, ld2 // If bit 2 of srcPtrLen is clear, skip the 4-byte store
	VMOV	B0.S[0], H0
	MOVW.P	H0, 4(dstPtr)
	VEXT	$12, T3.B16, ZERO.B16, T3.B16
	VMOV	H1, T3.S[0]
	VEXT	$4, ZERO.B16, B0.B16, B0.B16
ld2:
	TBZ	$1, srcPtrLen, ld1 // If bit 1 of srcPtrLen is clear, skip the 2-byte store
	VMOV	B0.H[0], H0
	MOVH.P	H0, 2(dstPtr)
	VEXT	$14, T3.B16, ZERO.B16, T3.B16
	VMOV	H1, T3.H[0]
	VEXT	$2, ZERO.B16, B0.B16, B0.B16
ld1:
	TBZ	$0, srcPtrLen, ld0 // If bit 0 of srcPtrLen is clear, skip the 1-byte store
	VMOV	B0.B[0], H0
	MOVB.P	H0, 1(dstPtr)
	VEXT	$15, T3.B16, ZERO.B16, T3.B16
	VMOV	H1, T3.B[0]
ld0:

	VAND	T3.B16, B5.B16, B5.B16
	VREV64	B5.B16, B5.B16

	VEOR	ACC0.B16, B5.B16, B5.B16
	VEXT	$8, B5.B16, B5.B16, T0.B16
	VEOR	B5.B16, T0.B16, T0.B16
	VPMULL	B5.D1, T1.D1, ACC1.Q1
	VPMULL2	B5.D2, T1.D2, ACC0.Q1
	VPMULL	T0.D1, T2.D1, ACCM.Q1
	reduce()
done:
	VST1	[ACC0.B16], (tPtr)

	RET