github.com/emmansun/gmsm@v0.29.1/sm4/aesni_macros_ppc64x.s

// LOAD_CONSTS loads the constants shared by the macros below from the table at
// baseAddrReg: the transpose permute masks M0-M3, REVERSE_WORDS, NIBBLE_MASK,
// and the affine-transform nibble tables M1L/M1H and M2L/M2H. offsetReg is
// used as the index register and is clobbered.
#define LOAD_CONSTS(baseAddrReg, offsetReg) \
	LXVD2X (baseAddrReg)(R0), M0; \
	MOVD $0x10, offsetReg; \
	LXVD2X (baseAddrReg)(offsetReg), M1; \
	MOVD $0x20, offsetReg; \
	LXVD2X (baseAddrReg)(offsetReg), M2; \
	MOVD $0x30, offsetReg; \
	LXVD2X (baseAddrReg)(offsetReg), M3; \
	MOVD $0x40, offsetReg; \
	LXVD2X (baseAddrReg)(offsetReg), REVERSE_WORDS; \
	MOVD $0x50, offsetReg; \
	LXVD2X (baseAddrReg)(offsetReg), NIBBLE_MASK; \
	MOVD $0x70, offsetReg; \
	LXVD2X (baseAddrReg)(offsetReg), M1L; \
	MOVD $0x80, offsetReg; \
	LXVD2X (baseAddrReg)(offsetReg), M1H; \
	MOVD $0x90, offsetReg; \
	LXVD2X (baseAddrReg)(offsetReg), M2L; \
	MOVD $0xa0, offsetReg; \
	LXVD2X (baseAddrReg)(offsetReg), M2H

// Endianness-aware 4x32-bit vector load/store. On ppc64le the raw
// LXVW4X/STXVW4X do not produce the byte order the SM4 code expects, so the
// data is additionally permuted through ESPERMW; NEEDS_PERMW tells the
// including file that ESPERMW must be initialized before these macros are used.
#ifdef GOARCH_ppc64le
#define NEEDS_PERMW

#define PPC64X_LXVW4X(RA,RB,VT) \
	LXVW4X	(RA+RB), VT \
	VPERM	VT, VT, ESPERMW, VT

#define PPC64X_STXVW4X(VS, RA, RB) \
	VPERM	VS, VS, ESPERMW, VS \
	STXVW4X	VS, (RA+RB)

#else
#define PPC64X_LXVW4X(RA,RB,VT)  LXVW4X	(RA+RB), VT
#define PPC64X_STXVW4X(VS, RA, RB) STXVW4X	VS, (RA+RB)
#endif // defined(GOARCH_ppc64le)
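// For example (registers chosen only for illustration), PPC64X_LXVW4X(R3, R4, V0)
// loads the 16 bytes at R3+R4 into V0 with the same word layout on both ppc64
// and ppc64le.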

// r = s <<< n
// VSPLTISW takes a 5-bit signed immediate, so n MUST be in [0, 15];
// for n > 15 the macro has to be applied multiple times, since no vector
// rotate-by-immediate instruction is available.
#define PROLD(s, r, tmp, n) \
	VSPLTISW $n, tmp \
	VRLW	s, tmp, r
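// Larger rotations are obtained by chaining the macro: two applications with
// n=12, for example, give (s <<< 12) <<< 12 = s <<< 24.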

// input: from high to low
// t0 = t0.S3, t0.S2, t0.S1, t0.S0
// t1 = t1.S3, t1.S2, t1.S1, t1.S0
// t2 = t2.S3, t2.S2, t2.S1, t2.S0
// t3 = t3.S3, t3.S2, t3.S1, t3.S0
// output: from high to low
// t0 = t3.S0, t2.S0, t1.S0, t0.S0
// t1 = t3.S1, t2.S1, t1.S1, t0.S1
// t2 = t3.S2, t2.S2, t1.S2, t0.S2
// t3 = t3.S3, t2.S3, t1.S3, t0.S3
#define PRE_TRANSPOSE_MATRIX(T0, T1, T2, T3) \
	VPERM T0, T1, M0, TMP0; \
	VPERM T2, T3, M0, TMP1; \
	VPERM T0, T1, M1, TMP2; \
	VPERM T2, T3, M1, TMP3; \
	VPERM TMP0, TMP1, M2, T0; \
	VPERM TMP0, TMP1, M3, T1; \
	VPERM TMP2, TMP3, M2, T2; \
	VPERM TMP2, TMP3, M3, T3

// input: from high to low
// t0 = t0.S3, t0.S2, t0.S1, t0.S0
// t1 = t1.S3, t1.S2, t1.S1, t1.S0
// t2 = t2.S3, t2.S2, t2.S1, t2.S0
// t3 = t3.S3, t3.S2, t3.S1, t3.S0
// output: from high to low
// t0 = t0.S0, t1.S0, t2.S0, t3.S0
// t1 = t0.S1, t1.S1, t2.S1, t3.S1
// t2 = t0.S2, t1.S2, t2.S2, t3.S2
// t3 = t0.S3, t1.S3, t2.S3, t3.S3
#define TRANSPOSE_MATRIX(T0, T1, T2, T3) \
	VPERM T1, T0, M0, TMP0; \
	VPERM T1, T0, M1, TMP1; \
	VPERM T3, T2, M0, TMP2; \
	VPERM T3, T2, M1, TMP3; \
	VPERM TMP2, TMP0, M2, T0; \
	VPERM TMP2, TMP0, M3, T1; \
	VPERM TMP3, TMP1, M2, T2; \
	VPERM TMP3, TMP1, M3, T3
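// Both transpose macros gather word Si of every input register into output
// register i (a 4x4 transpose of 32-bit words); they differ only in the lane
// order of the results, as documented above. A scalar Go sketch of the
// underlying transpose (illustrative only, not part of this package):
//
//	var t [4][4]uint32
//	for i := 0; i < 4; i++ {
//		for j := i + 1; j < 4; j++ {
//			t[i][j], t[j][i] = t[j][i], t[i][j]
//		}
//	}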

// Affine Transform
// parameters:
// -  L: table for low nibbles
// -  H: table for high nibbles
// -  V_FOUR: vector register holding the shift amount 4
// -  x: 128-bit register, sbox input/output data
// -  y: 128-bit temp register
// -  z: 128-bit temp register
#define AFFINE_TRANSFORM(L, H, V_FOUR, x, y, z)  \
	VAND NIBBLE_MASK, x, z;              \
	VPERM L, L, z, y;                    \
	VSRD x, V_FOUR, x;                   \
	VAND NIBBLE_MASK, x, z;              \
	VPERM H, H, z, x;                    \
	VXOR y, x, x
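// A scalar sketch of the nibble-table lookup above, per byte b of x, with L
// and H viewed as 16-entry byte tables (hypothetical helper, illustrative only):
//
//	func affine(L, H [16]byte, b byte) byte {
//		return L[b&0x0f] ^ H[b>>4]
//	}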

// Affine Transform, with the low-nibble lookup taken from NOT(x)
// parameters:
// -  L: table for low nibbles
// -  H: table for high nibbles
// -  V_FOUR: vector register holding the shift amount 4
// -  x: 128-bit register, sbox input/output data
// -  y: 128-bit temp register
// -  z: 128-bit temp register
#define AFFINE_TRANSFORM_NOTX(L, H, V_FOUR, x, y, z)  \
	VNOR  x, x, z;                       \ // z = NOT(x)
	VAND  NIBBLE_MASK, z, z;             \
	VPERM L, L, z, y;                    \
	VSRD x, V_FOUR, x;                   \
	VAND NIBBLE_MASK, x, z;              \
	VPERM H, H, z, x;                    \
	VXOR y, x, x

// SM4 sbox function
// parameters:
// -  x: 128-bit register, sbox input/output data
// -  y: 128-bit temp register
// -  z: 128-bit temp register
#define SM4_SBOX(x, y, z) \
	AFFINE_TRANSFORM(M1L, M1H, V_FOUR, x, y, z); \
	VSBOX x, x;                                  \
	AFFINE_TRANSFORM(M2L, M2H, V_FOUR, x, y, z)
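// The SM4 S-box is evaluated here via the AES S-box: an input affine transform
// (tables M1L/M1H), the hardware AES SubBytes (VSBOX), then an output affine
// transform (tables M2L/M2H); in scalar terms roughly
// sm4Sbox(b) = A2(aesSbox(A1(b))), with A1/A2 the affine maps encoded in the
// nibble tables.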

// SM4 TAO L1 function: applies the S-box, then the L1 linear transform.
// parameters:
// -  x: 128-bit register, TAO_L1 input/output data
// -  tmp1: 128-bit temp register
// -  tmp2: 128-bit temp register
// -  tmp3: 128-bit temp register
#define SM4_TAO_L1(x, tmp1, tmp2, tmp3)         \
	SM4_SBOX(x, tmp1, tmp2);                      \
	;                                       \ //####################  4 parallel L1 linear transforms ##################//
	VSPLTISW $8, tmp3;                      \
	VRLW	x, tmp3, tmp1;                  \ // tmp1 = x <<< 8
	VRLW tmp1, tmp3, tmp2;                  \ // tmp2 = x <<< 16
	VXOR x, tmp1, tmp1;                     \ // tmp1 = x xor (x <<< 8)
	VXOR tmp1, tmp2, tmp1;                  \ // tmp1 = x xor (x <<< 8) xor (x <<< 16)
	VRLW tmp2, tmp3, tmp2;                  \ // tmp2 = x <<< 24
	VXOR tmp2, x, x;                        \ // x = x xor (x <<< 24)
	VSPLTISW $2, tmp3;                      \
	VRLW tmp1, tmp3, tmp1;                  \ // tmp1 = (x xor (x <<< 8) xor (x <<< 16)) <<< 2
	VXOR tmp1, x, x
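// The rotate/xor sequence above computes the SM4 L1 linear transform
// L1(x) = x ^ (x <<< 2) ^ (x <<< 10) ^ (x <<< 18) ^ (x <<< 24), factored into
// rotations by 8 and 2 because VSPLTISW cannot materialize the larger rotate
// amounts directly. A scalar Go sketch of the same factoring (illustrative
// only, using math/bits):
//
//	func l1(x uint32) uint32 {
//		t := x ^ bits.RotateLeft32(x, 8) ^ bits.RotateLeft32(x, 16)
//		return x ^ bits.RotateLeft32(x, 24) ^ bits.RotateLeft32(t, 2)
//	}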

// SM4 round function
// t0 ^= tao_l1(t1^t2^t3^rk)
// parameters:
// - RK: round key register
// -  x: 128-bit temp register
// - tmp1: 128-bit temp register
// - tmp2: 128-bit temp register
// - tmp3: 128-bit temp register
// - t0: 128-bit register, holds the result
// - t1: 128-bit register, data
// - t2: 128-bit register, data
// - t3: 128-bit register, data
#define SM4_ROUND(RK, x, tmp1, tmp2, tmp3, t0, t1, t2, t3) \
	VXOR RK, t1, x;					  \
	VXOR t2, x, x;					  \
	VXOR t3, x, x;					  \
	SM4_TAO_L1(x, tmp1, tmp2, tmp3);  \
	VXOR x, t0, t0
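// Scalar sketch of one round on a single block, with the byte-wise S-box and
// l1 as above (hypothetical helpers, illustrative only):
//
//	func round(rk, t0, t1, t2, t3 uint32) uint32 {
//		return t0 ^ l1(sbox(t1^t2^t3^rk))
//	}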

// One group of four SM4 rounds over eight blocks of state held in V0-V7.
// V8 holds the four round keys for the group; VSPLTW broadcasts each key in
// turn to every lane of V9.
#define PROCESS_8BLOCKS_4ROUND \
	VSPLTW $0, V8, V9; \
	SM4_ROUND(V9, TMP0, TMP1, TMP2, TMP3, V0, V1, V2, V3); \
	SM4_ROUND(V9, TMP0, TMP1, TMP2, TMP3, V4, V5, V6, V7); \
	VSPLTW $1, V8, V9; \
	SM4_ROUND(V9, TMP0, TMP1, TMP2, TMP3, V1, V2, V3, V0); \
	SM4_ROUND(V9, TMP0, TMP1, TMP2, TMP3, V5, V6, V7, V4); \
	VSPLTW $2, V8, V9; \
	SM4_ROUND(V9, TMP0, TMP1, TMP2, TMP3, V2, V3, V0, V1); \
	SM4_ROUND(V9, TMP0, TMP1, TMP2, TMP3, V6, V7, V4, V5); \
	VSPLTW $3, V8, V9; \
	SM4_ROUND(V9, TMP0, TMP1, TMP2, TMP3, V3, V0, V1, V2); \
	SM4_ROUND(V9, TMP0, TMP1, TMP2, TMP3, V7, V4, V5, V6)

// One group of four SM4 rounds over four blocks of state held in V0-V3; round
// keys are handled as in PROCESS_8BLOCKS_4ROUND.
#define PROCESS_4BLOCKS_4ROUND \
	VSPLTW $0, V8, V9; \
	SM4_ROUND(V9, TMP0, TMP1, TMP2, TMP3, V0, V1, V2, V3); \
	VSPLTW $1, V8, V9; \
	SM4_ROUND(V9, TMP0, TMP1, TMP2, TMP3, V1, V2, V3, V0); \
	VSPLTW $2, V8, V9; \
	SM4_ROUND(V9, TMP0, TMP1, TMP2, TMP3, V2, V3, V0, V1); \
	VSPLTW $3, V8, V9; \
	SM4_ROUND(V9, TMP0, TMP1, TMP2, TMP3, V3, V0, V1, V2)

// One group of four SM4 rounds on a single block. VSLDOI rotates V8 by four
// bytes after each round so that the next round key moves into position.
#define PROCESS_SINGLEBLOCK_4ROUND \
	SM4_ROUND(V8, TMP0, TMP1, TMP2, TMP3, V0, V1, V2, V3); \
	VSLDOI $4, V8, V8, V8; \
	SM4_ROUND(V8, TMP0, TMP1, TMP2, TMP3, V1, V2, V3, V0); \
	VSLDOI $4, V8, V8, V8; \
	SM4_ROUND(V8, TMP0, TMP1, TMP2, TMP3, V2, V3, V0, V1); \
	VSLDOI $4, V8, V8, V8; \
	SM4_ROUND(V8, TMP0, TMP1, TMP2, TMP3, V3, V0, V1, V2)
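
// Each of the macros above performs four of SM4's 32 rounds, so a full
// encryption or decryption applies one of them eight times, with V8 expected
// to hold the next four round keys on each invocation.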