github.com/insolar/x-crypto@v0.0.0-20191031140942-75fab8a325f6/md5/md5block_amd64.s

// Original source:
//	http://www.zorinaq.com/papers/md5-amd64.html
//	http://www.zorinaq.com/papers/md5-amd64.tar.bz2
//
// Translated from Perl generating GNU assembly into
// #defines generating 6a assembly by the Go Authors.

#include "textflag.h"

// MD5 optimized for AMD64.
//
// Author: Marc Bevand <bevand_m (at) epita.fr>
// Licence: I hereby disclaim the copyright on this code and place it
// in the public domain.

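// block applies the MD5 compression function to every full 64-byte
// block of p, updating the four 32-bit state words pointed to by dig
// in place; any partial trailing block is left to the caller. The
// arguments are dig (a pointer, at dig+0(FP)) and the byte slice p
// (at p+8(FP)), giving the $0-32 frame.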
TEXT	·block(SB),NOSPLIT,$0-32
	MOVQ	dig+0(FP),	BP
	MOVQ	p+8(FP),	SI
	MOVQ	p_len+16(FP), DX
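	// Round the length down to a whole number of 64-byte blocks;
	// only full blocks are processed here.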
	SHRQ	$6,		DX
	SHLQ	$6,		DX

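	// DI marks the end of the last whole block; AX..DX hold the
	// state words a, b, c, d.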
	LEAQ	(SI)(DX*1),	DI
	MOVL	(0*4)(BP),	AX
	MOVL	(1*4)(BP),	BX
	MOVL	(2*4)(BP),	CX
	MOVL	(3*4)(BP),	DX

	CMPQ	SI,		DI
	JEQ	end

loop:
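	// Save the incoming state; it is added back in after the 64
	// steps (the Merkle-Damgård feed-forward).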
	MOVL	AX,		R12
	MOVL	BX,		R13
	MOVL	CX,		R14
	MOVL	DX,		R15

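	// R8 holds the current message word m[i]; R9 is scratch for
	// the round's boolean function, seeded here with d.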
	MOVL	(0*4)(SI),	R8
	MOVL	DX,		R9

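// ROUND1 is one MD5 step of rounds 0-15:
//	a = b + ((a + F(b,c,d) + m[i] + const) <<< shift)
// where F(b,c,d) = (b AND c) OR (NOT b AND d), computed as
// d XOR (b AND (c XOR d)) to save an instruction. The current word
// m[i] is already in R8; index prefetches the word for the next step,
// and the trailing MOVL seeds R9 with the next step's d (this step's c).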
#define ROUND1(a, b, c, d, index, const, shift) \
	XORL	c,		R9; \
	LEAL	const(a)(R8*1),	a; \
	ANDL	b,		R9; \
	XORL	d,		R9; \
	MOVL	(index*4)(SI),	R8; \
	ADDL	R9,		a; \
	ROLL	$shift,		a; \
	MOVL	c,		R9; \
	ADDL	b,		a

	ROUND1(AX,BX,CX,DX, 1,0xd76aa478, 7);
	ROUND1(DX,AX,BX,CX, 2,0xe8c7b756,12);
	ROUND1(CX,DX,AX,BX, 3,0x242070db,17);
	ROUND1(BX,CX,DX,AX, 4,0xc1bdceee,22);
	ROUND1(AX,BX,CX,DX, 5,0xf57c0faf, 7);
	ROUND1(DX,AX,BX,CX, 6,0x4787c62a,12);
	ROUND1(CX,DX,AX,BX, 7,0xa8304613,17);
	ROUND1(BX,CX,DX,AX, 8,0xfd469501,22);
	ROUND1(AX,BX,CX,DX, 9,0x698098d8, 7);
	ROUND1(DX,AX,BX,CX,10,0x8b44f7af,12);
	ROUND1(CX,DX,AX,BX,11,0xffff5bb1,17);
	ROUND1(BX,CX,DX,AX,12,0x895cd7be,22);
	ROUND1(AX,BX,CX,DX,13,0x6b901122, 7);
	ROUND1(DX,AX,BX,CX,14,0xfd987193,12);
	ROUND1(CX,DX,AX,BX,15,0xa679438e,17);
	ROUND1(BX,CX,DX,AX, 0,0x49b40821,22);

	MOVL	(1*4)(SI),	R8
	MOVL	DX,		R9
	MOVL	DX,		R10

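// ROUND2 is one MD5 step of rounds 16-31, using
// G(b,c,d) = (b AND d) OR (c AND NOT d): R9 accumulates NOT d AND c,
// R10 accumulates d AND b, and the two are ORed into a. The trailing
// MOVLs reload R9 and R10 with c, the next step's d.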
#define ROUND2(a, b, c, d, index, const, shift) \
	NOTL	R9; \
	LEAL	const(a)(R8*1),a; \
	ANDL	b,		R10; \
	ANDL	c,		R9; \
	MOVL	(index*4)(SI),R8; \
	ORL	R9,		R10; \
	MOVL	c,		R9; \
	ADDL	R10,		a; \
	MOVL	c,		R10; \
	ROLL	$shift,	a; \
	ADDL	b,		a

	ROUND2(AX,BX,CX,DX, 6,0xf61e2562, 5);
	ROUND2(DX,AX,BX,CX,11,0xc040b340, 9);
	ROUND2(CX,DX,AX,BX, 0,0x265e5a51,14);
	ROUND2(BX,CX,DX,AX, 5,0xe9b6c7aa,20);
	ROUND2(AX,BX,CX,DX,10,0xd62f105d, 5);
	ROUND2(DX,AX,BX,CX,15, 0x2441453, 9);
	ROUND2(CX,DX,AX,BX, 4,0xd8a1e681,14);
	ROUND2(BX,CX,DX,AX, 9,0xe7d3fbc8,20);
	ROUND2(AX,BX,CX,DX,14,0x21e1cde6, 5);
	ROUND2(DX,AX,BX,CX, 3,0xc33707d6, 9);
	ROUND2(CX,DX,AX,BX, 8,0xf4d50d87,14);
	ROUND2(BX,CX,DX,AX,13,0x455a14ed,20);
	ROUND2(AX,BX,CX,DX, 2,0xa9e3e905, 5);
	ROUND2(DX,AX,BX,CX, 7,0xfcefa3f8, 9);
	ROUND2(CX,DX,AX,BX,12,0x676f02d9,14);
	ROUND2(BX,CX,DX,AX, 0,0x8d2a4c8a,20);

	MOVL	(5*4)(SI),	R8
	MOVL	CX,		R9

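// ROUND3 is one MD5 step of rounds 32-47, using
// H(b,c,d) = b XOR c XOR d. R9 enters holding c and XORs in d and b;
// the trailing MOVL leaves b, the next step's c, in R9.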
#define ROUND3(a, b, c, d, index, const, shift) \
	LEAL	const(a)(R8*1),a; \
	MOVL	(index*4)(SI),R8; \
	XORL	d,		R9; \
	XORL	b,		R9; \
	ADDL	R9,		a; \
	ROLL	$shift,		a; \
	MOVL	b,		R9; \
	ADDL	b,		a

	ROUND3(AX,BX,CX,DX, 8,0xfffa3942, 4);
	ROUND3(DX,AX,BX,CX,11,0x8771f681,11);
	ROUND3(CX,DX,AX,BX,14,0x6d9d6122,16);
	ROUND3(BX,CX,DX,AX, 1,0xfde5380c,23);
	ROUND3(AX,BX,CX,DX, 4,0xa4beea44, 4);
	ROUND3(DX,AX,BX,CX, 7,0x4bdecfa9,11);
	ROUND3(CX,DX,AX,BX,10,0xf6bb4b60,16);
	ROUND3(BX,CX,DX,AX,13,0xbebfbc70,23);
	ROUND3(AX,BX,CX,DX, 0,0x289b7ec6, 4);
	ROUND3(DX,AX,BX,CX, 3,0xeaa127fa,11);
	ROUND3(CX,DX,AX,BX, 6,0xd4ef3085,16);
	ROUND3(BX,CX,DX,AX, 9, 0x4881d05,23);
	ROUND3(AX,BX,CX,DX,12,0xd9d4d039, 4);
	ROUND3(DX,AX,BX,CX,15,0xe6db99e5,11);
	ROUND3(CX,DX,AX,BX, 2,0x1fa27cf8,16);
	ROUND3(BX,CX,DX,AX, 0,0xc4ac5665,23);

	MOVL	(0*4)(SI),	R8
	MOVL	$0xffffffff,	R9
	XORL	DX,		R9

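// ROUND4 is one MD5 step of rounds 48-63, using
// I(b,c,d) = c XOR (b OR NOT d). R9 enters holding NOT d; mid-macro it
// is reloaded with 0xffffffff and XORed with c to form NOT d for the
// next step.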
#define ROUND4(a, b, c, d, index, const, shift) \
	LEAL	const(a)(R8*1),a; \
	ORL	b,		R9; \
	XORL	c,		R9; \
	ADDL	R9,		a; \
	MOVL	(index*4)(SI),R8; \
	MOVL	$0xffffffff,	R9; \
	ROLL	$shift,		a; \
	XORL	c,		R9; \
	ADDL	b,		a

	ROUND4(AX,BX,CX,DX, 7,0xf4292244, 6);
	ROUND4(DX,AX,BX,CX,14,0x432aff97,10);
	ROUND4(CX,DX,AX,BX, 5,0xab9423a7,15);
	ROUND4(BX,CX,DX,AX,12,0xfc93a039,21);
	ROUND4(AX,BX,CX,DX, 3,0x655b59c3, 6);
	ROUND4(DX,AX,BX,CX,10,0x8f0ccc92,10);
	ROUND4(CX,DX,AX,BX, 1,0xffeff47d,15);
	ROUND4(BX,CX,DX,AX, 8,0x85845dd1,21);
	ROUND4(AX,BX,CX,DX,15,0x6fa87e4f, 6);
	ROUND4(DX,AX,BX,CX, 6,0xfe2ce6e0,10);
	ROUND4(CX,DX,AX,BX,13,0xa3014314,15);
	ROUND4(BX,CX,DX,AX, 4,0x4e0811a1,21);
	ROUND4(AX,BX,CX,DX,11,0xf7537e82, 6);
	ROUND4(DX,AX,BX,CX, 2,0xbd3af235,10);
	ROUND4(CX,DX,AX,BX, 9,0x2ad7d2bb,15);
	ROUND4(BX,CX,DX,AX, 0,0xeb86d391,21);

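	// Feed-forward: add the block's starting state back in.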
	ADDL	R12,	AX
	ADDL	R13,	BX
	ADDL	R14,	CX
	ADDL	R15,	DX

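	// Advance to the next 64-byte block and loop while below the
	// end pointer DI.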
	ADDQ	$64,		SI
	CMPQ	SI,		DI
	JB	loop

end:
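	// Write the final state words back to the digest.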
	MOVL	AX,		(0*4)(BP)
	MOVL	BX,		(1*4)(BP)
	MOVL	CX,		(2*4)(BP)
	MOVL	DX,		(3*4)(BP)
	RET