github.com/xushiwei/go@v0.0.0-20130601165731-2b9d83f45bc9/src/pkg/crypto/md5/md5block_amd64.s

// Original source:
// http://www.zorinaq.com/papers/md5-amd64.html
// http://www.zorinaq.com/papers/md5-amd64.tar.bz2
//
// Translated from Perl generating GNU assembly into
// #defines generating 6a assembly by the Go Authors.

// MD5 optimized for AMD64.
//
// Author: Marc Bevand <bevand_m (at) epita.fr>
// Licence: I hereby disclaim the copyright on this code and place it
// in the public domain.

TEXT	·block(SB),7,$0-32
	MOVQ	dig+0(FP), BP
	MOVQ	p+8(FP), SI
	MOVQ	p_len+16(FP), DX
	// Round the input length down to a whole number of 64-byte blocks.
	SHRQ	$6, DX
	SHLQ	$6, DX

	LEAQ	(SI)(DX*1), DI	// DI marks the end of the last full block
	MOVL	(0*4)(BP), AX	// load the four 32-bit state words a, b, c, d
	MOVL	(1*4)(BP), BX
	MOVL	(2*4)(BP), CX
	MOVL	(3*4)(BP), DX

	CMPQ	SI, DI
	JEQ	end

loop:
	// Save the incoming state for the per-block feedforward addition.
	MOVL	AX, R12
	MOVL	BX, R13
	MOVL	CX, R14
	MOVL	DX, R15

	MOVL	(0*4)(SI), R8	// preload x[0]
	MOVL	DX, R9		// seed R9 = d for the first F computation

// ROUND1 performs one F-round step:
//	a = b + ((a + F(b,c,d) + x[i] + const) <<< shift)
// where F(b,c,d) = (b & c) | (^b & d), evaluated as d ^ (b & (c ^ d))
// to save an instruction. R8 holds the current message word; index
// prefetches the next one (the last step's load is a dummy, since the
// next group reloads R8).
#define ROUND1(a, b, c, d, index, const, shift) \
	XORL	c, R9; \
	LEAL	const(a)(R8*1), a; \
	ANDL	b, R9; \
	XORL	d, R9; \
	MOVL	(index*4)(SI), R8; \
	ADDL	R9, a; \
	ROLL	$shift, a; \
	MOVL	c, R9; \
	ADDL	b, a

	ROUND1(AX,BX,CX,DX, 1,0xd76aa478, 7);
	ROUND1(DX,AX,BX,CX, 2,0xe8c7b756,12);
	ROUND1(CX,DX,AX,BX, 3,0x242070db,17);
	ROUND1(BX,CX,DX,AX, 4,0xc1bdceee,22);
	ROUND1(AX,BX,CX,DX, 5,0xf57c0faf, 7);
	ROUND1(DX,AX,BX,CX, 6,0x4787c62a,12);
	ROUND1(CX,DX,AX,BX, 7,0xa8304613,17);
	ROUND1(BX,CX,DX,AX, 8,0xfd469501,22);
	ROUND1(AX,BX,CX,DX, 9,0x698098d8, 7);
	ROUND1(DX,AX,BX,CX,10,0x8b44f7af,12);
	ROUND1(CX,DX,AX,BX,11,0xffff5bb1,17);
	ROUND1(BX,CX,DX,AX,12,0x895cd7be,22);
	ROUND1(AX,BX,CX,DX,13,0x6b901122, 7);
	ROUND1(DX,AX,BX,CX,14,0xfd987193,12);
	ROUND1(CX,DX,AX,BX,15,0xa679438e,17);
	ROUND1(BX,CX,DX,AX, 0,0x49b40821,22);

	MOVL	(1*4)(SI), R8	// preload x[1] for the G rounds
	MOVL	DX, R9		// R9 and R10 both start as d
	MOVL	DX, R10

// ROUND2 performs one G-round step, where
// G(b,c,d) = (d & b) | (^d & c); R9 accumulates ^d & c and R10
// accumulates d & b, so d never has to be reloaded.
#define ROUND2(a, b, c, d, index, const, shift) \
	NOTL	R9; \
	LEAL	const(a)(R8*1),a; \
	ANDL	b, R10; \
	ANDL	c, R9; \
	MOVL	(index*4)(SI),R8; \
	ORL	R9, R10; \
	MOVL	c, R9; \
	ADDL	R10, a; \
	MOVL	c, R10; \
	ROLL	$shift, a; \
	ADDL	b, a
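// A hedged Go sketch of the step the ROUND2 macro encodes (names are
// illustrative, not from this tree; rotation via math/bits on a
// modern toolchain):
//
//	func round2(a, b, c, d, x, k uint32, s int) uint32 {
//		g := (d & b) | (^d & c) // G(b,c,d)
//		return b + bits.RotateLeft32(a+g+x+k, s)
//	}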
	ROUND2(AX,BX,CX,DX, 6,0xf61e2562, 5);
	ROUND2(DX,AX,BX,CX,11,0xc040b340, 9);
	ROUND2(CX,DX,AX,BX, 0,0x265e5a51,14);
	ROUND2(BX,CX,DX,AX, 5,0xe9b6c7aa,20);
	ROUND2(AX,BX,CX,DX,10,0xd62f105d, 5);
	ROUND2(DX,AX,BX,CX,15, 0x2441453, 9);
	ROUND2(CX,DX,AX,BX, 4,0xd8a1e681,14);
	ROUND2(BX,CX,DX,AX, 9,0xe7d3fbc8,20);
	ROUND2(AX,BX,CX,DX,14,0x21e1cde6, 5);
	ROUND2(DX,AX,BX,CX, 3,0xc33707d6, 9);
	ROUND2(CX,DX,AX,BX, 8,0xf4d50d87,14);
	ROUND2(BX,CX,DX,AX,13,0x455a14ed,20);
	ROUND2(AX,BX,CX,DX, 2,0xa9e3e905, 5);
	ROUND2(DX,AX,BX,CX, 7,0xfcefa3f8, 9);
	ROUND2(CX,DX,AX,BX,12,0x676f02d9,14);
	ROUND2(BX,CX,DX,AX, 0,0x8d2a4c8a,20);

	MOVL	(5*4)(SI), R8	// preload x[5] for the H rounds
	MOVL	CX, R9		// seed R9 = c

// ROUND3 performs one H-round step, where H(b,c,d) = b ^ c ^ d;
// R9 enters each step holding c, so XORing in d and b yields H.
#define ROUND3(a, b, c, d, index, const, shift) \
	LEAL	const(a)(R8*1),a; \
	MOVL	(index*4)(SI),R8; \
	XORL	d, R9; \
	XORL	b, R9; \
	ADDL	R9, a; \
	ROLL	$shift, a; \
	MOVL	b, R9; \
	ADDL	b, a

	ROUND3(AX,BX,CX,DX, 8,0xfffa3942, 4);
	ROUND3(DX,AX,BX,CX,11,0x8771f681,11);
	ROUND3(CX,DX,AX,BX,14,0x6d9d6122,16);
	ROUND3(BX,CX,DX,AX, 1,0xfde5380c,23);
	ROUND3(AX,BX,CX,DX, 4,0xa4beea44, 4);
	ROUND3(DX,AX,BX,CX, 7,0x4bdecfa9,11);
	ROUND3(CX,DX,AX,BX,10,0xf6bb4b60,16);
	ROUND3(BX,CX,DX,AX,13,0xbebfbc70,23);
	ROUND3(AX,BX,CX,DX, 0,0x289b7ec6, 4);
	ROUND3(DX,AX,BX,CX, 3,0xeaa127fa,11);
	ROUND3(CX,DX,AX,BX, 6,0xd4ef3085,16);
	ROUND3(BX,CX,DX,AX, 9, 0x4881d05,23);
	ROUND3(AX,BX,CX,DX,12,0xd9d4d039, 4);
	ROUND3(DX,AX,BX,CX,15,0xe6db99e5,11);
	ROUND3(CX,DX,AX,BX, 2,0x1fa27cf8,16);
	ROUND3(BX,CX,DX,AX, 0,0xc4ac5665,23);

	MOVL	(0*4)(SI), R8	// preload x[0] for the I rounds
	MOVL	$0xffffffff, R9
	XORL	DX, R9		// R9 = ^d

// ROUND4 performs one I-round step, where I(b,c,d) = c ^ (b | ^d);
// R9 enters each step holding ^d and leaves holding ^c, which is the
// next step's ^d after the argument rotation.
#define ROUND4(a, b, c, d, index, const, shift) \
	LEAL	const(a)(R8*1),a; \
	ORL	b, R9; \
	XORL	c, R9; \
	ADDL	R9, a; \
	MOVL	(index*4)(SI),R8; \
	MOVL	$0xffffffff, R9; \
	ROLL	$shift, a; \
	XORL	c, R9; \
	ADDL	b, a

	ROUND4(AX,BX,CX,DX, 7,0xf4292244, 6);
	ROUND4(DX,AX,BX,CX,14,0x432aff97,10);
	ROUND4(CX,DX,AX,BX, 5,0xab9423a7,15);
	ROUND4(BX,CX,DX,AX,12,0xfc93a039,21);
	ROUND4(AX,BX,CX,DX, 3,0x655b59c3, 6);
	ROUND4(DX,AX,BX,CX,10,0x8f0ccc92,10);
	ROUND4(CX,DX,AX,BX, 1,0xffeff47d,15);
	ROUND4(BX,CX,DX,AX, 8,0x85845dd1,21);
	ROUND4(AX,BX,CX,DX,15,0x6fa87e4f, 6);
	ROUND4(DX,AX,BX,CX, 6,0xfe2ce6e0,10);
	ROUND4(CX,DX,AX,BX,13,0xa3014314,15);
	ROUND4(BX,CX,DX,AX, 4,0x4e0811a1,21);
	ROUND4(AX,BX,CX,DX,11,0xf7537e82, 6);
	ROUND4(DX,AX,BX,CX, 2,0xbd3af235,10);
	ROUND4(CX,DX,AX,BX, 9,0x2ad7d2bb,15);
	ROUND4(BX,CX,DX,AX, 0,0xeb86d391,21);

	// Feedforward: add the block's starting state back in.
	ADDL	R12, AX
	ADDL	R13, BX
	ADDL	R14, CX
	ADDL	R15, DX

	ADDQ	$64, SI		// advance to the next 64-byte block
	CMPQ	SI, DI
	JB	loop

end:
	// Store the updated state back into the digest.
	MOVL	AX, (0*4)(BP)
	MOVL	BX, (1*4)(BP)
	MOVL	CX, (2*4)(BP)
	MOVL	DX, (3*4)(BP)
	RET
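// For reference, a hedged, runnable Go sketch of one ROUND1 step
// (illustrative only; this is not this tree's generic md5block.go,
// and math/bits postdates it):
//
//	package main
//
//	import (
//		"fmt"
//		"math/bits"
//	)
//
//	// round1 computes a = b + ((a + F(b,c,d) + x + k) <<< s), with
//	// F(b,c,d) = (b & c) | (^b & d) evaluated as d ^ (b & (c ^ d)),
//	// the same instruction-saving identity ROUND1 uses.
//	func round1(a, b, c, d, x, k uint32, s int) uint32 {
//		f := d ^ (b & (c ^ d))
//		return b + bits.RotateLeft32(a+f+x+k, s)
//	}
//
//	func main() {
//		// Mirrors ROUND1(AX,BX,CX,DX, 1,0xd76aa478, 7) on an all-zero
//		// message block, starting from the standard MD5 initial state.
//		a, b, c, d := uint32(0x67452301), uint32(0xefcdab89), uint32(0x98badcfe), uint32(0x10325476)
//		fmt.Printf("a = %#08x\n", round1(a, b, c, d, 0, 0xd76aa478, 7))
//	}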