// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build gc && !purego
// +build gc,!purego

#include "textflag.h"

// This implementation of Poly1305 uses the vector facility (vx)
// to process up to 2 blocks (32 bytes) per iteration using an
// algorithm based on the one described in:
//
//	NEON crypto, Daniel J. Bernstein & Peter Schwabe
//	https://cryptojedi.org/papers/neoncrypto-20120320.pdf
//
// This algorithm uses 5 26-bit limbs to represent a 130-bit
// value. These limbs are, for the most part, zero extended and
// placed into 64-bit vector register elements. Each vector
// register is 128-bits wide and so holds 2 of these elements.
// Using 26-bit limbs allows us plenty of headroom to accommodate
// accumulations before and after multiplication without
// overflowing either 32-bits (before multiplication) or 64-bits
// (after multiplication).
//
// In order to parallelise the operations required to calculate
// the sum we use two separate accumulators and then sum those
// in an extra final step. For compatibility with the generic
// implementation we perform this summation at the end of every
// updateVX call.
//
// To use two accumulators we must multiply the message blocks
// by r² rather than r. Only the final message block should be
// multiplied by r.
//
// Example:
//
// We want to calculate the sum (h) for a 64 byte message (m):
//
//	h = m[0:16]r⁴ + m[16:32]r³ + m[32:48]r² + m[48:64]r
//
// To do this we split the calculation into the even indices
// and odd indices of the message. These form our SIMD 'lanes':
//
//	h = m[ 0:16]r⁴ + m[32:48]r² + <- lane 0
//	    m[16:32]r³ + m[48:64]r    <- lane 1
//
// To calculate this iteratively we refactor so that both lanes
// are written in terms of r² and r:
//
//	h = (m[ 0:16]r² + m[32:48])r² + <- lane 0
//	    (m[16:32]r² + m[48:64])r    <- lane 1
//	              ^               ^
//	              |               coefficients for second iteration
//	              coefficients for first iteration
//
// So in this case we would have two iterations. In the first
// both lanes are multiplied by r². In the second only the
// first lane is multiplied by r² and the second lane is
// instead multiplied by r. This gives us the odd and even
// powers of r that we need from the original equation.
//
// Notation:
//
//	h - accumulator
//	r - key
//	m - message
//
//	[a, b]       - SIMD register holding two 64-bit values
//	[a, b, c, d] - SIMD register holding four 32-bit values
//	xᵢ[n]        - limb n of variable x with bit width i
//
// Limbs are expressed in little endian order, so for 26-bit
// limbs x₂₆[4] will be the most significant limb and x₂₆[0]
// will be the least significant limb.
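//
// The refactoring above can be checked with plain modular
// arithmetic. A minimal Go sketch, for illustration only and not
// part of this file's build (p, r and m are hypothetical names):
//
//	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(5)) // 2¹³⁰ - 5
//	mul := func(a, b *big.Int) *big.Int { return new(big.Int).Mod(new(big.Int).Mul(a, b), p) }
//	add := func(a, b *big.Int) *big.Int { return new(big.Int).Mod(new(big.Int).Add(a, b), p) }
//	r2 := mul(r, r)
//	// direct form: h = m[0]r⁴ + m[1]r³ + m[2]r² + m[3]r
//	direct := add(add(mul(m[0], mul(r2, r2)), mul(m[1], mul(r2, r))),
//		add(mul(m[2], r2), mul(m[3], r)))
//	// two-lane form: h = (m[0]r² + m[2])r² + (m[1]r² + m[3])r
//	lanes := add(mul(add(mul(m[0], r2), m[2]), r2),
//		mul(add(mul(m[1], r2), m[3]), r))
//	// direct.Cmp(lanes) == 0 for any m and r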
// masking constants
#define MOD24 V0 // [0x0000000000ffffff, 0x0000000000ffffff] - mask low 24-bits
#define MOD26 V1 // [0x0000000003ffffff, 0x0000000003ffffff] - mask low 26-bits

// expansion constants (see EXPAND macro)
#define EX0 V2
#define EX1 V3
#define EX2 V4

// key (r², r or 1 depending on context)
#define R_0 V5
#define R_1 V6
#define R_2 V7
#define R_3 V8
#define R_4 V9

// precalculated coefficients (5r², 5r or 0 depending on context)
#define R5_1 V10
#define R5_2 V11
#define R5_3 V12
#define R5_4 V13

// message block (m)
#define M_0 V14
#define M_1 V15
#define M_2 V16
#define M_3 V17
#define M_4 V18

// accumulator (h)
#define H_0 V19
#define H_1 V20
#define H_2 V21
#define H_3 V22
#define H_4 V23

// temporary registers (for short-lived values)
#define T_0 V24
#define T_1 V25
#define T_2 V26
#define T_3 V27
#define T_4 V28

GLOBL ·constants<>(SB), RODATA, $0x30
// EX0
DATA ·constants<>+0x00(SB)/8, $0x0006050403020100
DATA ·constants<>+0x08(SB)/8, $0x1016151413121110
// EX1
DATA ·constants<>+0x10(SB)/8, $0x060c0b0a09080706
DATA ·constants<>+0x18(SB)/8, $0x161c1b1a19181716
// EX2
DATA ·constants<>+0x20(SB)/8, $0x0d0d0d0d0d0f0e0d
DATA ·constants<>+0x28(SB)/8, $0x1d1d1d1d1d1f1e1d

// MULTIPLY multiplies each lane of f and g, partially reduced
// modulo 2¹³⁰ - 5. The result, h, consists of partial products
// in each lane that need to be reduced further to produce the
// final result.
//
//	h₁₃₀ = (f₁₃₀g₁₃₀) % 2¹³⁰ + (5f₁₃₀g₁₃₀) / 2¹³⁰
//
// Note that the multiplication by 5 of the high bits is
// achieved by precalculating the multiplication of four of the
// g coefficients by 5. These are g51-g54.
#define MULTIPLY(f0, f1, f2, f3, f4, g0, g1, g2, g3, g4, g51, g52, g53, g54, h0, h1, h2, h3, h4) \
	VMLOF  f0, g0, h0        \
	VMLOF  f0, g3, h3        \
	VMLOF  f0, g1, h1        \
	VMLOF  f0, g4, h4        \
	VMLOF  f0, g2, h2        \
	VMLOF  f1, g54, T_0      \
	VMLOF  f1, g2, T_3       \
	VMLOF  f1, g0, T_1       \
	VMLOF  f1, g3, T_4       \
	VMLOF  f1, g1, T_2       \
	VMALOF f2, g53, h0, h0   \
	VMALOF f2, g1, h3, h3    \
	VMALOF f2, g54, h1, h1   \
	VMALOF f2, g2, h4, h4    \
	VMALOF f2, g0, h2, h2    \
	VMALOF f3, g52, T_0, T_0 \
	VMALOF f3, g0, T_3, T_3  \
	VMALOF f3, g53, T_1, T_1 \
	VMALOF f3, g1, T_4, T_4  \
	VMALOF f3, g54, T_2, T_2 \
	VMALOF f4, g51, h0, h0   \
	VMALOF f4, g54, h3, h3   \
	VMALOF f4, g52, h1, h1   \
	VMALOF f4, g0, h4, h4    \
	VMALOF f4, g53, h2, h2   \
	VAG    T_0, h0, h0       \
	VAG    T_3, h3, h3       \
	VAG    T_1, h1, h1       \
	VAG    T_4, h4, h4       \
	VAG    T_2, h2, h2
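
// As a scalar model of what MULTIPLY computes in each lane (a
// sketch for illustration only; mulLane and g5 are hypothetical
// names, with g5[k] standing for the precalculated 5g₂₆[k]
// registers g51-g54):
//
//	func mulLane(f, g, g5 [5]uint64) (h [5]uint64) {
//		h[0] = f[0]*g[0] + f[1]*g5[4] + f[2]*g5[3] + f[3]*g5[2] + f[4]*g5[1]
//		h[1] = f[0]*g[1] + f[1]*g[0] + f[2]*g5[4] + f[3]*g5[3] + f[4]*g5[2]
//		h[2] = f[0]*g[2] + f[1]*g[1] + f[2]*g[0] + f[3]*g5[4] + f[4]*g5[3]
//		h[3] = f[0]*g[3] + f[1]*g[2] + f[2]*g[1] + f[3]*g[0] + f[4]*g5[4]
//		h[4] = f[0]*g[4] + f[1]*g[3] + f[2]*g[2] + f[3]*g[1] + f[4]*g[0]
//		return
//	}
//
// The g5 terms fold the limb products that would overflow 2¹³⁰
// back into the low limbs, using 2¹³⁰ ≡ 5 (mod 2¹³⁰ - 5).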
// REDUCE performs the following carry operations in four
// stages, as specified in Bernstein & Schwabe:
//
//	1: h₂₆[0]->h₂₆[1] h₂₆[3]->h₂₆[4]
//	2: h₂₆[1]->h₂₆[2] h₂₆[4]->h₂₆[0]
//	3: h₂₆[0]->h₂₆[1] h₂₆[2]->h₂₆[3]
//	4: h₂₆[3]->h₂₆[4]
//
// The result is that all of the limbs are limited to 26-bits
// except for h₂₆[1] and h₂₆[4] which are limited to 27-bits.
//
// Note that although each limb is aligned at 26-bit intervals
// they may contain values that exceed 2²⁶ - 1, hence the need
// to carry the excess bits in each limb.
#define REDUCE(h0, h1, h2, h3, h4) \
	VESRLG $26, h0, T_0  \
	VESRLG $26, h3, T_1  \
	VN     MOD26, h0, h0 \
	VN     MOD26, h3, h3 \
	VAG    T_0, h1, h1   \
	VAG    T_1, h4, h4   \
	VESRLG $26, h1, T_2  \
	VESRLG $26, h4, T_3  \
	VN     MOD26, h1, h1 \
	VN     MOD26, h4, h4 \
	VESLG  $2, T_3, T_4  \
	VAG    T_3, T_4, T_4 \
	VAG    T_2, h2, h2   \
	VAG    T_4, h0, h0   \
	VESRLG $26, h2, T_0  \
	VESRLG $26, h0, T_1  \
	VN     MOD26, h2, h2 \
	VN     MOD26, h0, h0 \
	VAG    T_0, h3, h3   \
	VAG    T_1, h1, h1   \
	VESRLG $26, h3, T_2  \
	VN     MOD26, h3, h3 \
	VAG    T_2, h4, h4

// EXPAND splits the 128-bit little-endian values in0 and in1
// into 26-bit big-endian limbs and places the results into
// the first and second lane of d₂₆[0:4] respectively.
//
// The EX0, EX1 and EX2 constants are arrays of byte indices
// for permutation. The permutation both reverses the bytes
// in the input and ensures the bytes are copied into the
// destination limb ready to be shifted into their final
// position.
#define EXPAND(in0, in1, d0, d1, d2, d3, d4) \
	VPERM  in0, in1, EX0, d0 \
	VPERM  in0, in1, EX1, d2 \
	VPERM  in0, in1, EX2, d4 \
	VESRLG $26, d0, d1       \
	VESRLG $30, d2, d3       \
	VESRLG $4, d2, d2        \
	VN     MOD26, d0, d0     \ // [in0₂₆[0], in1₂₆[0]]
	VN     MOD26, d3, d3     \ // [in0₂₆[3], in1₂₆[3]]
	VN     MOD26, d1, d1     \ // [in0₂₆[1], in1₂₆[1]]
	VN     MOD24, d4, d4     \ // [in0₂₆[4], in1₂₆[4]]
	VN     MOD26, d2, d2 // [in0₂₆[2], in1₂₆[2]]

// func updateVX(state *macState, msg []byte)
TEXT ·updateVX(SB), NOSPLIT, $0
	MOVD state+0(FP), R1
	LMG  msg+8(FP), R2, R3 // R2=msg_base, R3=msg_len

	// load EX0, EX1 and EX2
	MOVD $·constants<>(SB), R5
	VLM  (R5), EX0, EX2

	// generate masks
	VGMG $(64-24), $63, MOD24 // [0x00ffffff, 0x00ffffff]
	VGMG $(64-26), $63, MOD26 // [0x03ffffff, 0x03ffffff]

	// load h (accumulator) and r (key) from state
	VZERO T_1               // [0, 0]
	VL    0(R1), T_0        // [h₆₄[0], h₆₄[1]]
	VLEG  $0, 16(R1), T_1   // [h₆₄[2], 0]
	VL    24(R1), T_2       // [r₆₄[0], r₆₄[1]]
	VPDI  $0, T_0, T_2, T_3 // [h₆₄[0], r₆₄[0]]
	VPDI  $5, T_0, T_2, T_4 // [h₆₄[1], r₆₄[1]]

	// unpack h and r into 26-bit limbs
	// note: h₆₄[2] may have the low 3 bits set, so h₂₆[4] is a 27-bit value
	VN     MOD26, T_3, H_0            // [h₂₆[0], r₂₆[0]]
	VZERO  H_1                        // [0, 0]
	VZERO  H_3                        // [0, 0]
	VGMG   $(64-12-14), $(63-12), T_0 // [0x03fff000, 0x03fff000] - 26-bit mask with low 12 bits masked out
	VESLG  $24, T_1, T_1              // [h₆₄[2]<<24, 0]
	VERIMG $-26&63, T_3, MOD26, H_1   // [h₂₆[1], r₂₆[1]]
	VESRLG $+52&63, T_3, H_2          // [h₂₆[2], r₂₆[2]] - low 12 bits only
	VERIMG $-14&63, T_4, MOD26, H_3   // [h₂₆[3], r₂₆[3]]
	VESRLG $40, T_4, H_4              // [h₂₆[4], r₂₆[4]] - low 24 bits only
	VERIMG $+12&63, T_4, T_0, H_2     // [h₂₆[2], r₂₆[2]] - complete
	VO     T_1, H_4, H_4              // [h₂₆[4], r₂₆[4]] - complete

	// replicate r across all 4 vector elements
	VREPF $3, H_0, R_0 // [r₂₆[0], r₂₆[0], r₂₆[0], r₂₆[0]]
	VREPF $3, H_1, R_1 // [r₂₆[1], r₂₆[1], r₂₆[1], r₂₆[1]]
	VREPF $3, H_2, R_2 // [r₂₆[2], r₂₆[2], r₂₆[2], r₂₆[2]]
	VREPF $3, H_3, R_3 // [r₂₆[3], r₂₆[3], r₂₆[3], r₂₆[3]]
	VREPF $3, H_4, R_4 // [r₂₆[4], r₂₆[4], r₂₆[4], r₂₆[4]]

	// zero out lane 1 of h
	VLEIG $1, $0, H_0 // [h₂₆[0], 0]
	VLEIG $1, $0, H_1 // [h₂₆[1], 0]
	VLEIG $1, $0, H_2 // [h₂₆[2], 0]
	VLEIG $1, $0, H_3 // [h₂₆[3], 0]
	VLEIG $1, $0, H_4 // [h₂₆[4], 0]
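
	// The unpacking above is equivalent to the following scalar
	// split of a little-endian 130-bit value x held in x64[0:3]
	// (a Go sketch for illustration only; x64 and x26 are
	// hypothetical names):
	//
	//	x26[0] = uint32(x64[0]) & 0x3ffffff
	//	x26[1] = uint32(x64[0]>>26) & 0x3ffffff
	//	x26[2] = uint32(x64[0]>>52|x64[1]<<12) & 0x3ffffff
	//	x26[3] = uint32(x64[1]>>14) & 0x3ffffff
	//	x26[4] = uint32(x64[1]>>40 | x64[2]<<24)
	//
	// For h, x64[2] may have its low 3 bits set, so x26[4] can be
	// a 27-bit value; for r, x64[2] is 0.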
	// calculate 5r (ignore least significant limb)
	VREPIF $5, T_0
	VMLF   T_0, R_1, R5_1 // [5r₂₆[1], 5r₂₆[1], 5r₂₆[1], 5r₂₆[1]]
	VMLF   T_0, R_2, R5_2 // [5r₂₆[2], 5r₂₆[2], 5r₂₆[2], 5r₂₆[2]]
	VMLF   T_0, R_3, R5_3 // [5r₂₆[3], 5r₂₆[3], 5r₂₆[3], 5r₂₆[3]]
	VMLF   T_0, R_4, R5_4 // [5r₂₆[4], 5r₂₆[4], 5r₂₆[4], 5r₂₆[4]]

	// skip r² calculation if we are only calculating one block
	CMPBLE R3, $16, skip

	// calculate r²
	MULTIPLY(R_0, R_1, R_2, R_3, R_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, M_0, M_1, M_2, M_3, M_4)
	REDUCE(M_0, M_1, M_2, M_3, M_4)
	VGBM   $0x0f0f, T_0
	VERIMG $0, M_0, T_0, R_0 // [r₂₆[0], r²₂₆[0], r₂₆[0], r²₂₆[0]]
	VERIMG $0, M_1, T_0, R_1 // [r₂₆[1], r²₂₆[1], r₂₆[1], r²₂₆[1]]
	VERIMG $0, M_2, T_0, R_2 // [r₂₆[2], r²₂₆[2], r₂₆[2], r²₂₆[2]]
	VERIMG $0, M_3, T_0, R_3 // [r₂₆[3], r²₂₆[3], r₂₆[3], r²₂₆[3]]
	VERIMG $0, M_4, T_0, R_4 // [r₂₆[4], r²₂₆[4], r₂₆[4], r²₂₆[4]]

	// calculate 5r² (ignore least significant limb)
	VREPIF $5, T_0
	VMLF   T_0, R_1, R5_1 // [5r₂₆[1], 5r²₂₆[1], 5r₂₆[1], 5r²₂₆[1]]
	VMLF   T_0, R_2, R5_2 // [5r₂₆[2], 5r²₂₆[2], 5r₂₆[2], 5r²₂₆[2]]
	VMLF   T_0, R_3, R5_3 // [5r₂₆[3], 5r²₂₆[3], 5r₂₆[3], 5r²₂₆[3]]
	VMLF   T_0, R_4, R5_4 // [5r₂₆[4], 5r²₂₆[4], 5r₂₆[4], 5r²₂₆[4]]

loop:
	CMPBLE R3, $32, b2 // 2 or fewer blocks remaining, need to change key coefficients

	// load next 2 blocks from message
	VLM (R2), T_0, T_1

	// update message slice
	SUB  $32, R3
	MOVD $32(R2), R2

	// unpack message blocks into 26-bit big-endian limbs
	EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4)

	// add 2¹²⁸ to each message block value
	VLEIB $4, $1, M_4
	VLEIB $12, $1, M_4

multiply:
	// accumulate the incoming message
	VAG H_0, M_0, M_0
	VAG H_3, M_3, M_3
	VAG H_1, M_1, M_1
	VAG H_4, M_4, M_4
	VAG H_2, M_2, M_2

	// multiply the accumulator by the key coefficient
	MULTIPLY(M_0, M_1, M_2, M_3, M_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, H_0, H_1, H_2, H_3, H_4)

	// carry and partially reduce the partial products
	REDUCE(H_0, H_1, H_2, H_3, H_4)

	CMPBNE R3, $0, loop

finish:
	// sum lane 0 and lane 1 and put the result in lane 1
	VZERO  T_0
	VSUMQG H_0, T_0, H_0
	VSUMQG H_3, T_0, H_3
	VSUMQG H_1, T_0, H_1
	VSUMQG H_4, T_0, H_4
	VSUMQG H_2, T_0, H_2

	// reduce again after summation
	// TODO(mundaym): there might be a more efficient way to do this
	// now that we only have 1 active lane. For example, we could
	// simultaneously pack the values as we reduce them.
	REDUCE(H_0, H_1, H_2, H_3, H_4)

	// carry h[1] through to h[4] so that only h[4] can exceed 2²⁶ - 1
	// TODO(mundaym): in testing this final carry was unnecessary.
	// Needs a proof before it can be removed though.
	VESRLG $26, H_1, T_1
	VN     MOD26, H_1, H_1
	VAQ    T_1, H_2, H_2
	VESRLG $26, H_2, T_2
	VN     MOD26, H_2, H_2
	VAQ    T_2, H_3, H_3
	VESRLG $26, H_3, T_3
	VN     MOD26, H_3, H_3
	VAQ    T_3, H_4, H_4
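
	// The carry chain above is, in scalar form (a Go sketch for
	// illustration only; h is a hypothetical [5]uint64 of 26-bit
	// limbs):
	//
	//	h[2] += h[1] >> 26; h[1] &= 0x3ffffff
	//	h[3] += h[2] >> 26; h[2] &= 0x3ffffff
	//	h[4] += h[3] >> 26; h[3] &= 0x3ffffff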
	// h is now < 2(2¹³⁰ - 5)
	// Pack each lane in h₂₆[0:4] into h₁₂₈[0:1].
	VESLG $26, H_1, H_1
	VESLG $26, H_3, H_3
	VO    H_0, H_1, H_0
	VO    H_2, H_3, H_2
	VESLG $4, H_2, H_2
	VLEIB $7, $48, H_1
	VSLB  H_1, H_2, H_2
	VO    H_0, H_2, H_0
	VLEIB $7, $104, H_1
	VSLB  H_1, H_4, H_3
	VO    H_3, H_0, H_0
	VLEIB $7, $24, H_1
	VSRLB H_1, H_4, H_1

	// update state
	VSTEG $1, H_0, 0(R1)
	VSTEG $0, H_0, 8(R1)
	VSTEG $1, H_1, 16(R1)
	RET

b2:  // 2 or fewer blocks remaining
	CMPBLE R3, $16, b1

	// Load the 2 remaining blocks (17-32 bytes remaining).
	MOVD $-17(R3), R0    // index of final byte to load modulo 16
	VL   (R2), T_0       // load full 16 byte block
	VLL  R0, 16(R2), T_1 // load final (possibly partial) block and pad with zeros to 16 bytes

	// The Poly1305 algorithm requires that a 1 bit be appended to
	// each message block. If the final block is less than 16 bytes
	// long then it is easiest to insert the 1 before the message
	// block is split into 26-bit limbs. If, on the other hand, the
	// final message block is 16 bytes long then we append the 1 bit
	// after expansion as normal.
	MOVBZ  $1, R0
	MOVD   $-16(R3), R3   // index of byte in last block to insert 1 at (could be 16)
	CMPBEQ R3, $16, 2(PC) // skip the insertion if the final block is 16 bytes long
	VLVGB  R3, R0, T_1    // insert 1 into the byte at index R3

	// Split both blocks into 26-bit limbs in the appropriate lanes.
	EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4)

	// Append a 1 byte to the end of the second to last block.
	VLEIB $4, $1, M_4

	// Append a 1 byte to the end of the last block only if it is a
	// full 16 byte block.
	CMPBNE R3, $16, 2(PC)
	VLEIB  $12, $1, M_4

	// Finally, set up the coefficients for the final multiplication.
	// We have previously saved r and 5r in the 32-bit even indexes
	// of the R_[0-4] and R5_[1-4] coefficient registers.
	//
	// We want lane 0 to be multiplied by r² so it can be kept the
	// same. We want lane 1 to be multiplied by r so we need to move
	// the saved r value into the 32-bit odd index in lane 1 by
	// rotating the 64-bit lane by 32.
	VGBM   $0x00ff, T_0         // [0, 0xffffffffffffffff] - mask lane 1 only
	VERIMG $32, R_0, T_0, R_0   // [_, r²₂₆[0], _, r₂₆[0]]
	VERIMG $32, R_1, T_0, R_1   // [_, r²₂₆[1], _, r₂₆[1]]
	VERIMG $32, R_2, T_0, R_2   // [_, r²₂₆[2], _, r₂₆[2]]
	VERIMG $32, R_3, T_0, R_3   // [_, r²₂₆[3], _, r₂₆[3]]
	VERIMG $32, R_4, T_0, R_4   // [_, r²₂₆[4], _, r₂₆[4]]
	VERIMG $32, R5_1, T_0, R5_1 // [_, 5r²₂₆[1], _, 5r₂₆[1]]
	VERIMG $32, R5_2, T_0, R5_2 // [_, 5r²₂₆[2], _, 5r₂₆[2]]
	VERIMG $32, R5_3, T_0, R5_3 // [_, 5r²₂₆[3], _, 5r₂₆[3]]
	VERIMG $32, R5_4, T_0, R5_4 // [_, 5r²₂₆[4], _, 5r₂₆[4]]

	MOVD $0, R3
	BR   multiply

skip:
	CMPBEQ R3, $0, finish

b1:  // 1 block remaining

	// Load the final block (1-16 bytes). This will be placed into
	// lane 0.
	MOVD $-1(R3), R0
	VLL  R0, (R2), T_0 // pad to 16 bytes with zeros
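
	// In scalar terms the final-block handling here, and in the b2
	// case above, amounts to the following Go sketch (illustration
	// only; block and n are hypothetical names):
	//
	//	var block [17]byte
	//	n := copy(block[:], msg) // 1 <= n <= 16 bytes remaining
	//	block[n] = 1             // the appended 1 bit
	//
	// block is then read as a little-endian number, so for a full
	// 16 byte block the 1 lands at bit 128 (the VLEIB $4/$12
	// instructions); for a short block VLVGB inserts it into the
	// byte at index n before expansion.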
	// The Poly1305 algorithm requires that a 1 bit be appended to
	// each message block. If the final block is less than 16 bytes
	// long then it is easiest to insert the 1 before the message
	// block is split into 26-bit limbs. If, on the other hand, the
	// final message block is 16 bytes long then we append the 1 bit
	// after expansion as normal.
	MOVBZ  $1, R0
	CMPBEQ R3, $16, 2(PC)
	VLVGB  R3, R0, T_0

	// Set the message block in lane 1 to the value 0 so that it
	// can be accumulated without affecting the final result.
	VZERO T_1

	// Split the final message block into 26-bit limbs in lane 0.
	// Lane 1 will contain 0.
	EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4)

	// Append a 1 byte to the end of the last block only if it is a
	// full 16 byte block.
	CMPBNE R3, $16, 2(PC)
	VLEIB  $4, $1, M_4

	// We have previously saved r and 5r in the 32-bit even indexes
	// of the R_[0-4] and R5_[1-4] coefficient registers.
	//
	// We want lane 0 to be multiplied by r so we need to move the
	// saved r value into the 32-bit odd index in lane 0. We want
	// lane 1 to be set to the value 1. This makes multiplication
	// a no-op. We do this by setting lane 1 in every register to 0
	// and then just setting the 32-bit index 3 in R_0 to 1.
	VZERO T_0
	MOVD  $0, R0
	MOVD  $0x10111213, R12
	VLVGP R12, R0, T_1         // [_, 0x10111213, _, 0x00000000]
	VPERM T_0, R_0, T_1, R_0   // [_, r₂₆[0], _, 0]
	VPERM T_0, R_1, T_1, R_1   // [_, r₂₆[1], _, 0]
	VPERM T_0, R_2, T_1, R_2   // [_, r₂₆[2], _, 0]
	VPERM T_0, R_3, T_1, R_3   // [_, r₂₆[3], _, 0]
	VPERM T_0, R_4, T_1, R_4   // [_, r₂₆[4], _, 0]
	VPERM T_0, R5_1, T_1, R5_1 // [_, 5r₂₆[1], _, 0]
	VPERM T_0, R5_2, T_1, R5_2 // [_, 5r₂₆[2], _, 0]
	VPERM T_0, R5_3, T_1, R5_3 // [_, 5r₂₆[3], _, 0]
	VPERM T_0, R5_4, T_1, R5_4 // [_, 5r₂₆[4], _, 0]

	// Set the value of lane 1 to be 1.
	VLEIF $3, $1, R_0 // [_, r₂₆[0], _, 1]

	MOVD $0, R3
	BR   multiply
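
// In scalar terms, updateVX performs the same state update as the
// generic implementation. An illustrative math/big Go sketch (not
// part of the build; reverseBytes and the loop structure are
// hypothetical):
//
//	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(5)) // 2¹³⁰ - 5
//	for len(msg) > 0 {
//		n := len(msg)
//		if n > 16 {
//			n = 16
//		}
//		var buf [17]byte
//		copy(buf[:], msg[:n]) // pad short blocks with zeros
//		buf[n] = 1            // the appended 1 bit
//		// interpret buf as a little-endian integer
//		m := new(big.Int).SetBytes(reverseBytes(buf[:]))
//		h.Mod(h.Mul(h.Add(h, m), r), p)
//		msg = msg[n:]
//	}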