github.com/gopherd/gonum@v0.0.4/internal/asm/f64/axpyunitary_amd64.s

// Copyright ©2015 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
// Some of the loop unrolling code is copied from:
// http://golang.org/src/math/big/arith_amd64.s
// which is distributed under these terms:
//
// Copyright (c) 2012 The Go Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//    * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//    * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//    * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// +build !noasm,!gccgo,!safe

#include "textflag.h"

#define X_PTR SI
#define Y_PTR DI
#define DST_PTR DI
#define IDX AX
#define LEN CX
#define TAIL BX
#define ALPHA X0
#define ALPHA_2 X1

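// As a rough plain-Go sketch of what the kernel below computes (an
// illustration of the semantics only, not this package's actual fallback
// source), with the loop bound taken over the shorter slice:
//
//	n := len(x)
//	if len(y) < n {
//		n = len(y)
//	}
//	for i := 0; i < n; i++ {
//		y[i] += alpha * x[i]
//	}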
// func AxpyUnitary(alpha float64, x, y []float64)
TEXT ·AxpyUnitary(SB), NOSPLIT, $0
	MOVQ    x_base+8(FP), X_PTR  // X_PTR := &x
	MOVQ    y_base+32(FP), Y_PTR // Y_PTR := &y
	MOVQ    x_len+16(FP), LEN    // LEN = min( len(x), len(y) )
	CMPQ    y_len+40(FP), LEN
	CMOVQLE y_len+40(FP), LEN
	CMPQ    LEN, $0              // if LEN == 0 { return }
	JE      end
	XORQ    IDX, IDX
	MOVSD   alpha+0(FP), ALPHA   // ALPHA := { alpha, alpha }
	SHUFPD  $0, ALPHA, ALPHA
	MOVUPS  ALPHA, ALPHA_2       // ALPHA_2 := ALPHA   for pipelining
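	// Keeping a second copy of alpha lets the unrolled MULPDs below
	// alternate their source register; presumably this is the pipelining
	// aid referred to above, easing register-read pressure on some
	// microarchitectures.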
	MOVQ    Y_PTR, TAIL          // Check memory alignment
	ANDQ    $15, TAIL            // TAIL = &y % 16
	JZ      no_trim              // if TAIL == 0 { goto no_trim }

	// Align on 16-byte boundary
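	// Slice data for []float64 is 8-byte aligned, so an unaligned &y sits
	// exactly 8 bytes off a 16-byte boundary: peeling off one scalar
	// element brings the remaining y accesses onto 16-byte alignment.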
	MOVSD (X_PTR), X2   // X2 := x[0]
	MULSD ALPHA, X2     // X2 *= a
	ADDSD (Y_PTR), X2   // X2 += y[0]
	MOVSD X2, (DST_PTR) // y[0] = X2
	INCQ  IDX           // i++
	DECQ  LEN           // LEN--
	JZ    end           // if LEN == 0 { return }

no_trim:
	MOVQ LEN, TAIL
	ANDQ $7, TAIL   // TAIL := n % 8
	SHRQ $3, LEN    // LEN = floor( n / 8 )
	JZ   tail_start // if LEN == 0 { goto tail_start }

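	// Worked example: with 21 elements remaining here, LEN = 2 eight-wide
	// passes cover 16 elements, and TAIL = 5 leaves two pair passes plus
	// one scalar element for the tail loops below.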
loop:  // do {
	// y[i] += alpha * x[i] unrolled 8x.
	MOVUPS (X_PTR)(IDX*8), X2   // X_i = x[i]
	MOVUPS 16(X_PTR)(IDX*8), X3
	MOVUPS 32(X_PTR)(IDX*8), X4
	MOVUPS 48(X_PTR)(IDX*8), X5

	MULPD ALPHA, X2   // X_i *= a
	MULPD ALPHA_2, X3
	MULPD ALPHA, X4
	MULPD ALPHA_2, X5

	ADDPD (Y_PTR)(IDX*8), X2   // X_i += y[i]
	ADDPD 16(Y_PTR)(IDX*8), X3
	ADDPD 32(Y_PTR)(IDX*8), X4
	ADDPD 48(Y_PTR)(IDX*8), X5

	MOVUPS X2, (DST_PTR)(IDX*8)   // y[i] = X_i
	MOVUPS X3, 16(DST_PTR)(IDX*8)
	MOVUPS X4, 32(DST_PTR)(IDX*8)
	MOVUPS X5, 48(DST_PTR)(IDX*8)

	ADDQ $8, IDX  // i += 8
	DECQ LEN
	JNZ  loop     // } while --LEN > 0
	CMPQ TAIL, $0 // if TAIL == 0 { return }
	JE   end

tail_start: // Reset loop registers
	MOVQ TAIL, LEN // Loop counter: LEN = TAIL
	SHRQ $1, LEN   // LEN = floor( TAIL / 2 )
	JZ   tail_one  // if LEN == 0 { goto tail_one }

tail_two: // do {
	MOVUPS (X_PTR)(IDX*8), X2   // X2 = x[i]
	MULPD  ALPHA, X2            // X2 *= a
	ADDPD  (Y_PTR)(IDX*8), X2   // X2 += y[i]
	MOVUPS X2, (DST_PTR)(IDX*8) // y[i] = X2
	ADDQ   $2, IDX              // i += 2
	DECQ   LEN
	JNZ    tail_two             // } while --LEN > 0

	ANDQ $1, TAIL // TAIL = TAIL % 2
	JZ   end      // if TAIL == 0 { goto end }

tail_one:
	MOVSD (X_PTR)(IDX*8), X2   // X2 = x[i]
	MULSD ALPHA, X2            // X2 *= a
	ADDSD (Y_PTR)(IDX*8), X2   // X2 += y[i]
	MOVSD X2, (DST_PTR)(IDX*8) // y[i] = X2

end:
	RET
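
// A minimal usage sketch from Go (hypothetical caller; since the package
// is internal it is importable only from within this module):
//
//	import "github.com/gopherd/gonum/internal/asm/f64"
//
//	x := []float64{1, 2, 3}
//	y := []float64{4, 5, 6}
//	f64.AxpyUnitary(2, x, y) // y is now {6, 9, 12}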