github.com/cloudwego/frugal@v0.1.15/internal/atm/pgen/pgen_regabi_amd64.go

//go:build go1.17
// +build go1.17

/*
 * Copyright 2022 ByteDance Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package pgen

import (
    `github.com/cloudwego/frugal/internal/atm/hir`
    `github.com/cloudwego/frugal/internal/atm/rtx`
    `github.com/cloudwego/iasm/x86_64`
)

/** Stack Checking **/

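/*
 * _G_stackguard0 is the offset of the stackguard0 field inside runtime.g:
 * the struct begins with stack.lo and stack.hi (8 bytes each), so the
 * guard word sits at offset 0x10. Under the Go 1.17+ amd64 register ABI
 * the current g is kept in R14, which is why abiStackCheck reads the
 * guard through R14. _M_memcpyargs is 0 because the register ABI passes
 * the arguments of runtime.memmove in registers, leaving no stack space
 * to reserve for them.
 */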
const (
    _M_memcpyargs  = 0
    _G_stackguard0 = 0x10
)

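// abiStackCheck emits the stack probe at function entry: it computes the
// lowest address the new frame would reach (RSP minus the context size and
// the extra space sp), compares it against g.stackguard0, and branches to
// the stack-growth path when the frame would not stay above the guard.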
func (self *CodeGen) abiStackCheck(p *x86_64.Program, to *x86_64.Label, sp uintptr) {
    p.LEAQ (Ptr(RSP, -self.ctxt.size() - int32(sp)), R12)
    p.CMPQ (Ptr(R14, _G_stackguard0), R12)
    p.JBE  (to)
}
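
// The emitted sequence looks roughly like this (illustrative AT&T-style
// pseudo-assembly; the actual encoding is produced by iasm):
//
//     LEAQ -frame(%rsp), %r12    // frame = ctxt.size() + sp
//     CMPQ 0x10(%r14), %r12      // compare with g.stackguard0
//     JBE  to                    // not enough room: grow the stack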

/** Efficient Block Copy Algorithm **/

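// memcpyargs marks the registers that carry the arguments of runtime.memmove
// under the register ABI: to in RAX, from in RBX and n in RCX, the first
// three integer argument registers on amd64. Together with rtx.R_memmove it
// determines which live registers must be spilled around the call.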
var memcpyargs = [256]bool {
    RAX: true,
    RBX: true,
    RCX: true,
}

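// abiBlockCopy emits a call to runtime.memmove that copies nb bytes from ps
// to pd. It spills every live register the call may clobber, routes the
// three operands into the argument registers, performs the call through a
// scratch register, and reloads the spilled registers afterwards.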
func (self *CodeGen) abiBlockCopy(p *x86_64.Program, pd hir.PointerRegister, ps hir.PointerRegister, nb hir.GenericRegister) {
    rd := self.r(pd)
    rs := self.r(ps)
    rl := self.r(nb)

    /* save all the registers that the call would clobber */
    for _, lr := range self.ctxt.regs {
        if rr := self.r(lr); rtx.R_memmove[rr] || memcpyargs[rr] {
            p.MOVQ(rr, self.ctxt.slot(lr))
        }
    }

    /* enumerate different register cases */
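    /* rd is parked in RAX first; rs and rl are then routed into RBX and RCX,
     * with an XCHGQ when they occupy each other's target, so that no operand
     * is overwritten before it has been read (the case split assumes neither
     * rs nor rl ever lives in RAX) */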
    switch {
        case rs == RBX && rl == RCX : p.MOVQ(rd, RAX)
        case rs == RBX && rl != RCX : p.MOVQ(rd, RAX); p.MOVQ  (rl, RCX)
        case rs != RBX && rl == RCX : p.MOVQ(rd, RAX); p.MOVQ  (rs, RBX)
        case rs == RCX && rl == RBX : p.MOVQ(rd, RAX); p.XCHGQ (RBX, RCX)
        case rs == RCX && rl != RBX : p.MOVQ(rd, RAX); p.MOVQ  (RCX, RBX); p.MOVQ(rl, RCX)
        case rs != RCX && rl == RBX : p.MOVQ(rd, RAX); p.MOVQ  (RBX, RCX); p.MOVQ(rs, RBX)
        default                     : p.MOVQ(rd, RAX); p.MOVQ  (rs, RBX);  p.MOVQ(rl, RCX)
    }

    /* load the address of memmove and call it through RDI, since an
     * absolute 64-bit address cannot be encoded in a direct CALL */
    p.MOVQ(uintptr(rtx.F_memmove), RDI)
    p.CALLQ(RDI)

    /* restore all the registers that were spilled before the call */
    for _, lr := range self.ctxt.regs {
        if rr := self.r(lr); rtx.R_memmove[rr] || memcpyargs[rr] {
            p.MOVQ(self.ctxt.slot(lr), rr)
        }
    }
}