github.com/cloudwego/frugal@v0.1.15/internal/atm/ssa/pass_lowering_amd64.go

/*
 * Copyright 2022 ByteDance Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package ssa

// Lowering lowers generic SSA IR to arch-dependent SSA IR.
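//
// On AMD64 this pass rewrites generic loads, stores, constants, LEAs, unary
// and binary expressions, bit-test-and-set and breakpoints into their
// IrAMD64_* counterparts. Anything it does not recognize is copied through
// unchanged, and ABI-specific terminators (returns, multi-way switches) are
// left for later passes.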
type Lowering struct{}

func (Lowering) Apply(cfg *CFG) {
    cfg.PostOrder().ForEach(func(bb *BasicBlock) {
        ins := bb.Ins
        bb.Ins = make([]IrNode, 0, len(ins))

        /* lower every instruction */
        for _, v := range ins {
            switch p := v.(type) {
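                /* nodes with no generic lowering are passed through as-is */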
                default: {
                    bb.Ins = append(bb.Ins, p)
                }

                /* load from memory */
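                /* lowered to a MOV from a base-only address: index Rz, scale 1, no displacement */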
                case *IrLoad: {
                    bb.Ins = append(bb.Ins, &IrAMD64_MOV_load {
                        R: p.R,
                        N: p.Size,
                        M: Mem {
                            M: p.Mem,
                            I: Rz,
                            S: 1,
                            D: 0,
                        },
                    })
                }

                /* store into memory */
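                /* the value register is written through the same base-only addressing as the load above */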
                case *IrStore: {
                    bb.Ins = append(bb.Ins, &IrAMD64_MOV_store_r {
                        R: p.R,
                        N: p.Size,
                        M: Mem {
                            M: p.Mem,
                            I: Rz,
                            S: 1,
                            D: 0,
                        },
                    })
                }

                /* load constant into register */
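                /* integer constants are materialized with a 64-bit absolute move */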
                case *IrConstInt: {
                    bb.Ins = append(bb.Ins, &IrAMD64_MOV_abs {
                        R: p.R,
                        V: p.V,
                    })
                }

                /* load pointer constant into register */
                case *IrConstPtr: {
                    bb.Ins = append(bb.Ins, &IrAMD64_MOV_ptr {
                        R: p.R,
                        P: p.P,
                    })
                }

                /* load effective address */
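                /* the generic offset becomes the index register, with scale 1 and no displacement */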
                case *IrLEA: {
                    bb.Ins = append(bb.Ins, &IrAMD64_LEA {
                        R: p.R,
                        M: Mem {
                            M: p.Mem,
                            I: p.Off,
                            S: 1,
                            D: 0,
                        },
                    })
                }

                /* unary operators */
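                /* byte swaps carry their operand width (2, 4 or 8 bytes); 32-to-64-bit sign extension maps to MOVSLQ */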
                case *IrUnaryExpr: {
                    switch p.Op {
                        case IrOpNegate   : bb.Ins = append(bb.Ins, &IrAMD64_NEG    { R: p.R, V: p.V })
                        case IrOpSwap16   : bb.Ins = append(bb.Ins, &IrAMD64_BSWAP  { R: p.R, V: p.V, N: 2 })
                        case IrOpSwap32   : bb.Ins = append(bb.Ins, &IrAMD64_BSWAP  { R: p.R, V: p.V, N: 4 })
                        case IrOpSwap64   : bb.Ins = append(bb.Ins, &IrAMD64_BSWAP  { R: p.R, V: p.V, N: 8 })
                        case IrOpSx32to64 : bb.Ins = append(bb.Ins, &IrAMD64_MOVSLQ { R: p.R, V: p.V })
                        default           : panic("unreachable")
                    }
                }

                /* binary operators */
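                /* arithmetic and bitwise ops lower to two-register ALU forms; comparisons lower to CMPQ and put their result in R */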
                case *IrBinaryExpr: {
                    switch p.Op {
                        case IrOpAdd  : bb.Ins = append(bb.Ins, &IrAMD64_BinOp_rr { R: p.R, X: p.X, Y: p.Y, Op: IrAMD64_BinAdd })
                        case IrOpSub  : bb.Ins = append(bb.Ins, &IrAMD64_BinOp_rr { R: p.R, X: p.X, Y: p.Y, Op: IrAMD64_BinSub })
                        case IrOpMul  : bb.Ins = append(bb.Ins, &IrAMD64_BinOp_rr { R: p.R, X: p.X, Y: p.Y, Op: IrAMD64_BinMul })
                        case IrOpAnd  : bb.Ins = append(bb.Ins, &IrAMD64_BinOp_rr { R: p.R, X: p.X, Y: p.Y, Op: IrAMD64_BinAnd })
                        case IrOpOr   : bb.Ins = append(bb.Ins, &IrAMD64_BinOp_rr { R: p.R, X: p.X, Y: p.Y, Op: IrAMD64_BinOr  })
                        case IrOpXor  : bb.Ins = append(bb.Ins, &IrAMD64_BinOp_rr { R: p.R, X: p.X, Y: p.Y, Op: IrAMD64_BinXor })
                        case IrOpShr  : bb.Ins = append(bb.Ins, &IrAMD64_BinOp_rr { R: p.R, X: p.X, Y: p.Y, Op: IrAMD64_BinShr })
                        case IrCmpEq  : bb.Ins = append(bb.Ins, &IrAMD64_CMPQ_rr  { R: p.R, X: p.X, Y: p.Y, Op: IrAMD64_CmpEq  })
                        case IrCmpNe  : bb.Ins = append(bb.Ins, &IrAMD64_CMPQ_rr  { R: p.R, X: p.X, Y: p.Y, Op: IrAMD64_CmpNe  })
                        case IrCmpLt  : bb.Ins = append(bb.Ins, &IrAMD64_CMPQ_rr  { R: p.R, X: p.X, Y: p.Y, Op: IrAMD64_CmpLt  })
                        case IrCmpLtu : bb.Ins = append(bb.Ins, &IrAMD64_CMPQ_rr  { R: p.R, X: p.X, Y: p.Y, Op: IrAMD64_CmpLtu })
                        case IrCmpGeu : bb.Ins = append(bb.Ins, &IrAMD64_CMPQ_rr  { R: p.R, X: p.X, Y: p.Y, Op: IrAMD64_CmpGeu })
                        default       : panic("unreachable")
                    }
                }

                /* bit test and set */
                case *IrBitTestSet: {
                    bb.Ins = append(bb.Ins, &IrAMD64_BTSQ_rr {
                        T: p.T,
                        S: p.S,
                        X: p.X,
                        Y: p.Y,
                    })
                }

                /* breakpoint */
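                /* lowered to a single INT 3 debug trap */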
                case *IrBreakpoint: {
                    bb.Ins = append(bb.Ins, &IrAMD64_INT { 3 })
                }
            }
        }

        /* lower the terminator */
        switch p := bb.Term.(type) {
            default: {
                panic("invalid terminator: " + bb.Term.String())
            }

            /* branch terminator */
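            /* a switch with no case values degenerates to a JMP to its default successor;
             * a single case value becomes a compare-with-immediate plus conditional jump;
             * larger switches are left untouched for a later pass */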
            case *IrSwitch: {
                switch t := p.iter().t; len(p.Br) {
                    case 0  : bb.Term = &IrAMD64_JMP    { To: p.Ln }
                    case 1  : bb.Term = &IrAMD64_Jcc_ri { X: p.V, Y: t[0].i, To: p.Br[t[0].i], Ln: p.Ln, Op: IrAMD64_CmpEq }
                    default : break
                }
            }

            /* return terminator, ABI-specific, will be lowered in a later pass */
            case *IrReturn: {
                break
            }
        }
    })
}