github.com/cloudwego/frugal@v0.1.15/internal/atm/ssa/pass_mbarrier_amd64.go (about)

     1  /*
     2   * Copyright 2022 ByteDance Inc.
     3   *
     4   * Licensed under the Apache License, Version 2.0 (the "License");
     5   * you may not use this file except in compliance with the License.
     6   * You may obtain a copy of the License at
     7   *
     8   *     http://www.apache.org/licenses/LICENSE-2.0
     9   *
    10   * Unless required by applicable law or agreed to in writing, software
    11   * distributed under the License is distributed on an "AS IS" BASIS,
    12   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13   * See the License for the specific language governing permissions and
    14   * limitations under the License.
    15   */
    16  
    17  package ssa
    18  
    19  import (
    20      `sort`
    21      `unsafe`
    22  
    23      `github.com/cloudwego/frugal/internal/atm/abi`
    24      `github.com/cloudwego/frugal/internal/rt`
    25  )
    26  
// WriteBarrier is an SSA pass that lowers every abstract IrWriteBarrier
// instruction into an explicit runtime check: the containing basic block is
// split at the barrier, and a conditional branch selects between a plain
// pointer store and a call to the GC write-barrier function.
// The zero value is ready to use.
type WriteBarrier struct{}
    29  
// Apply lowers all IrWriteBarrier instructions in cfg.
//
// It runs to a fixpoint: each iteration splits at most ONE write barrier per
// basic block (the first one found), and sets `more` when a block still
// contains additional barriers, so the loop repeats until none remain.
// Splitting one barrier produces three new blocks:
//
//	p.B  -- original block, truncated before the barrier, now ends with a
//	        conditional test of a 1-byte flag (ir.Var)
//	ds   -- "direct store" block: a plain pointer store (barrier disabled)
//	wb   -- "write barrier" block: a call to the GC write-barrier function
//	bb   -- continuation block holding the instructions after the barrier
//
// After all splits, the CFG is rebuilt to restore analysis invariants.
func (WriteBarrier) Apply(cfg *CFG) {
    more := true
    mbir := make(map[*BasicBlock]int)           // block -> index of the first write barrier in that block
    ptrs := make(map[Reg]unsafe.Pointer)        // register -> constant pointer loaded into it

    /* find all constant pointers; later used to resolve the write-barrier
     * function address referenced by each IrWriteBarrier (ir.Fn) */
    cfg.PostOrder().ForEach(func(bb *BasicBlock) {
        for _, v := range bb.Ins {
            if p, ok := v.(*IrAMD64_MOV_ptr); ok {
                ptrs[p.R] = p.P
            }
        }
    })

    /* loop until no more write barriers */
    for more {
        more = false
        rt.MapClear(mbir)

        /* Phase 1: Find all the memory barriers and pointer constants.
         * Only the first barrier per block is recorded; if a block contains
         * more than one, flag another fixpoint iteration instead. */
        cfg.PostOrder().ForEach(func(bb *BasicBlock) {
            for i, v := range bb.Ins {
                if _, ok := v.(*IrWriteBarrier); ok {
                    if _, ok = mbir[bb]; ok {
                        more = true
                    } else {
                        mbir[bb] = i
                    }
                }
            }
        })

        /* split pair buffer */
        nb := len(mbir)
        mb := make([]Pos, 0, nb)

        /* extract from the map */
        for p, i := range mbir {
            mb = append(mb, pos(p, i))
        }

        /* sort by block ID: map iteration order is random, so sorting keeps
         * block creation / splitting deterministic across runs */
        sort.Slice(mb, func(i int, j int) bool {
            return mb[i].isPriorTo(mb[j])
        })

        /* Phase 2: Split basic block at write barrier */
        for _, p := range mb {
            bb := cfg.CreateBlock()
            ds := cfg.CreateBlock()
            wb := cfg.CreateBlock()
            ir := p.B.Ins[p.I].(*IrWriteBarrier)

            /* move instructions after the write barrier into a new block;
             * bb inherits p.B's terminator and is reached from both ds and wb */
            bb.Ins  = p.B.Ins[p.I + 1:]
            bb.Term = p.B.Term
            bb.Pred = []*BasicBlock { ds, wb }

            /* update all the predecessors & Phi nodes: every successor that
             * previously came from p.B now comes from the continuation bb */
            for it := p.B.Term.Successors(); it.Next(); {
                succ := it.Block()
                pred := succ.Pred

                /* update predecessors */
                for x, v := range pred {
                    if v == p.B {
                        pred[x] = bb
                        break
                    }
                }

                /* update Phi nodes: re-key each incoming value from p.B to bb */
                for _, phi := range succ.Phi {
                    phi.V[bb] = phi.V[p.B]
                    delete(phi.V, p.B)
                }
            }

            /* rewrite the direct store instruction: a plain pointer-sized
             * store of ir.R into *ir.M, used when the barrier is disabled */
            st := &IrAMD64_MOV_store_r {
                R: ir.R,
                M: Ptr(ir.M, 0),
                N: abi.PtrSize,
            }

            /* construct the direct store block */
            ds.Ins  = []IrNode { st }
            ds.Term = &IrAMD64_JMP { To: IrLikely(bb) }
            ds.Pred = []*BasicBlock { p.B}

            /* rewrite the write barrier instruction: the callee address is
             * resolved from the constant-pointer table collected above */
            fn := &IrAMD64_CALL_gcwb {
                R  : ir.R,
                M  : ir.M,
                Fn : ptrs[ir.Fn],
            }

            /* function address must exist */
            if fn.Fn == nil {
                panic("missing write barrier function address")
            }

            /* construct the write barrier block */
            wb.Ins  = []IrNode { fn }
            wb.Term = &IrAMD64_JMP { To: IrLikely(bb) }
            wb.Pred = []*BasicBlock { p.B}

            /* rewrite the terminator to check for write barrier: truncate p.B
             * before the barrier and branch on a 1-byte compare of *ir.Var
             * against 0 (NOTE(review): presumably the runtime's write-barrier
             * enabled flag — confirm against the IrWriteBarrier producer).
             * Non-zero (barrier on) -> wb, marked unlikely; zero -> ds. */
            p.B.Ins  = p.B.Ins[:p.I]
            p.B.Term = &IrAMD64_Jcc_mi {
                X  : Ptr(ir.Var, 0),
                Y  : 0,
                N  : 1,
                To : IrUnlikely(wb),
                Ln : IrLikely(ds),
                Op : IrAMD64_CmpNe,
            }
        }

        /* Phase 3: Rebuild the CFG, but only if anything was actually split */
        if len(mbir) != 0 {
            cfg.Rebuild()
        }
    }
}
   154  }