github.com/erda-project/erda-infra@v1.0.9/providers/component-protocol/protocol/render_parallel.go

// Copyright (c) 2021 Terminus, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package protocol

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"strings"
	"sync"

	"github.com/sirupsen/logrus"

	"github.com/erda-project/erda-infra/providers/component-protocol/cptype"
	"github.com/erda-project/erda-infra/providers/component-protocol/utils/cputil"
)

// Node used for parallel rendering.
type Node struct {
	Name     string
	Parallel bool

	NextNodes       []*Node
	nextNodesByName map[string]*Node
	PreviousNode    *Node

	BindingStates []cptype.RendingState

	doneNextNodesByName map[string]*Node
}

func (n *Node) toRendingItem() cptype.RendingItem {
	return cptype.RendingItem{Name: n.Name, State: n.BindingStates}
}

func printIndent(w io.Writer, repeat int) {
	if repeat == 0 {
		repeat = 1
	}
	for i := 0; i < repeat; i++ {
		fmt.Fprintf(w, " ")
	}
}
func printNode(w io.Writer, n *Node) {
	if n.Parallel {
		fmt.Fprintf(w, "[P] %s\n", n.Name)
	} else {
		fmt.Fprintf(w, "[S] %s\n", n.Name)
	}
}

func (n *Node) String() string {
	w := new(bytes.Buffer)
	fmt.Fprintf(w, "root: %s\n", n.Name)
	depth := 1
	n.printNexts(w, depth)
	return w.String()
}
func (n *Node) printNexts(w io.Writer, depth int) {
	for _, next := range n.NextNodes {
		printIndent(w, depth*2)
		printNode(w, next)
		next.printNexts(w, depth+1)
	}
}

func makeSerialNode(item cptype.RendingItem) *Node {
	return &Node{Name: item.Name, Parallel: false, BindingStates: item.State, doneNextNodesByName: map[string]*Node{}}
}
func (n *Node) addNext(next *Node) {
	// set next
	n.NextNodes = append(n.NextNodes, next)
	if n.nextNodesByName == nil {
		n.nextNodesByName = make(map[string]*Node)
	}
	n.nextNodesByName[next.Name] = next
	// set previous
	next.PreviousNode = n
}
func removeOneNode(nodes *[]*Node, removeNodeName string) {
	index := -1
	for i, node := range *nodes {
		if node.Name == removeNodeName {
			index = i
			break
		}
	}
	if index == -1 {
		return
	}
	*nodes = append((*nodes)[:index], (*nodes)[index+1:]...)
}
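
// cutOffPrevious detaches n from its current parent node: it clears
// n.PreviousNode and removes n from the parent's NextNodes/nextNodesByName,
// so that n can later be re-attached elsewhere by linkSubSerialNode or
// linkSubParallelNode.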
func (n *Node) cutOffPrevious() {
	previousNode := n.PreviousNode
	if previousNode == nil {
		return
	}

	// cut off from node's previous node
	n.PreviousNode = nil

	// cut off from previous node's next nodes
	delete(previousNode.nextNodesByName, n.Name)
	removeOneNode(&previousNode.NextNodes, n.Name)
}
func (n *Node) linkSubParallelNode(subNode *Node) {
	// set parallel to true
	subNode.Parallel = true
	// link as serial
	n.linkSubSerialNode(subNode)
}

func (n *Node) linkSubSerialNode(subNode *Node) {
	// find the index where subNode already sits under n, if any
	subNodeIndex := -1
	for i, nextNode := range n.NextNodes {
		if nextNode.Name == subNode.Name {
			subNodeIndex = i
			break
		}
	}
	// not found: detach subNode from its old parent and append it to n
	if subNodeIndex == -1 {
		// first drop subNode's original link
		subNode.cutOffPrevious()
		// then add to node's nextNodes
		n.addNext(subNode)
	}
}

func parseParallelRendering(p *cptype.ComponentProtocol, compRenderingItems []cptype.RendingItem) (*Node, error) {
	if len(compRenderingItems) == 0 {
		return nil, nil
	}

	// link all nodes according to compRenderingItems' serial order
	nodesMap := make(map[string]*Node)
	var rootNode *Node
	var lastNode *Node
	for _, item := range compRenderingItems {
		// make new serial node
		node := makeSerialNode(item)
		// add to nodes map
		nodesMap[node.Name] = node
		// set root node
		if lastNode == nil {
			rootNode = node
		} else {
			// link node with previous
			lastNode.addNext(node)
		}
		// set current node as lastNode
		lastNode = node
	}

	// link again according to hierarchy structure
	for nodeName, v := range p.Hierarchy.Structure {
		var subCompNames []string
		if err := cputil.ObjJSONTransfer(&v, &subCompNames); err != nil {
			continue
		}
		node, ok := nodesMap[nodeName]
		if !ok {
			continue
		}
		for _, subNodeName := range subCompNames {
			// set subNode's previous again
			subNode, ok := nodesMap[subNodeName]
			if !ok {
				continue
			}
			node.linkSubSerialNode(subNode)
		}
	}

	// link all nodes again according to hierarchy.Parallel definition
	parallelDef := p.Hierarchy.Parallel
	if parallelDef == nil {
		return rootNode, nil
	}
	for parentNodeName, subParallelNodeNames := range parallelDef {
		// check existence first
		parentNode, ok := nodesMap[parentNodeName]
		if !ok {
			continue
		}
		for _, subNodeName := range subParallelNodeNames {
			// check existence first
			subNode, ok := nodesMap[subNodeName]
			if !ok {
				continue
			}
			// link parent and sub node
			parentNode.linkSubParallelNode(subNode)
		}
	}

	return rootNode, nil
}
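
// hypotheticalParallelTree is an illustrative sketch: the function name and the
// concrete component names are hypothetical, and it uses only helpers defined
// in this file. It builds by hand roughly the shape parseParallelRendering
// produces for rendering items [page, filter, table] when Hierarchy.Parallel
// marks "table" as a parallel child of "page" (assuming Hierarchy.Structure
// does not re-link these components): the initial serial chain
// page -> filter -> table becomes page -> {filter (serial), table (parallel)}.
func hypotheticalParallelTree() *Node {
	page := makeSerialNode(cptype.RendingItem{Name: "page"})
	filter := makeSerialNode(cptype.RendingItem{Name: "filter"})
	table := makeSerialNode(cptype.RendingItem{Name: "table"})
	// serial chain built from the ordered rendering items
	page.addNext(filter)
	filter.addNext(table)
	// re-link: table is cut off from filter and hung under page with Parallel=true
	page.linkSubParallelNode(table)
	return page
}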

func renderFromNode(ctx context.Context, req *cptype.ComponentProtocolRequest, sr ScenarioRender, node *Node) error {
	// render itself
	if err := renderOneNode(ctx, req, sr, node); err != nil {
		return err
	}

	// continue rendering next nodes until all of them are done
	i := 0
	for {
		if i > 50 {
			return fmt.Errorf("abnormal render next nodes, over 50 times, force stop")
		}
		if len(node.doneNextNodesByName) == len(node.NextNodes) {
			break
		}
		// render next nodes
		if err := node.renderNextNodes(ctx, req, sr); err != nil {
			return err
		}
		i++
	}

	return nil
}

func (n *Node) renderNextNodes(ctx context.Context, req *cptype.ComponentProtocolRequest, sr ScenarioRender) error {
	// render next nodes
	renderableNodes := n.calcRenderableNextNodes()
	if len(renderableNodes) == 0 {
		return nil
	}
	printRenderableNodes(renderableNodes)
	var wg sync.WaitGroup
	var mu sync.Mutex // guards errorMsgs, which is appended to from multiple goroutines
	var errorMsgs []string
	for _, nextNode := range renderableNodes {
		wg.Add(1)
		go func(nextNode *Node) {
			logrus.Infof("begin render node: %s", nextNode.Name)
			defer logrus.Infof("end render node: %s", nextNode.Name)
			defer wg.Done()

			if err := renderFromNode(ctx, req, sr, nextNode); err != nil {
				mu.Lock()
				errorMsgs = append(errorMsgs, err.Error())
				mu.Unlock()
			}
		}(nextNode)
	}
	wg.Wait()
	if len(errorMsgs) > 0 {
		return fmt.Errorf("%s", strings.Join(errorMsgs, ", "))
	}
	return nil
}

func printRenderableNodes(nodes []*Node) {
	var nodeNames []string
	for _, node := range nodes {
		nodeNames = append(nodeNames, node.Name)
	}
	switch len(nodeNames) {
	case 0:
		return
	case 1:
		logrus.Infof("[S] serial renderable node: %s", strings.Join(nodeNames, ", "))
	default:
		logrus.Infof("[P] parallel renderable nodes: %s", strings.Join(nodeNames, ", "))
	}
}

func (n *Node) calcRenderableNextNodes() []*Node {
	if n.doneNextNodesByName == nil {
		n.doneNextNodesByName = make(map[string]*Node)
	}
	var renderableNodes []*Node
	defer func() {
		for _, node := range renderableNodes {
			n.doneNextNodesByName[node.Name] = node
		}
	}()
	// walk nextNodes in order
	for _, next := range n.NextNodes {
		// skip already done
		if _, done := n.doneNextNodesByName[next.Name]; done {
			continue
		}
		// always take the first not-yet-done node
		if len(renderableNodes) == 0 {
			renderableNodes = append(renderableNodes, next)
			continue
		}

		// once the batch is started, keep taking parallel nodes and stop
		// before the next serial node (exclusive):
		// s->p->s => s->p
		// s->s    => s
		// p->p->s => p->p
		// p->s    => p
		if !next.Parallel {
			return renderableNodes
		}

		// add
		renderableNodes = append(renderableNodes, next)
	}
	return renderableNodes
}

func renderOneNode(ctx context.Context, req *cptype.ComponentProtocolRequest, sr ScenarioRender, node *Node) error {
	return renderOneComp(ctx, req, sr, node.toRendingItem())
}
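
// Illustrative walkthrough of the batching above, using hypothetical node
// names: suppose a node's NextNodes are [a(S), b(P), c(P), d(S)]. The first
// call to calcRenderableNextNodes returns the batch [a, b, c] (the first
// pending node is always taken, then parallel nodes are appended until the
// next serial node), and the second call returns [d]. renderFromNode keeps
// calling renderNextNodes until doneNextNodesByName covers all of NextNodes,
// and renderNextNodes renders every node of a batch in its own goroutine, so
// even the leading serial node of a batch runs concurrently with the parallel
// nodes that follow it.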