github.com/matrixorigin/matrixone@v1.2.0/pkg/proxy/handler.go

// Copyright 2021 - 2023 Matrix Origin
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package proxy

import (
	"context"
	"fmt"
	"net"

	"github.com/fagongzi/goetty/v2"
	"github.com/matrixorigin/matrixone/pkg/clusterservice"
	"github.com/matrixorigin/matrixone/pkg/common/log"
	"github.com/matrixorigin/matrixone/pkg/common/moerr"
	"github.com/matrixorigin/matrixone/pkg/common/runtime"
	"github.com/matrixorigin/matrixone/pkg/common/stopper"
	"github.com/matrixorigin/matrixone/pkg/logservice"
	v2 "github.com/matrixorigin/matrixone/pkg/util/metric/v2"
	"go.uber.org/zap"
)

// handler is the proxy service handler.
type handler struct {
	ctx     context.Context
	logger  *log.MOLogger
	config  Config
	stopper *stopper.Stopper
	// moCluster is the CN server cache, and is used to filter
	// CN servers by label.
	moCluster clusterservice.MOCluster
	// router selects the best CN server and connects to it.
	router Router
	// rebalancer is the global rebalancer.
	rebalancer *rebalancer
	// counterSet counts the events in proxy.
	counterSet *counterSet
	// haKeeperClient is the client to communicate with HAKeeper.
	haKeeperClient logservice.ProxyHAKeeperClient
	// ipNetList is the list of IP networks parsed from the configured CIDRs.
	ipNetList []*net.IPNet
}

// ErrNoAvailableCNServers is returned when no CN server can serve the connection.
var ErrNoAvailableCNServers = moerr.NewInternalErrorNoCtx("no available CN servers")

// newProxyHandler creates a new proxy handler.
func newProxyHandler(
	ctx context.Context,
	rt runtime.Runtime,
	cfg Config,
	st *stopper.Stopper,
	cs *counterSet,
	haKeeperClient logservice.ProxyHAKeeperClient,
) (*handler, error) {
	// Create the MO cluster.
	mc := clusterservice.NewMOCluster(haKeeperClient, cfg.Cluster.RefreshInterval.Duration)
	rt.SetGlobalVariables(runtime.ClusterService, mc)

	// Create the rebalancer.
	var opts []rebalancerOption
	opts = append(opts,
		withRebalancerInterval(cfg.RebalanceInterval.Duration),
		withRebalancerTolerance(cfg.RebalanceTolerance),
	)
	if cfg.RebalanceDisabled {
		opts = append(opts, withRebalancerDisabled())
	}

	re, err := newRebalancer(st, rt.Logger(), mc, opts...)
	if err != nil {
		return nil, err
	}

	ru := newRouter(mc, re, false,
		withConnectTimeout(cfg.ConnectTimeout.Duration),
		withAuthTimeout(cfg.AuthTimeout.Duration),
	)
	// Decorate the router if the plugin is enabled.
	if cfg.Plugin != nil {
		p, err := newRPCPlugin(cfg.Plugin.Backend, cfg.Plugin.Timeout)
		if err != nil {
			return nil, err
		}
		ru = newPluginRouter(ru, p)
	}

	// Parse the configured internal CIDRs; invalid entries are logged and skipped.
	var ipNetList []*net.IPNet
	for _, cidr := range cfg.InternalCIDRs {
		_, ipNet, err := net.ParseCIDR(cidr)
		if err != nil {
			rt.Logger().Error("failed to parse CIDR",
				zap.String("CIDR", cidr),
				zap.Error(err))
		} else {
			ipNetList = append(ipNetList, ipNet)
		}
	}
	return &handler{
		ctx:            ctx,
		logger:         rt.Logger(),
		config:         cfg,
		stopper:        st,
		moCluster:      mc,
		counterSet:     cs,
		router:         ru,
		rebalancer:     re,
		haKeeperClient: haKeeperClient,
		ipNetList:      ipNetList,
	}, nil
}

// handle handles the incoming connection.
func (h *handler) handle(c goetty.IOSession) error {
	h.logger.Info("new connection comes", zap.Uint64("session ID", c.ID()))
	v2.ProxyConnectAcceptedCounter.Inc()
	h.counterSet.connAccepted.Add(1)
	h.counterSet.connTotal.Add(1)
	defer func() {
		v2.ProxyConnectCurrentCounter.Inc()
		h.counterSet.connTotal.Add(-1)
	}()

	// Create a new tunnel to manage the client connection and server connection.
	t := newTunnel(h.ctx, h.logger, h.counterSet,
		withRebalancePolicy(RebalancePolicyMapping[h.config.RebalancePolicy]),
		withRebalancer(h.rebalancer),
	)
	defer func() {
		_ = t.Close()
	}()

	cc, err := newClientConn(
		h.ctx,
		&h.config,
		h.logger,
		h.counterSet,
		c,
		h.haKeeperClient,
		h.moCluster,
		h.router,
		t,
		h.ipNetList,
	)
	if err != nil {
		h.logger.Error("failed to create client conn", zap.Error(err))
		return err
	}
	h.logger.Debug("client conn created")
	defer func() { _ = cc.Close() }()

	// The client builds a connection with the best CN server and returns
	// the server connection.
	sc, err := cc.BuildConnWithServer("")
	if err != nil {
		if isConnEndErr(err) {
			return nil
		}
		h.logger.Error("failed to create server conn", zap.Error(err))
		h.counterSet.updateWithErr(err)
		cc.SendErrToClient(err)
		return err
	}
	h.logger.Debug("server conn created")
	defer func() { _ = sc.Close() }()

	h.logger.Info("build connection",
		zap.String("client->proxy", fmt.Sprintf("%s -> %s", cc.RawConn().RemoteAddr(), cc.RawConn().LocalAddr())),
		zap.String("proxy->server", fmt.Sprintf("%s -> %s", sc.RawConn().LocalAddr(), sc.RawConn().RemoteAddr())),
		zap.Uint32("conn ID", cc.ConnID()),
		zap.Uint64("session ID", c.ID()),
	)

	st := stopper.NewStopper("proxy-conn-handle", stopper.WithLogger(h.logger.RawLogger()))
	defer st.Stop()
	// Start the event-handler goroutine to handle the events coming from the
	// tunnel data flow, such as the kill-connection event.
	if err := st.RunNamedTask("event-handler", func(ctx context.Context) {
		for {
			select {
			case e := <-t.reqC:
				if err := cc.HandleEvent(ctx, e, t.respC); err != nil {
					h.logger.Error("failed to handle event",
						zap.Any("event", e), zap.Error(err))
				}
			case r := <-t.respC:
				if len(r) > 0 {
					t.mu.Lock()
					// We must call this method because it locks writeMu.
					if err := t.mu.serverConn.writeDataDirectly(cc.RawConn(), r); err != nil {
						h.logger.Error("failed to write event response",
							zap.Any("response", r), zap.Error(err))
					}
					t.mu.Unlock()
				}
			case <-ctx.Done():
				h.logger.Debug("event handler stopped.")
				return
			}
		}
	}); err != nil {
		return err
	}

	if err := t.run(cc, sc); err != nil {
		return err
	}

	select {
	case <-h.ctx.Done():
		return h.ctx.Err()
	case err := <-t.errC:
		if isEOFErr(err) || isConnEndErr(err) {
			return nil
		}
		h.counterSet.updateWithErr(err)
		h.logger.Error("proxy handle error", zap.Error(err))
		return err
	}
}

// Close closes the handler.
func (h *handler) Close() error {
	if h != nil {
		h.moCluster.Close()
		_ = h.haKeeperClient.Close()
	}
	return nil
}
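
// Usage (a hypothetical sketch, not part of the upstream file): a caller such
// as the proxy server would typically construct the handler once, then invoke
// handle for every accepted client session and close the handler on shutdown,
// roughly as follows:
//
//	h, err := newProxyHandler(ctx, rt, cfg, st, cs, haKeeperClient)
//	if err != nil {
//		return err
//	}
//	defer func() { _ = h.Close() }()
//	// For each accepted goetty.IOSession c, serve it on its own goroutine;
//	// handle blocks until the tunnel between client and CN server ends.
//	go func() { _ = h.handle(c) }()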