github.com/xraypb/Xray-core@v1.8.1/common/mux/client.go

package mux

import (
	"context"
	"io"
	"sync"
	"time"

	"github.com/xraypb/Xray-core/common"
	"github.com/xraypb/Xray-core/common/buf"
	"github.com/xraypb/Xray-core/common/errors"
	"github.com/xraypb/Xray-core/common/net"
	"github.com/xraypb/Xray-core/common/protocol"
	"github.com/xraypb/Xray-core/common/session"
	"github.com/xraypb/Xray-core/common/signal/done"
	"github.com/xraypb/Xray-core/common/task"
	"github.com/xraypb/Xray-core/common/xudp"
	"github.com/xraypb/Xray-core/proxy"
	"github.com/xraypb/Xray-core/transport"
	"github.com/xraypb/Xray-core/transport/internet"
	"github.com/xraypb/Xray-core/transport/pipe"
)

// ClientManager dispatches outbound links onto multiplexed client workers.
type ClientManager struct {
	Enabled bool // whether mux is enabled in the user config
	Picker  WorkerPicker
	Only    uint32
}

// Dispatch tries up to 16 times to pick a worker that accepts the link.
func (m *ClientManager) Dispatch(ctx context.Context, link *transport.Link) error {
	for i := 0; i < 16; i++ {
		worker, err := m.Picker.PickAvailable()
		if err != nil {
			return err
		}
		if worker.Dispatch(ctx, link) {
			return nil
		}
	}

	return newError("unable to find an available mux client").AtWarning()
}

// WorkerPicker selects a ClientWorker that can take on another sub-connection.
type WorkerPicker interface {
	PickAvailable() (*ClientWorker, error)
}

// IncrementalWorkerPicker reuses existing workers when possible, creates new
// ones through Factory on demand, and periodically drops closed workers.
type IncrementalWorkerPicker struct {
	Factory ClientWorkerFactory

	access      sync.Mutex
	workers     []*ClientWorker
	cleanupTask *task.Periodic
}

func (p *IncrementalWorkerPicker) cleanupFunc() error {
	p.access.Lock()
	defer p.access.Unlock()

	if len(p.workers) == 0 {
		return newError("no worker")
	}

	p.cleanup()
	return nil
}

func (p *IncrementalWorkerPicker) cleanup() {
	var activeWorkers []*ClientWorker
	for _, w := range p.workers {
		if !w.Closed() {
			activeWorkers = append(activeWorkers, w)
		}
	}
	p.workers = activeWorkers
}

func (p *IncrementalWorkerPicker) findAvailable() int {
	for idx, w := range p.workers {
		if !w.IsFull() {
			return idx
		}
	}

	return -1
}

func (p *IncrementalWorkerPicker) pickInternal() (*ClientWorker, bool, error) {
	p.access.Lock()
	defer p.access.Unlock()

	idx := p.findAvailable()
	if idx >= 0 {
		n := len(p.workers)
		if n > 1 && idx != n-1 {
			p.workers[n-1], p.workers[idx] = p.workers[idx], p.workers[n-1]
		}
		return p.workers[idx], false, nil
	}

	p.cleanup()

	worker, err := p.Factory.Create()
	if err != nil {
		return nil, false, err
	}
	p.workers = append(p.workers, worker)

	if p.cleanupTask == nil {
		p.cleanupTask = &task.Periodic{
			Interval: time.Second * 30,
			Execute:  p.cleanupFunc,
		}
	}

	return worker, true, nil
}

func (p *IncrementalWorkerPicker) PickAvailable() (*ClientWorker, error) {
	worker, start, err := p.pickInternal()
	if start {
		common.Must(p.cleanupTask.Start())
	}

	return worker, err
}

type ClientWorkerFactory interface {
	Create() (*ClientWorker, error)
}

// DialingWorkerFactory creates client workers whose mux link is processed by
// the configured outbound proxy over the configured dialer.
type DialingWorkerFactory struct {
	Proxy    proxy.Outbound
	Dialer   internet.Dialer
	Strategy ClientStrategy
}
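
// A minimal wiring sketch for the types above (not part of this file;
// "outboundProxy", "systemDialer", "ctx", and "link" are hypothetical values
// supplied by the caller, e.g. an existing proxy.Outbound and internet.Dialer):
//
//	picker := &IncrementalWorkerPicker{
//		Factory: &DialingWorkerFactory{
//			Proxy:    outboundProxy,
//			Dialer:   systemDialer,
//			Strategy: ClientStrategy{MaxConcurrency: 8, MaxConnection: 128},
//		},
//	}
//	manager := &ClientManager{Enabled: true, Picker: picker}
//	// Reuses a worker with spare concurrency, or dials a fresh one.
//	err := manager.Dispatch(ctx, link)
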
func (f *DialingWorkerFactory) Create() (*ClientWorker, error) {
	opts := []pipe.Option{pipe.WithSizeLimit(64 * 1024)}
	uplinkReader, upLinkWriter := pipe.New(opts...)
	downlinkReader, downlinkWriter := pipe.New(opts...)

	c, err := NewClientWorker(transport.Link{
		Reader: downlinkReader,
		Writer: upLinkWriter,
	}, f.Strategy)
	if err != nil {
		return nil, err
	}

	go func(p proxy.Outbound, d internet.Dialer, c common.Closable) {
		ctx := session.ContextWithOutbound(context.Background(), &session.Outbound{
			Target: net.TCPDestination(muxCoolAddress, muxCoolPort),
		})
		ctx, cancel := context.WithCancel(ctx)

		if err := p.Process(ctx, &transport.Link{Reader: uplinkReader, Writer: downlinkWriter}, d); err != nil {
			errors.New("failed to handle mux client connection").Base(err).WriteToLog()
		}
		common.Must(c.Close())
		cancel()
	}(f.Proxy, f.Dialer, c.done)

	return c, nil
}

// ClientStrategy controls per-worker limits: the number of concurrent
// sub-connections and the total number of sub-connections over a worker's lifetime.
type ClientStrategy struct {
	MaxConcurrency uint32
	MaxConnection  uint32
}

// ClientWorker multiplexes sub-connections over a single underlying link.
type ClientWorker struct {
	sessionManager *SessionManager
	link           transport.Link
	done           *done.Instance
	strategy       ClientStrategy
}

var (
	muxCoolAddress = net.DomainAddress("v1.mux.cool")
	muxCoolPort    = net.Port(9527)
)

// NewClientWorker creates a new mux client worker and starts its output and
// monitor goroutines.
func NewClientWorker(stream transport.Link, s ClientStrategy) (*ClientWorker, error) {
	c := &ClientWorker{
		sessionManager: NewSessionManager(),
		link:           stream,
		done:           done.New(),
		strategy:       s,
	}

	go c.fetchOutput()
	go c.monitor()

	return c, nil
}

func (m *ClientWorker) TotalConnections() uint32 {
	return uint32(m.sessionManager.Count())
}

func (m *ClientWorker) ActiveConnections() uint32 {
	return uint32(m.sessionManager.Size())
}

// Closed returns true if this Client is closed.
func (m *ClientWorker) Closed() bool {
	return m.done.Done()
}

// monitor tears down the link once the worker is done, and closes the worker
// when it has been left without sessions.
func (m *ClientWorker) monitor() {
	timer := time.NewTicker(time.Second * 16)
	defer timer.Stop()

	for {
		select {
		case <-m.done.Wait():
			m.sessionManager.Close()
			common.Close(m.link.Writer)
			common.Interrupt(m.link.Reader)
			return
		case <-timer.C:
			size := m.sessionManager.Size()
			if size == 0 && m.sessionManager.CloseIfNoSession() {
				common.Must(m.done.Close())
			}
		}
	}
}

// writeFirstPayload tries to bundle the first piece of payload with the
// session's initial frame; if nothing arrives within 100ms, it sends an empty
// write instead.
func writeFirstPayload(reader buf.Reader, writer *Writer) error {
	err := buf.CopyOnceTimeout(reader, writer, time.Millisecond*100)
	if err == buf.ErrNotTimeoutReader || err == buf.ErrReadTimeout {
		return writer.WriteMultiBuffer(buf.MultiBuffer{})
	}

	if err != nil {
		return err
	}

	return nil
}

// fetchInput copies the sub-connection's upstream data into the shared mux
// link as frames for the given session.
func fetchInput(ctx context.Context, s *Session, output buf.Writer) {
	dest := session.OutboundFromContext(ctx).Target
	transferType := protocol.TransferTypeStream
	if dest.Network == net.Network_UDP {
		transferType = protocol.TransferTypePacket
	}
	s.transferType = transferType
	writer := NewWriter(s.ID, dest, output, transferType, xudp.GetGlobalID(ctx))
	defer s.Close(false)
	defer writer.Close()

	newError("dispatching request to ", dest).WriteToLog(session.ExportIDToError(ctx))
	if err := writeFirstPayload(s.input, writer); err != nil {
		newError("failed to write first payload").Base(err).WriteToLog(session.ExportIDToError(ctx))
		writer.hasError = true
		return
	}

	if err := buf.Copy(s.input, writer); err != nil {
		newError("failed to fetch all input").Base(err).WriteToLog(session.ExportIDToError(ctx))
		writer.hasError = true
		return
	}
}
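
// A rough sketch of a single sub-connection's life cycle on one worker
// ("muxedLink" and "link" are hypothetical *transport.Link values; ctx is
// expected to carry a session.Outbound whose Target is the real destination):
//
//	worker, _ := NewClientWorker(muxedLink, ClientStrategy{MaxConcurrency: 8})
//	if worker.Dispatch(ctx, link) {
//		// fetchInput turns link.Reader into Mux.Cool frames on muxedLink.Writer,
//		// while fetchOutput (started by NewClientWorker) demultiplexes response
//		// frames from muxedLink.Reader back into link.Writer for this session.
//	}
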
// IsClosing reports whether the worker has reached its lifetime connection
// limit and should not accept new sub-connections.
func (m *ClientWorker) IsClosing() bool {
	sm := m.sessionManager
	if m.strategy.MaxConnection > 0 && sm.Count() >= int(m.strategy.MaxConnection) {
		return true
	}
	return false
}

// IsFull reports whether the worker cannot take another sub-connection right now.
func (m *ClientWorker) IsFull() bool {
	if m.IsClosing() || m.Closed() {
		return true
	}

	sm := m.sessionManager
	if m.strategy.MaxConcurrency > 0 && sm.Size() >= int(m.strategy.MaxConcurrency) {
		return true
	}
	return false
}

// Dispatch allocates a new session for the link and starts copying its input
// into the mux link. It returns false if the worker cannot accept the link.
func (m *ClientWorker) Dispatch(ctx context.Context, link *transport.Link) bool {
	if m.IsFull() || m.Closed() {
		return false
	}

	sm := m.sessionManager
	s := sm.Allocate()
	if s == nil {
		return false
	}
	s.input = link.Reader
	s.output = link.Writer
	go fetchInput(ctx, s, m.link.Writer)
	return true
}

func (m *ClientWorker) handleStatueKeepAlive(meta *FrameMetadata, reader *buf.BufferedReader) error {
	if meta.Option.Has(OptionData) {
		return buf.Copy(NewStreamReader(reader), buf.Discard)
	}
	return nil
}

func (m *ClientWorker) handleStatusNew(meta *FrameMetadata, reader *buf.BufferedReader) error {
	if meta.Option.Has(OptionData) {
		return buf.Copy(NewStreamReader(reader), buf.Discard)
	}
	return nil
}

func (m *ClientWorker) handleStatusKeep(meta *FrameMetadata, reader *buf.BufferedReader) error {
	if !meta.Option.Has(OptionData) {
		return nil
	}

	s, found := m.sessionManager.Get(meta.SessionID)
	if !found {
		// Notify remote peer to close this session.
		closingWriter := NewResponseWriter(meta.SessionID, m.link.Writer, protocol.TransferTypeStream)
		closingWriter.Close()

		return buf.Copy(NewStreamReader(reader), buf.Discard)
	}

	rr := s.NewReader(reader, &meta.Target)
	err := buf.Copy(rr, s.output)
	if err != nil && buf.IsWriteError(err) {
		newError("failed to write to downstream. closing session ", s.ID).Base(err).WriteToLog()
		s.Close(false)
		return buf.Copy(rr, buf.Discard)
	}

	return err
}

func (m *ClientWorker) handleStatusEnd(meta *FrameMetadata, reader *buf.BufferedReader) error {
	if s, found := m.sessionManager.Get(meta.SessionID); found {
		s.Close(false)
	}
	if meta.Option.Has(OptionData) {
		return buf.Copy(NewStreamReader(reader), buf.Discard)
	}
	return nil
}

// fetchOutput reads frames from the mux link and routes each one to the
// handler for its session status until the link is drained or an error occurs.
func (m *ClientWorker) fetchOutput() {
	defer func() {
		common.Must(m.done.Close())
	}()

	reader := &buf.BufferedReader{Reader: m.link.Reader}

	var meta FrameMetadata
	for {
		err := meta.Unmarshal(reader)
		if err != nil {
			if errors.Cause(err) != io.EOF {
				newError("failed to read metadata").Base(err).WriteToLog()
			}
			break
		}

		switch meta.SessionStatus {
		case SessionStatusKeepAlive:
			err = m.handleStatueKeepAlive(&meta, reader)
		case SessionStatusEnd:
			err = m.handleStatusEnd(&meta, reader)
		case SessionStatusNew:
			err = m.handleStatusNew(&meta, reader)
		case SessionStatusKeep:
			err = m.handleStatusKeep(&meta, reader)
		default:
			status := meta.SessionStatus
			newError("unknown status: ", status).AtError().WriteToLog()
			return
		}

		if err != nil {
			newError("failed to process data").Base(err).WriteToLog()
			return
		}
	}
}
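
// A note on the two limits enforced by IsClosing and IsFull above, as a rough
// reading of this file rather than a documented guarantee:
//
//	s := ClientStrategy{MaxConcurrency: 8, MaxConnection: 128}
//	// MaxConcurrency caps how many sub-connections may be open on one worker at
//	// the same time (checked against sessionManager.Size() in IsFull).
//	// MaxConnection caps how many sub-connections a worker may carry over its
//	// whole lifetime (checked against sessionManager.Count() in IsClosing);
//	// once reached, the picker stops reusing the worker and dials a new one.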