github.com/blong14/gache@v0.0.0-20240124023949-89416fd8bbfa/internal/proxy/pool.go (about)

     1  package proxy
     2  
     3  import (
     4  	"bytes"
     5  	"context"
     6  	"fmt"
     7  	"runtime"
     8  	"sync"
     9  	"time"
    10  
    11  	gdb "github.com/blong14/gache/internal/db"
    12  	glog "github.com/blong14/gache/internal/logging"
    13  	gtable "github.com/blong14/gache/internal/map/tablemap"
    14  )
    15  
// Worker consumes queries from a shared inbox and executes them through
// its owning WorkPool. A Worker is started with Start and is signaled to
// halt either via its stop channel or by context cancellation.
type Worker struct {
	id    string            // human-readable identifier, e.g. "worker::0"
	inbox <-chan *gdb.Query // receive-only query stream shared by all workers in the pool
	stop  chan interface{}  // sent on (or closed) to request shutdown; see Stop and WorkPool.WaitAndStop
	pool  *WorkPool         // owning pool; performs the actual query execution
}
    22  
    23  func (s *Worker) Start(ctx context.Context) {
    24  	glog.Track("%T::%s starting", s.pool, s.id)
    25  	for {
    26  		select {
    27  		case <-ctx.Done():
    28  			glog.Track("%T::%s ctx canceled", s.pool, s.id)
    29  			return
    30  		case <-s.stop:
    31  			glog.Track("%T::%s stopping", s.pool, s.id)
    32  			return
    33  		case query, ok := <-s.inbox:
    34  			if !ok {
    35  				return
    36  			}
    37  			start := time.Now()
    38  			s.pool.Execute(ctx, query)
    39  			glog.Track(
    40  				"%T::%s executed %s %s [%s]",
    41  				s.pool, s.id, query.Header.Inst, query.Key, time.Since(start),
    42  			)
    43  		}
    44  	}
    45  }
    46  
    47  func (s *Worker) Stop(ctx context.Context) {
    48  	select {
    49  	case <-ctx.Done():
    50  	case s.stop <- struct{}{}:
    51  	}
    52  }
    53  
// WorkPool fans queries out from a single inbox channel to a set of
// Worker goroutines and owns the mapping from table names to tables.
type WorkPool struct {
	inbox chan *gdb.Query // shared query stream consumed by every worker
	// table name to table view
	tables  *gtable.TableMap[[]byte, *Table]
	workers []Worker // populated by Start; one worker per CPU
}
    60  
    61  func NewWorkPool(inbox chan *gdb.Query) *WorkPool {
    62  	return &WorkPool{
    63  		inbox:   inbox,
    64  		tables:  gtable.New[[]byte, *Table](bytes.Compare),
    65  		workers: make([]Worker, 0),
    66  	}
    67  }
    68  
    69  func (w *WorkPool) Start(ctx context.Context) {
    70  	for i := 0; i < runtime.NumCPU(); i++ {
    71  		worker := Worker{
    72  			id:    fmt.Sprintf("worker::%d", i),
    73  			inbox: w.inbox,
    74  			stop:  make(chan interface{}),
    75  			pool:  w,
    76  		}
    77  		w.workers = append(w.workers, worker)
    78  		go worker.Start(ctx)
    79  	}
    80  }
    81  
    82  func (w *WorkPool) Send(ctx context.Context, query *gdb.Query) {
    83  	select {
    84  	case <-ctx.Done():
    85  	case w.inbox <- query:
    86  	}
    87  }
    88  
    89  func (w *WorkPool) Execute(ctx context.Context, query *gdb.Query) {
    90  	switch query.Header.Inst {
    91  	case gdb.AddTable:
    92  		var opts *gdb.TableOpts
    93  		if query.Header.Opts != nil {
    94  			opts = query.Header.Opts
    95  		} else {
    96  			opts = &gdb.TableOpts{
    97  				InMemory:  true,
    98  				WalMode:   false,
    99  				DataDir:   []byte("testdata"),
   100  				TableName: query.Header.TableName,
   101  			}
   102  		}
   103  		t := NewTable(opts)
   104  		w.tables.Set(query.Header.TableName, t)
   105  		query.Done(gdb.QueryResponse{Success: true})
   106  	case gdb.Load:
   107  		glog.Track(
   108  			"loading csv %s for %s", query.Header.FileName, query.Header.TableName)
   109  		loader := NewCSVReader(w)
   110  		loader.Read(ctx, query)
   111  	default:
   112  		table, ok := w.tables.Get(query.Header.TableName)
   113  		if !ok {
   114  			query.Done(gdb.QueryResponse{Success: false})
   115  			return
   116  		}
   117  		table.Execute(ctx, query)
   118  	}
   119  }
   120  
   121  func (w *WorkPool) WaitAndStop(ctx context.Context) {
   122  	glog.Track("%T stopping...\n", w)
   123  	var wg sync.WaitGroup
   124  	for _, worker := range w.workers {
   125  		wg.Add(1)
   126  		go func(w Worker) {
   127  			defer wg.Done()
   128  			w.Stop(ctx)
   129  			close(w.stop)
   130  		}(worker)
   131  	}
   132  	wg.Wait()
   133  	w.tables.Range(func(k []byte, table *Table) bool {
   134  		table.Stop()
   135  		return true
   136  	})
   137  	glog.Track("%T stopped\n", w)
   138  }