github.com/sberex/go-sberex@v1.8.2-0.20181113200658-ed96ac38f7d7/dashboard/dashboard.go (about)

     1  // This file is part of the go-sberex library. The go-sberex library is 
     2  // free software: you can redistribute it and/or modify it under the terms 
     3  // of the GNU Lesser General Public License as published by the Free 
     4  // Software Foundation, either version 3 of the License, or (at your option)
     5  // any later version.
     6  //
     7  // The go-sberex library is distributed in the hope that it will be useful, 
     8  // but WITHOUT ANY WARRANTY; without even the implied warranty of
     9  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser 
    10  // General Public License <http://www.gnu.org/licenses/> for more details.
    11  
    12  package dashboard
    13  
    14  //go:generate npm --prefix ./assets install
    15  //go:generate ./assets/node_modules/.bin/webpack --config ./assets/webpack.config.js --context ./assets
    16  //go:generate go-bindata -nometadata -o assets.go -prefix assets -nocompress -pkg dashboard assets/dashboard.html assets/bundle.js
    17  //go:generate sh -c "sed 's#var _bundleJs#//nolint:misspell\\\n&#' assets.go > assets.go.tmp && mv assets.go.tmp assets.go"
    18  //go:generate sh -c "sed 's#var _dashboardHtml#//nolint:misspell\\\n&#' assets.go > assets.go.tmp && mv assets.go.tmp assets.go"
    19  //go:generate gofmt -w -s assets.go
    20  
    21  import (
    22  	"fmt"
    23  	"io/ioutil"
    24  	"net"
    25  	"net/http"
    26  	"path/filepath"
    27  	"runtime"
    28  	"sync"
    29  	"sync/atomic"
    30  	"time"
    31  
    32  	"github.com/elastic/gosigar"
    33  	"github.com/Sberex/go-sberex/log"
    34  	"github.com/Sberex/go-sberex/metrics"
    35  	"github.com/Sberex/go-sberex/p2p"
    36  	"github.com/Sberex/go-sberex/params"
    37  	"github.com/Sberex/go-sberex/rpc"
    38  	"golang.org/x/net/websocket"
    39  )
    40  
// Per-chart history lengths: each metric keeps a fixed-size window of
// samples, rolled forward one entry per refresh by collectData.
const (
	activeMemorySampleLimit   = 200 // Maximum number of active memory data samples
	virtualMemorySampleLimit  = 200 // Maximum number of virtual memory data samples
	networkIngressSampleLimit = 200 // Maximum number of network ingress data samples
	networkEgressSampleLimit  = 200 // Maximum number of network egress data samples
	processCPUSampleLimit     = 200 // Maximum number of process cpu data samples
	systemCPUSampleLimit      = 200 // Maximum number of system cpu data samples
	diskReadSampleLimit       = 200 // Maximum number of disk read data samples
	diskWriteSampleLimit      = 200 // Maximum number of disk write data samples
)
    51  
    52  var nextID uint32 // Next connection id
    53  
// Dashboard contains the dashboard internals.
type Dashboard struct {
	config *Config // Host, port, refresh interval and assets path settings

	listener net.Listener       // TCP listener serving the dashboard site
	conns    map[uint32]*client // Currently live websocket connections
	charts   *HomeMessage       // Rolling windows of collected metric samples
	commit   string             // Commit hash reported to connecting clients
	lock     sync.RWMutex // Lock protecting the dashboard's internals

	quit chan chan error // Channel used for graceful exit
	wg   sync.WaitGroup // Tracks the collector and per-connection sender goroutines
}
    67  
// client represents active websocket connection with a remote browser.
type client struct {
	conn   *websocket.Conn // Particular live websocket connection
	msg    chan Message    // Message queue for the update messages (buffered; full queue drops the client)
	logger log.Logger      // Logger for the particular live websocket connection
}
    74  
    75  // New creates a new dashboard instance with the given configuration.
    76  func New(config *Config, commit string) (*Dashboard, error) {
    77  	now := time.Now()
    78  	db := &Dashboard{
    79  		conns:  make(map[uint32]*client),
    80  		config: config,
    81  		quit:   make(chan chan error),
    82  		charts: &HomeMessage{
    83  			ActiveMemory:   emptyChartEntries(now, activeMemorySampleLimit, config.Refresh),
    84  			VirtualMemory:  emptyChartEntries(now, virtualMemorySampleLimit, config.Refresh),
    85  			NetworkIngress: emptyChartEntries(now, networkIngressSampleLimit, config.Refresh),
    86  			NetworkEgress:  emptyChartEntries(now, networkEgressSampleLimit, config.Refresh),
    87  			ProcessCPU:     emptyChartEntries(now, processCPUSampleLimit, config.Refresh),
    88  			SystemCPU:      emptyChartEntries(now, systemCPUSampleLimit, config.Refresh),
    89  			DiskRead:       emptyChartEntries(now, diskReadSampleLimit, config.Refresh),
    90  			DiskWrite:      emptyChartEntries(now, diskWriteSampleLimit, config.Refresh),
    91  		},
    92  		commit: commit,
    93  	}
    94  	return db, nil
    95  }
    96  
    97  // emptyChartEntries returns a ChartEntry array containing limit number of empty samples.
    98  func emptyChartEntries(t time.Time, limit int, refresh time.Duration) ChartEntries {
    99  	ce := make(ChartEntries, limit)
   100  	for i := 0; i < limit; i++ {
   101  		ce[i] = &ChartEntry{
   102  			Time: t.Add(-time.Duration(i) * refresh),
   103  		}
   104  	}
   105  	return ce
   106  }
   107  
   108  // Protocols is a meaningless implementation of node.Service.
   109  func (db *Dashboard) Protocols() []p2p.Protocol { return nil }
   110  
   111  // APIs is a meaningless implementation of node.Service.
   112  func (db *Dashboard) APIs() []rpc.API { return nil }
   113  
   114  // Start implements node.Service, starting the data collection thread and the listening server of the dashboard.
   115  func (db *Dashboard) Start(server *p2p.Server) error {
   116  	log.Info("Starting dashboard")
   117  
   118  	db.wg.Add(2)
   119  	go db.collectData()
   120  	go db.collectLogs() // In case of removing this line change 2 back to 1 in wg.Add.
   121  
   122  	http.HandleFunc("/", db.webHandler)
   123  	http.Handle("/api", websocket.Handler(db.apiHandler))
   124  
   125  	listener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", db.config.Host, db.config.Port))
   126  	if err != nil {
   127  		return err
   128  	}
   129  	db.listener = listener
   130  
   131  	go http.Serve(listener, nil)
   132  
   133  	return nil
   134  }
   135  
// Stop implements node.Service, stopping the data collection thread and the connection listener of the dashboard.
//
// Shutdown order matters: the listener is closed first so no new connections
// arrive, then the two collectors are asked to quit, then the live websocket
// connections are closed (unblocking their reader/sender goroutines), and
// only then is the WaitGroup drained. All errors encountered along the way
// are accumulated and flattened into a single error at the end.
func (db *Dashboard) Stop() error {
	// Close the connection listener.
	var errs []error
	if err := db.listener.Close(); err != nil {
		errs = append(errs, err)
	}
	// Close the collectors. The shared reply channel is handed to collectData
	// and collectLogs in turn — one send/receive pair per collector goroutine.
	errc := make(chan error, 1)
	for i := 0; i < 2; i++ {
		db.quit <- errc
		if err := <-errc; err != nil {
			errs = append(errs, err)
		}
	}
	// Close the connections. Closing the websocket makes each connection's
	// blocked Read in apiHandler return, letting those goroutines exit.
	db.lock.Lock()
	for _, c := range db.conns {
		if err := c.conn.Close(); err != nil {
			c.logger.Warn("Failed to close connection", "err", err)
		}
	}
	db.lock.Unlock()

	// Wait until every goroutine terminates.
	db.wg.Wait()
	log.Info("Dashboard stopped")

	// Flatten any accumulated errors into a single value for the caller.
	var err error
	if len(errs) > 0 {
		err = fmt.Errorf("%v", errs)
	}

	return err
}
   171  
   172  // webHandler handles all non-api requests, simply flattening and returning the dashboard website.
   173  func (db *Dashboard) webHandler(w http.ResponseWriter, r *http.Request) {
   174  	log.Debug("Request", "URL", r.URL)
   175  
   176  	path := r.URL.String()
   177  	if path == "/" {
   178  		path = "/dashboard.html"
   179  	}
   180  	// If the path of the assets is manually set
   181  	if db.config.Assets != "" {
   182  		blob, err := ioutil.ReadFile(filepath.Join(db.config.Assets, path))
   183  		if err != nil {
   184  			log.Warn("Failed to read file", "path", path, "err", err)
   185  			http.Error(w, "not found", http.StatusNotFound)
   186  			return
   187  		}
   188  		w.Write(blob)
   189  		return
   190  	}
   191  	blob, err := Asset(path[1:])
   192  	if err != nil {
   193  		log.Warn("Failed to load the asset", "path", path, "err", err)
   194  		http.Error(w, "not found", http.StatusNotFound)
   195  		return
   196  	}
   197  	w.Write(blob)
   198  }
   199  
// apiHandler handles requests for the dashboard.
//
// For each websocket connection it spawns one sender goroutine that drains
// the client's message queue onto the socket, replays the buffered chart
// history as a first message, registers the connection for live updates from
// sendToAll, and then blocks reading from the socket purely to detect when
// the remote browser disconnects.
func (db *Dashboard) apiHandler(conn *websocket.Conn) {
	id := atomic.AddUint32(&nextID, 1) // Unique id for the conns map and log context
	client := &client{
		conn:   conn,
		msg:    make(chan Message, 128),
		logger: log.New("id", id),
	}
	done := make(chan struct{}) // Closed once the remote side disconnects

	// Start listening for messages to send.
	db.wg.Add(1)
	go func() {
		defer db.wg.Done()

		for {
			select {
			case <-done:
				return
			case msg := <-client.msg:
				if err := websocket.JSON.Send(client.conn, msg); err != nil {
					client.logger.Warn("Failed to send the message", "msg", msg, "err", err)
					client.conn.Close()
					return
				}
			}
		}
	}()

	versionMeta := ""
	if len(params.VersionMeta) > 0 {
		versionMeta = fmt.Sprintf(" (%s)", params.VersionMeta)
	}
	// Send the past data.
	// NOTE(review): db.charts is read here while collectData mutates it
	// without holding db.lock — looks like a data race; confirm and guard.
	client.msg <- Message{
		General: &GeneralMessage{
			Version: fmt.Sprintf("v%d.%d.%d%s", params.VersionMajor, params.VersionMinor, params.VersionPatch, versionMeta),
			Commit:  db.commit,
		},
		Home: &HomeMessage{
			ActiveMemory:   db.charts.ActiveMemory,
			VirtualMemory:  db.charts.VirtualMemory,
			NetworkIngress: db.charts.NetworkIngress,
			NetworkEgress:  db.charts.NetworkEgress,
			ProcessCPU:     db.charts.ProcessCPU,
			SystemCPU:      db.charts.SystemCPU,
			DiskRead:       db.charts.DiskRead,
			DiskWrite:      db.charts.DiskWrite,
		},
	}
	// Start tracking the connection and drop at connection loss.
	db.lock.Lock()
	db.conns[id] = client
	db.lock.Unlock()
	defer func() {
		db.lock.Lock()
		delete(db.conns, id)
		db.lock.Unlock()
	}()
	// Block until reading fails, signalling disconnection.
	// NOTE(review): reading into a zero-length buffer can return n=0 with a
	// nil error on some readers, which would spin this loop hot — confirm
	// x/net/websocket's frame-read behaviour here.
	for {
		fail := []byte{}
		if _, err := conn.Read(fail); err != nil {
			close(done)
			return
		}
		// Ignore all messages
	}
}
   268  
   269  // collectData collects the required data to plot on the dashboard.
   270  func (db *Dashboard) collectData() {
   271  	defer db.wg.Done()
   272  	systemCPUUsage := gosigar.Cpu{}
   273  	systemCPUUsage.Get()
   274  	var (
   275  		prevNetworkIngress = metrics.DefaultRegistry.Get("p2p/InboundTraffic").(metrics.Meter).Count()
   276  		prevNetworkEgress  = metrics.DefaultRegistry.Get("p2p/OutboundTraffic").(metrics.Meter).Count()
   277  		prevProcessCPUTime = getProcessCPUTime()
   278  		prevSystemCPUUsage = systemCPUUsage
   279  		prevDiskRead       = metrics.DefaultRegistry.Get("eth/db/chaindata/compact/input").(metrics.Meter).Count()
   280  		prevDiskWrite      = metrics.DefaultRegistry.Get("eth/db/chaindata/compact/output").(metrics.Meter).Count()
   281  
   282  		frequency = float64(db.config.Refresh / time.Second)
   283  		numCPU    = float64(runtime.NumCPU())
   284  	)
   285  
   286  	for {
   287  		select {
   288  		case errc := <-db.quit:
   289  			errc <- nil
   290  			return
   291  		case <-time.After(db.config.Refresh):
   292  			systemCPUUsage.Get()
   293  			var (
   294  				curNetworkIngress = metrics.DefaultRegistry.Get("p2p/InboundTraffic").(metrics.Meter).Count()
   295  				curNetworkEgress  = metrics.DefaultRegistry.Get("p2p/OutboundTraffic").(metrics.Meter).Count()
   296  				curProcessCPUTime = getProcessCPUTime()
   297  				curSystemCPUUsage = systemCPUUsage
   298  				curDiskRead       = metrics.DefaultRegistry.Get("eth/db/chaindata/compact/input").(metrics.Meter).Count()
   299  				curDiskWrite      = metrics.DefaultRegistry.Get("eth/db/chaindata/compact/output").(metrics.Meter).Count()
   300  
   301  				deltaNetworkIngress = float64(curNetworkIngress - prevNetworkIngress)
   302  				deltaNetworkEgress  = float64(curNetworkEgress - prevNetworkEgress)
   303  				deltaProcessCPUTime = curProcessCPUTime - prevProcessCPUTime
   304  				deltaSystemCPUUsage = systemCPUUsage.Delta(prevSystemCPUUsage)
   305  				deltaDiskRead       = curDiskRead - prevDiskRead
   306  				deltaDiskWrite      = curDiskWrite - prevDiskWrite
   307  			)
   308  			prevNetworkIngress = curNetworkIngress
   309  			prevNetworkEgress = curNetworkEgress
   310  			prevProcessCPUTime = curProcessCPUTime
   311  			prevSystemCPUUsage = curSystemCPUUsage
   312  			prevDiskRead = curDiskRead
   313  			prevDiskWrite = curDiskWrite
   314  
   315  			now := time.Now()
   316  
   317  			var mem runtime.MemStats
   318  			runtime.ReadMemStats(&mem)
   319  			activeMemory := &ChartEntry{
   320  				Time:  now,
   321  				Value: float64(mem.Alloc) / frequency,
   322  			}
   323  			virtualMemory := &ChartEntry{
   324  				Time:  now,
   325  				Value: float64(mem.Sys) / frequency,
   326  			}
   327  			networkIngress := &ChartEntry{
   328  				Time:  now,
   329  				Value: deltaNetworkIngress / frequency,
   330  			}
   331  			networkEgress := &ChartEntry{
   332  				Time:  now,
   333  				Value: deltaNetworkEgress / frequency,
   334  			}
   335  			processCPU := &ChartEntry{
   336  				Time:  now,
   337  				Value: deltaProcessCPUTime / frequency / numCPU * 100,
   338  			}
   339  			systemCPU := &ChartEntry{
   340  				Time:  now,
   341  				Value: float64(deltaSystemCPUUsage.Sys+deltaSystemCPUUsage.User) / frequency / numCPU,
   342  			}
   343  			diskRead := &ChartEntry{
   344  				Time:  now,
   345  				Value: float64(deltaDiskRead) / frequency,
   346  			}
   347  			diskWrite := &ChartEntry{
   348  				Time:  now,
   349  				Value: float64(deltaDiskWrite) / frequency,
   350  			}
   351  			db.charts.ActiveMemory = append(db.charts.ActiveMemory[1:], activeMemory)
   352  			db.charts.VirtualMemory = append(db.charts.VirtualMemory[1:], virtualMemory)
   353  			db.charts.NetworkIngress = append(db.charts.NetworkIngress[1:], networkIngress)
   354  			db.charts.NetworkEgress = append(db.charts.NetworkEgress[1:], networkEgress)
   355  			db.charts.ProcessCPU = append(db.charts.ProcessCPU[1:], processCPU)
   356  			db.charts.SystemCPU = append(db.charts.SystemCPU[1:], systemCPU)
   357  			db.charts.DiskRead = append(db.charts.DiskRead[1:], diskRead)
   358  			db.charts.DiskWrite = append(db.charts.DiskRead[1:], diskWrite)
   359  
   360  			db.sendToAll(&Message{
   361  				Home: &HomeMessage{
   362  					ActiveMemory:   ChartEntries{activeMemory},
   363  					VirtualMemory:  ChartEntries{virtualMemory},
   364  					NetworkIngress: ChartEntries{networkIngress},
   365  					NetworkEgress:  ChartEntries{networkEgress},
   366  					ProcessCPU:     ChartEntries{processCPU},
   367  					SystemCPU:      ChartEntries{systemCPU},
   368  					DiskRead:       ChartEntries{diskRead},
   369  					DiskWrite:      ChartEntries{diskWrite},
   370  				},
   371  			})
   372  		}
   373  	}
   374  }
   375  
   376  // collectLogs collects and sends the logs to the active dashboards.
   377  func (db *Dashboard) collectLogs() {
   378  	defer db.wg.Done()
   379  
   380  	id := 1
   381  	// TODO (kurkomisi): log collection comes here.
   382  	for {
   383  		select {
   384  		case errc := <-db.quit:
   385  			errc <- nil
   386  			return
   387  		case <-time.After(db.config.Refresh / 2):
   388  			db.sendToAll(&Message{
   389  				Logs: &LogsMessage{
   390  					Log: []string{fmt.Sprintf("%-4d: This is a fake log.", id)},
   391  				},
   392  			})
   393  			id++
   394  		}
   395  	}
   396  }
   397  
   398  // sendToAll sends the given message to the active dashboards.
   399  func (db *Dashboard) sendToAll(msg *Message) {
   400  	db.lock.Lock()
   401  	for _, c := range db.conns {
   402  		select {
   403  		case c.msg <- *msg:
   404  		default:
   405  			c.conn.Close()
   406  		}
   407  	}
   408  	db.lock.Unlock()
   409  }