github.com/anthdm/go-ethereum@v1.8.4-0.20180412101906-60516c83b011/dashboard/dashboard.go (about)

     1  // Copyright 2017 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package dashboard
    18  
    19  //go:generate yarn --cwd ./assets install
    20  //go:generate yarn --cwd ./assets build
    21  //go:generate go-bindata -nometadata -o assets.go -prefix assets -nocompress -pkg dashboard assets/index.html assets/bundle.js
    22  //go:generate sh -c "sed 's#var _bundleJs#//nolint:misspell\\\n&#' assets.go > assets.go.tmp && mv assets.go.tmp assets.go"
    23  //go:generate sh -c "sed 's#var _indexHtml#//nolint:misspell\\\n&#' assets.go > assets.go.tmp && mv assets.go.tmp assets.go"
    24  //go:generate gofmt -w -s assets.go
    25  
    26  import (
    27  	"fmt"
    28  	"net"
    29  	"net/http"
    30  	"runtime"
    31  	"sync"
    32  	"sync/atomic"
    33  	"time"
    34  
    35  	"github.com/elastic/gosigar"
    36  	"github.com/ethereum/go-ethereum/log"
    37  	"github.com/ethereum/go-ethereum/metrics"
    38  	"github.com/ethereum/go-ethereum/p2p"
    39  	"github.com/ethereum/go-ethereum/params"
    40  	"github.com/ethereum/go-ethereum/rpc"
    41  	"golang.org/x/net/websocket"
    42  )
    43  
// Per-chart history lengths. Each chart keeps a fixed-size sliding window of
// samples; New pre-fills the window with empty entries and collectData shifts
// one sample in (and the oldest out) every refresh tick.
const (
	activeMemorySampleLimit   = 200 // Maximum number of active memory data samples
	virtualMemorySampleLimit  = 200 // Maximum number of virtual memory data samples
	networkIngressSampleLimit = 200 // Maximum number of network ingress data samples
	networkEgressSampleLimit  = 200 // Maximum number of network egress data samples
	processCPUSampleLimit     = 200 // Maximum number of process cpu data samples
	systemCPUSampleLimit      = 200 // Maximum number of system cpu data samples
	diskReadSampleLimit       = 200 // Maximum number of disk read data samples
	diskWriteSampleLimit      = 200 // Maximum number of disk write data samples
)
    54  
    55  var nextID uint32 // Next connection id
    56  
// Dashboard contains the dashboard internals.
type Dashboard struct {
	config *Config // Host/port/refresh-rate configuration supplied at construction

	listener net.Listener
	conns    map[uint32]*client // Currently live websocket connections
	charts   *SystemMessage     // Rolling per-metric sample history served to newly connected clients
	commit   string             // Git commit hash reported in the general info message
	lock     sync.RWMutex       // Lock protecting the dashboard's internals

	quit chan chan error // Channel used for graceful exit; Stop sends one error channel per collector goroutine
	wg   sync.WaitGroup  // Tracks the collector and per-connection sender goroutines for shutdown
}
    70  
// client represents active websocket connection with a remote browser.
type client struct {
	conn   *websocket.Conn // Particular live websocket connection
	msg    chan Message    // Message queue for the update messages; a full queue causes the connection to be dropped
	logger log.Logger      // Logger for the particular live websocket connection, tagged with the connection id
}
    77  
    78  // New creates a new dashboard instance with the given configuration.
    79  func New(config *Config, commit string) (*Dashboard, error) {
    80  	now := time.Now()
    81  	db := &Dashboard{
    82  		conns:  make(map[uint32]*client),
    83  		config: config,
    84  		quit:   make(chan chan error),
    85  		charts: &SystemMessage{
    86  			ActiveMemory:   emptyChartEntries(now, activeMemorySampleLimit, config.Refresh),
    87  			VirtualMemory:  emptyChartEntries(now, virtualMemorySampleLimit, config.Refresh),
    88  			NetworkIngress: emptyChartEntries(now, networkIngressSampleLimit, config.Refresh),
    89  			NetworkEgress:  emptyChartEntries(now, networkEgressSampleLimit, config.Refresh),
    90  			ProcessCPU:     emptyChartEntries(now, processCPUSampleLimit, config.Refresh),
    91  			SystemCPU:      emptyChartEntries(now, systemCPUSampleLimit, config.Refresh),
    92  			DiskRead:       emptyChartEntries(now, diskReadSampleLimit, config.Refresh),
    93  			DiskWrite:      emptyChartEntries(now, diskWriteSampleLimit, config.Refresh),
    94  		},
    95  		commit: commit,
    96  	}
    97  	return db, nil
    98  }
    99  
   100  // emptyChartEntries returns a ChartEntry array containing limit number of empty samples.
   101  func emptyChartEntries(t time.Time, limit int, refresh time.Duration) ChartEntries {
   102  	ce := make(ChartEntries, limit)
   103  	for i := 0; i < limit; i++ {
   104  		ce[i] = &ChartEntry{
   105  			Time: t.Add(-time.Duration(i) * refresh),
   106  		}
   107  	}
   108  	return ce
   109  }
   110  
   111  // Protocols is a meaningless implementation of node.Service.
   112  func (db *Dashboard) Protocols() []p2p.Protocol { return nil }
   113  
   114  // APIs is a meaningless implementation of node.Service.
   115  func (db *Dashboard) APIs() []rpc.API { return nil }
   116  
   117  // Start implements node.Service, starting the data collection thread and the listening server of the dashboard.
   118  func (db *Dashboard) Start(server *p2p.Server) error {
   119  	log.Info("Starting dashboard")
   120  
   121  	db.wg.Add(2)
   122  	go db.collectData()
   123  	go db.collectLogs() // In case of removing this line change 2 back to 1 in wg.Add.
   124  
   125  	http.HandleFunc("/", db.webHandler)
   126  	http.Handle("/api", websocket.Handler(db.apiHandler))
   127  
   128  	listener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", db.config.Host, db.config.Port))
   129  	if err != nil {
   130  		return err
   131  	}
   132  	db.listener = listener
   133  
   134  	go http.Serve(listener, nil)
   135  
   136  	return nil
   137  }
   138  
   139  // Stop implements node.Service, stopping the data collection thread and the connection listener of the dashboard.
   140  func (db *Dashboard) Stop() error {
   141  	// Close the connection listener.
   142  	var errs []error
   143  	if err := db.listener.Close(); err != nil {
   144  		errs = append(errs, err)
   145  	}
   146  	// Close the collectors.
   147  	errc := make(chan error, 1)
   148  	for i := 0; i < 2; i++ {
   149  		db.quit <- errc
   150  		if err := <-errc; err != nil {
   151  			errs = append(errs, err)
   152  		}
   153  	}
   154  	// Close the connections.
   155  	db.lock.Lock()
   156  	for _, c := range db.conns {
   157  		if err := c.conn.Close(); err != nil {
   158  			c.logger.Warn("Failed to close connection", "err", err)
   159  		}
   160  	}
   161  	db.lock.Unlock()
   162  
   163  	// Wait until every goroutine terminates.
   164  	db.wg.Wait()
   165  	log.Info("Dashboard stopped")
   166  
   167  	var err error
   168  	if len(errs) > 0 {
   169  		err = fmt.Errorf("%v", errs)
   170  	}
   171  
   172  	return err
   173  }
   174  
   175  // webHandler handles all non-api requests, simply flattening and returning the dashboard website.
   176  func (db *Dashboard) webHandler(w http.ResponseWriter, r *http.Request) {
   177  	log.Debug("Request", "URL", r.URL)
   178  
   179  	path := r.URL.String()
   180  	if path == "/" {
   181  		path = "/index.html"
   182  	}
   183  	blob, err := Asset(path[1:])
   184  	if err != nil {
   185  		log.Warn("Failed to load the asset", "path", path, "err", err)
   186  		http.Error(w, "not found", http.StatusNotFound)
   187  		return
   188  	}
   189  	w.Write(blob)
   190  }
   191  
// apiHandler handles requests for the dashboard.
//
// One invocation per websocket connection. It assigns the connection a unique
// id, spawns a sender goroutine that drains the client's message queue, pushes
// the accumulated chart history plus version info as the first message,
// registers the client in db.conns, and then blocks reading from the socket
// purely to detect connection loss.
func (db *Dashboard) apiHandler(conn *websocket.Conn) {
	// Atomic increment: apiHandler runs concurrently for each new connection.
	id := atomic.AddUint32(&nextID, 1)
	client := &client{
		conn:   conn,
		msg:    make(chan Message, 128), // Buffered so sendToAll can drop slow clients instead of blocking
		logger: log.New("id", id),
	}
	// Closed by the read loop below to stop the sender goroutine.
	done := make(chan struct{})

	// Start listening for messages to send.
	db.wg.Add(1)
	go func() {
		defer db.wg.Done()

		for {
			select {
			case <-done:
				return
			case msg := <-client.msg:
				if err := websocket.JSON.Send(client.conn, msg); err != nil {
					client.logger.Warn("Failed to send the message", "msg", msg, "err", err)
					client.conn.Close()
					return
				}
			}
		}
	}()

	versionMeta := ""
	if len(params.VersionMeta) > 0 {
		versionMeta = fmt.Sprintf(" (%s)", params.VersionMeta)
	}
	// Send the past data. Queued before the client is registered in db.conns,
	// so the history message always precedes any live update from sendToAll.
	client.msg <- Message{
		General: &GeneralMessage{
			Version: fmt.Sprintf("v%d.%d.%d%s", params.VersionMajor, params.VersionMinor, params.VersionPatch, versionMeta),
			Commit:  db.commit,
		},
		System: &SystemMessage{
			ActiveMemory:   db.charts.ActiveMemory,
			VirtualMemory:  db.charts.VirtualMemory,
			NetworkIngress: db.charts.NetworkIngress,
			NetworkEgress:  db.charts.NetworkEgress,
			ProcessCPU:     db.charts.ProcessCPU,
			SystemCPU:      db.charts.SystemCPU,
			DiskRead:       db.charts.DiskRead,
			DiskWrite:      db.charts.DiskWrite,
		},
	}
	// Start tracking the connection and drop at connection loss.
	db.lock.Lock()
	db.conns[id] = client
	db.lock.Unlock()
	defer func() {
		db.lock.Lock()
		delete(db.conns, id)
		db.lock.Unlock()
	}()
	for {
		// Zero-length read: used only as a liveness probe — it returns an
		// error once the peer closes the connection.
		fail := []byte{}
		if _, err := conn.Read(fail); err != nil {
			close(done)
			return
		}
		// Ignore all messages
	}
}
   260  
   261  // collectData collects the required data to plot on the dashboard.
   262  func (db *Dashboard) collectData() {
   263  	defer db.wg.Done()
   264  	systemCPUUsage := gosigar.Cpu{}
   265  	systemCPUUsage.Get()
   266  	var (
   267  		mem runtime.MemStats
   268  
   269  		prevNetworkIngress = metrics.DefaultRegistry.Get("p2p/InboundTraffic").(metrics.Meter).Count()
   270  		prevNetworkEgress  = metrics.DefaultRegistry.Get("p2p/OutboundTraffic").(metrics.Meter).Count()
   271  		prevProcessCPUTime = getProcessCPUTime()
   272  		prevSystemCPUUsage = systemCPUUsage
   273  		prevDiskRead       = metrics.DefaultRegistry.Get("eth/db/chaindata/disk/read").(metrics.Meter).Count()
   274  		prevDiskWrite      = metrics.DefaultRegistry.Get("eth/db/chaindata/disk/write").(metrics.Meter).Count()
   275  
   276  		frequency = float64(db.config.Refresh / time.Second)
   277  		numCPU    = float64(runtime.NumCPU())
   278  	)
   279  
   280  	for {
   281  		select {
   282  		case errc := <-db.quit:
   283  			errc <- nil
   284  			return
   285  		case <-time.After(db.config.Refresh):
   286  			systemCPUUsage.Get()
   287  			var (
   288  				curNetworkIngress = metrics.DefaultRegistry.Get("p2p/InboundTraffic").(metrics.Meter).Count()
   289  				curNetworkEgress  = metrics.DefaultRegistry.Get("p2p/OutboundTraffic").(metrics.Meter).Count()
   290  				curProcessCPUTime = getProcessCPUTime()
   291  				curSystemCPUUsage = systemCPUUsage
   292  				curDiskRead       = metrics.DefaultRegistry.Get("eth/db/chaindata/disk/read").(metrics.Meter).Count()
   293  				curDiskWrite      = metrics.DefaultRegistry.Get("eth/db/chaindata/disk/write").(metrics.Meter).Count()
   294  
   295  				deltaNetworkIngress = float64(curNetworkIngress - prevNetworkIngress)
   296  				deltaNetworkEgress  = float64(curNetworkEgress - prevNetworkEgress)
   297  				deltaProcessCPUTime = curProcessCPUTime - prevProcessCPUTime
   298  				deltaSystemCPUUsage = curSystemCPUUsage.Delta(prevSystemCPUUsage)
   299  				deltaDiskRead       = curDiskRead - prevDiskRead
   300  				deltaDiskWrite      = curDiskWrite - prevDiskWrite
   301  			)
   302  			prevNetworkIngress = curNetworkIngress
   303  			prevNetworkEgress = curNetworkEgress
   304  			prevProcessCPUTime = curProcessCPUTime
   305  			prevSystemCPUUsage = curSystemCPUUsage
   306  			prevDiskRead = curDiskRead
   307  			prevDiskWrite = curDiskWrite
   308  
   309  			now := time.Now()
   310  
   311  			runtime.ReadMemStats(&mem)
   312  			activeMemory := &ChartEntry{
   313  				Time:  now,
   314  				Value: float64(mem.Alloc) / frequency,
   315  			}
   316  			virtualMemory := &ChartEntry{
   317  				Time:  now,
   318  				Value: float64(mem.Sys) / frequency,
   319  			}
   320  			networkIngress := &ChartEntry{
   321  				Time:  now,
   322  				Value: deltaNetworkIngress / frequency,
   323  			}
   324  			networkEgress := &ChartEntry{
   325  				Time:  now,
   326  				Value: deltaNetworkEgress / frequency,
   327  			}
   328  			processCPU := &ChartEntry{
   329  				Time:  now,
   330  				Value: deltaProcessCPUTime / frequency / numCPU * 100,
   331  			}
   332  			systemCPU := &ChartEntry{
   333  				Time:  now,
   334  				Value: float64(deltaSystemCPUUsage.Sys+deltaSystemCPUUsage.User) / frequency / numCPU,
   335  			}
   336  			diskRead := &ChartEntry{
   337  				Time:  now,
   338  				Value: float64(deltaDiskRead) / frequency,
   339  			}
   340  			diskWrite := &ChartEntry{
   341  				Time:  now,
   342  				Value: float64(deltaDiskWrite) / frequency,
   343  			}
   344  			db.charts.ActiveMemory = append(db.charts.ActiveMemory[1:], activeMemory)
   345  			db.charts.VirtualMemory = append(db.charts.VirtualMemory[1:], virtualMemory)
   346  			db.charts.NetworkIngress = append(db.charts.NetworkIngress[1:], networkIngress)
   347  			db.charts.NetworkEgress = append(db.charts.NetworkEgress[1:], networkEgress)
   348  			db.charts.ProcessCPU = append(db.charts.ProcessCPU[1:], processCPU)
   349  			db.charts.SystemCPU = append(db.charts.SystemCPU[1:], systemCPU)
   350  			db.charts.DiskRead = append(db.charts.DiskRead[1:], diskRead)
   351  			db.charts.DiskWrite = append(db.charts.DiskRead[1:], diskWrite)
   352  
   353  			db.sendToAll(&Message{
   354  				System: &SystemMessage{
   355  					ActiveMemory:   ChartEntries{activeMemory},
   356  					VirtualMemory:  ChartEntries{virtualMemory},
   357  					NetworkIngress: ChartEntries{networkIngress},
   358  					NetworkEgress:  ChartEntries{networkEgress},
   359  					ProcessCPU:     ChartEntries{processCPU},
   360  					SystemCPU:      ChartEntries{systemCPU},
   361  					DiskRead:       ChartEntries{diskRead},
   362  					DiskWrite:      ChartEntries{diskWrite},
   363  				},
   364  			})
   365  		}
   366  	}
   367  }
   368  
   369  // collectLogs collects and sends the logs to the active dashboards.
   370  func (db *Dashboard) collectLogs() {
   371  	defer db.wg.Done()
   372  
   373  	id := 1
   374  	// TODO (kurkomisi): log collection comes here.
   375  	for {
   376  		select {
   377  		case errc := <-db.quit:
   378  			errc <- nil
   379  			return
   380  		case <-time.After(db.config.Refresh / 2):
   381  			db.sendToAll(&Message{
   382  				Logs: &LogsMessage{
   383  					Log: []string{fmt.Sprintf("%-4d: This is a fake log.", id)},
   384  				},
   385  			})
   386  			id++
   387  		}
   388  	}
   389  }
   390  
   391  // sendToAll sends the given message to the active dashboards.
   392  func (db *Dashboard) sendToAll(msg *Message) {
   393  	db.lock.Lock()
   394  	for _, c := range db.conns {
   395  		select {
   396  		case c.msg <- *msg:
   397  		default:
   398  			c.conn.Close()
   399  		}
   400  	}
   401  	db.lock.Unlock()
   402  }