github.com/ergo-services/ergo@v1.999.224/node/monitor.go

package node

// http://erlang.org/doc/reference_manual/processes.html

import (
	"fmt"
	"reflect"
	"sync"

	"github.com/ergo-services/ergo/etf"
	"github.com/ergo-services/ergo/gen"
	"github.com/ergo-services/ergo/lib"
)

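// monitorItem describes a single monitor: the pid of the monitoring
// process and the reference that identifies this monitor.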
type monitorItem struct {
	pid etf.Pid // by
	ref etf.Ref
}

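// eventItem holds a registered event: its owner, the message types
// allowed for this event, and the pids subscribed to it.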
type eventItem struct {
	owner        etf.Pid
	messageTypes map[string]bool
	monitors     []etf.Pid
}

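// monitorInternal is the monitoring API exposed to the core router:
// routing of links, monitors and exit signals between local and remote
// processes, plus node and event monitoring helpers.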
type monitorInternal interface {
	// RouteLink
	RouteLink(pidA etf.Pid, pidB etf.Pid) error
	// RouteUnlink
	RouteUnlink(pidA etf.Pid, pidB etf.Pid) error
	// RouteExit
	RouteExit(to etf.Pid, terminated etf.Pid, reason string) error
	// RouteMonitorReg
	RouteMonitorReg(by etf.Pid, process gen.ProcessID, ref etf.Ref) error
	// RouteMonitor
	RouteMonitor(by etf.Pid, process etf.Pid, ref etf.Ref) error
	// RouteDemonitor
	RouteDemonitor(by etf.Pid, ref etf.Ref) error
	// RouteMonitorExitReg
	RouteMonitorExitReg(terminated gen.ProcessID, reason string, ref etf.Ref) error
	// RouteMonitorExit
	RouteMonitorExit(terminated etf.Pid, reason string, ref etf.Ref) error
	// RouteNodeDown
	RouteNodeDown(name string, disconnect *ProxyDisconnect)

	// IsMonitor
	IsMonitor(ref etf.Ref) bool

	monitorNode(by etf.Pid, node string, ref etf.Ref)
	demonitorNode(ref etf.Ref) bool

	registerEvent(by etf.Pid, event gen.Event, messages []gen.EventMessage) error
	unregisterEvent(by etf.Pid, event gen.Event) error
	monitorEvent(by etf.Pid, event gen.Event) error
	demonitorEvent(by etf.Pid, event gen.Event) error
	sendEvent(by etf.Pid, event gen.Event, message gen.EventMessage) error

	handleTerminated(terminated etf.Pid, name, reason string)

	processLinks(process etf.Pid) []etf.Pid
	processMonitors(process etf.Pid) []etf.Pid
	processMonitorsByName(process etf.Pid) []gen.ProcessID
	processMonitoredBy(process etf.Pid) []etf.Pid

	monitorStats() internalMonitorStats
}

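// internalMonitorStats is a snapshot of the number of registered
// monitors and links; see monitorStats.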
type internalMonitorStats struct {
	monitorsByPid  int
	monitorsByName int
	monitorsNodes  int
	links          int
}

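// monitor implements monitorInternal. Each group of tables (monitors by pid,
// monitors by name, links, node monitors, events) is guarded by its own RWMutex.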
type monitor struct {
	// monitors by pid
	processes      map[etf.Pid][]monitorItem
	ref2pid        map[etf.Ref]etf.Pid
	mutexProcesses sync.RWMutex
	// monitors by name
	names      map[gen.ProcessID][]monitorItem
	ref2name   map[etf.Ref]gen.ProcessID
	mutexNames sync.RWMutex

	// links
	links      map[etf.Pid][]etf.Pid
	mutexLinks sync.RWMutex

	// monitors of nodes
	nodes      map[string][]monitorItem
	ref2node   map[etf.Ref]string
	mutexNodes sync.RWMutex

	// monitors of events
	events      map[gen.Event]eventItem
	pid2events  map[etf.Pid][]gen.Event
	mutexEvents sync.RWMutex

	nodename string
	router   coreRouterInternal
}

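// newMonitor creates a monitor instance for the given node name and router,
// e.g. m := newMonitor("demo@127.0.0.1", router).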
func newMonitor(nodename string, router coreRouterInternal) monitorInternal {
	return &monitor{
		processes: make(map[etf.Pid][]monitorItem),
		names:     make(map[gen.ProcessID][]monitorItem),
		links:     make(map[etf.Pid][]etf.Pid),
		nodes:     make(map[string][]monitorItem),

		ref2pid:  make(map[etf.Ref]etf.Pid),
		ref2name: make(map[etf.Ref]gen.ProcessID),
		ref2node: make(map[etf.Ref]string),

		events:     make(map[gen.Event]eventItem),
		pid2events: make(map[etf.Pid][]gen.Event),

		nodename: nodename,
		router:   router,
	}
}

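// monitorNode starts monitoring of the given node on behalf of 'by'. If there
// is no connection to the node, the node-down notification is delivered
// immediately via RouteNodeDown.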
func (m *monitor) monitorNode(by etf.Pid, node string, ref etf.Ref) {
	lib.Log("[%s] MONITOR NODE : %v => %s", m.nodename, by, node)

	m.mutexNodes.Lock()

	l := m.nodes[node]
	item := monitorItem{
		pid: by,
		ref: ref,
	}
	m.nodes[node] = append(l, item)
	m.ref2node[ref] = node
	m.mutexNodes.Unlock()

	_, err := m.router.getConnection(node)
	if err != nil {
		m.RouteNodeDown(node, nil)
	}
}

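// demonitorNode removes the node monitor identified by ref and reports
// whether the reference was known. Removal uses the usual trick in this
// file: overwrite the matched slot with the first element and cut the
// head, which is O(1) and does not preserve order.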
func (m *monitor) demonitorNode(ref etf.Ref) bool {
	var name string
	var ok bool

	m.mutexNodes.Lock()
	defer m.mutexNodes.Unlock()

	if name, ok = m.ref2node[ref]; !ok {
		return false
	}

	l := m.nodes[name]

	// remove the monitor from the node's monitor list
	for i := range l {
		if l[i].ref != ref {
			continue
		}

		l[i] = l[0]
		l = l[1:]
		break
	}
	delete(m.ref2node, ref)

	if len(l) == 0 {
		delete(m.nodes, name)
	} else {
		m.nodes[name] = l
	}

	return true
}

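// RouteNodeDown is invoked when the connection to the given node (or the
// proxy path to it, if disconnect is not nil) is lost. It notifies the node
// monitors, the process monitors and the linked processes, and cleans up
// the affected entries.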
func (m *monitor) RouteNodeDown(name string, disconnect *ProxyDisconnect) {
	lib.Log("[%s] MONITOR NODE down: %v", m.nodename, name)

	// notify node monitors (write lock: we delete the entry below)
	m.mutexNodes.Lock()
	if pids, ok := m.nodes[name]; ok {
		for i := range pids {
			lib.Log("[%s] MONITOR node down: %v. send notify to: %s", m.nodename, name, pids[i].pid)
			if disconnect == nil {
				message := gen.MessageNodeDown{Ref: pids[i].ref, Name: name}
				m.router.RouteSend(etf.Pid{}, pids[i].pid, message)
				continue
			}
			message := gen.MessageProxyDown{
				Ref:    pids[i].ref,
				Node:   disconnect.Node,
				Proxy:  disconnect.Proxy,
				Reason: disconnect.Reason,
			}
			m.router.RouteSend(etf.Pid{}, pids[i].pid, message)
		}
		delete(m.nodes, name)
	}
	m.mutexNodes.Unlock()

	// notify processes that created monitors by pid
	m.mutexProcesses.Lock()
	for pid, ps := range m.processes {
		if string(pid.Node) != name {
			continue
		}
		for i := range ps {
			// args: (to, terminated, reason, ref)
			delete(m.ref2pid, ps[i].ref)
			if disconnect == nil || disconnect.Node == name {
				m.sendMonitorExit(ps[i].pid, pid, "noconnection", ps[i].ref)
				continue
			}
			m.sendMonitorExit(ps[i].pid, pid, "noproxy", ps[i].ref)
		}
		delete(m.processes, pid)
	}
	m.mutexProcesses.Unlock()

	// notify processes that created monitors by name
	m.mutexNames.Lock()
	for processID, ps := range m.names {
		if processID.Node != name {
			continue
		}
		for i := range ps {
			// args: (to, terminated, reason, ref)
			delete(m.ref2name, ps[i].ref)
			if disconnect == nil || disconnect.Node == name {
				m.sendMonitorExitReg(ps[i].pid, processID, "noconnection", ps[i].ref)
				continue
			}
			m.sendMonitorExitReg(ps[i].pid, processID, "noproxy", ps[i].ref)
		}
		delete(m.names, processID)
	}
	m.mutexNames.Unlock()

	// notify linked processes
	m.mutexLinks.Lock()
	for link, pids := range m.links {
		if link.Node != etf.Atom(name) {
			continue
		}

		for i := range pids {
			if disconnect == nil || disconnect.Node == name {
				m.sendExit(pids[i], link, "noconnection")
			} else {
				m.sendExit(pids[i], link, "noproxy")
			}
			p, ok := m.links[pids[i]]
			if !ok {
				continue
			}

			// remove all backward links that belong to the lost node
			removed := 0
			for k := range p {
				if p[k].Node != etf.Atom(name) {
					continue
				}
				p[k] = p[removed]
				removed++
			}
			p = p[removed:]

			if len(p) > 0 {
				m.links[pids[i]] = p
				continue
			}

			delete(m.links, pids[i])
		}

		delete(m.links, link)
	}
	m.mutexLinks.Unlock()
}

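// handleTerminated is invoked on local process termination. It notifies the
// monitors created by name and by pid, the linked processes and the
// subscribers of the events owned by the terminated process, and removes
// all related entries.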
func (m *monitor) handleTerminated(terminated etf.Pid, name string, reason string) {
	lib.Log("[%s] MONITOR process terminated: %v", m.nodename, terminated)

	// if the terminated process had a name, make sure to clean up
	// all the monitors created by that name
	m.mutexNames.Lock()
	if name != "" {
		terminatedProcessID := gen.ProcessID{Name: name, Node: m.nodename}
		if items, ok := m.names[terminatedProcessID]; ok {
			for i := range items {
				lib.Log("[%s] MONITOR process terminated: %s. send notify to: %s", m.nodename, terminatedProcessID, items[i].pid)
				m.sendMonitorExitReg(items[i].pid, terminatedProcessID, reason, items[i].ref)
				delete(m.ref2name, items[i].ref)
			}
			delete(m.names, terminatedProcessID)
		}
	}
	m.mutexNames.Unlock()

	// check whether the terminated process has been monitored by pid
	m.mutexProcesses.Lock()
	if items, ok := m.processes[terminated]; ok {
		for i := range items {
			lib.Log("[%s] MONITOR process terminated: %s. send notify to: %s", m.nodename, terminated, items[i].pid)
			m.sendMonitorExit(items[i].pid, terminated, reason, items[i].ref)
			delete(m.ref2pid, items[i].ref)
		}
		delete(m.processes, terminated)
	}
	m.mutexProcesses.Unlock()

	m.mutexLinks.Lock()
	if pidLinks, ok := m.links[terminated]; ok {
		for i := range pidLinks {
			lib.Log("[%s] LINK process exited: %s. send notify to: %s", m.nodename, terminated, pidLinks[i])
			m.sendExit(pidLinks[i], terminated, reason)

			// remove the backward link
			pids, ok := m.links[pidLinks[i]]
			if !ok {
				continue
			}
			for k := range pids {
				if pids[k] != terminated {
					continue
				}
				pids[k] = pids[0]
				pids = pids[1:]
				break
			}

			if len(pids) > 0 {
				m.links[pidLinks[i]] = pids
			} else {
				delete(m.links, pidLinks[i])
			}
		}
		// remove link
		delete(m.links, terminated)
	}
	m.mutexLinks.Unlock()

	// check for event owning and monitoring
	m.mutexEvents.Lock()
	events, exist := m.pid2events[terminated]
	if exist == false {
		// this process hasn't been involved in any events
		m.mutexEvents.Unlock()
		return
	}

	for _, e := range events {
		item := m.events[e]
		if item.owner == terminated {
			// the terminated process owned this event: notify the
			// subscribers and remove the event
			message := gen.MessageEventDown{
				Event:  e,
				Reason: reason,
			}
			for _, pid := range item.monitors {
				pidevents := m.pid2events[pid]
				removed := 0
				for i := range pidevents {
					if pidevents[i] != e {
						continue
					}
					m.router.RouteSend(etf.Pid{}, pid, message)
					pidevents[i] = pidevents[removed]
					removed++
				}
				pidevents = pidevents[removed:]
				if len(pidevents) == 0 {
					delete(m.pid2events, pid)
				} else {
					m.pid2events[pid] = pidevents
				}
			}
			delete(m.events, e)
			continue
		}

		// the terminated process was a subscriber of this event
		removed := 0
		for i := range item.monitors {
			if item.monitors[i] != terminated {
				continue
			}
			item.monitors[i] = item.monitors[removed]
			removed++
		}
		item.monitors = item.monitors[removed:]
		m.events[e] = item
	}

	delete(m.pid2events, terminated)
	m.mutexEvents.Unlock()
}

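// processLinks, processMonitors, processMonitorsByName and processMonitoredBy
// report the links and monitors currently associated with the given process.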
func (m *monitor) processLinks(process etf.Pid) []etf.Pid {
	m.mutexLinks.RLock()
	defer m.mutexLinks.RUnlock()

	if l, ok := m.links[process]; ok {
		return l
	}
	return nil
}

func (m *monitor) processMonitors(process etf.Pid) []etf.Pid {
	monitors := []etf.Pid{}
	m.mutexProcesses.RLock()
	defer m.mutexProcesses.RUnlock()

	for p, by := range m.processes {
		for b := range by {
			if by[b].pid == process {
				monitors = append(monitors, p)
			}
		}
	}
	return monitors
}

func (m *monitor) processMonitorsByName(process etf.Pid) []gen.ProcessID {
	monitors := []gen.ProcessID{}
	m.mutexNames.RLock()
	defer m.mutexNames.RUnlock()

	for processID, by := range m.names {
		for b := range by {
			if by[b].pid == process {
				monitors = append(monitors, processID)
			}
		}
	}
	return monitors
}

func (m *monitor) processMonitoredBy(process etf.Pid) []etf.Pid {
	monitors := []etf.Pid{}
	m.mutexProcesses.RLock()
	defer m.mutexProcesses.RUnlock()
	if items, ok := m.processes[process]; ok {
		for i := range items {
			monitors = append(monitors, items[i].pid)
		}
	}
	return monitors
}

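// IsMonitor reports whether the given reference belongs to a monitor
// created on this node (either by pid or by registered name).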
func (m *monitor) IsMonitor(ref etf.Ref) bool {
	m.mutexProcesses.RLock()
	if _, ok := m.ref2pid[ref]; ok {
		m.mutexProcesses.RUnlock()
		return true
	}
	m.mutexProcesses.RUnlock()

	m.mutexNames.RLock()
	defer m.mutexNames.RUnlock()
	if _, ok := m.ref2name[ref]; ok {
		return true
	}
	return false
}


//
// implementation of CoreRouter interface:
//
// RouteLink
// RouteUnlink
// RouteExit
// RouteMonitor
// RouteMonitorReg
// RouteDemonitor
// RouteMonitorExit
// RouteMonitorExitReg
//

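// RouteLink creates a bidirectional link between pidA and pidB. For a remote
// pidB the link request is forwarded to the peer connection; on failure an
// 'EXIT' signal is sent back to pidA instead of returning an error.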
func (m *monitor) RouteLink(pidA etf.Pid, pidB etf.Pid) error {
	lib.Log("[%s] LINK process: %v => %v", m.nodename, pidA, pidB)

	// http://erlang.org/doc/reference_manual/processes.html#links
	// Links are bidirectional and there can only be one link between
	// two processes. Repeated calls to link(Pid) have no effect.

	// Returns an error if the link already exists or if a process
	// attempts to create a link to itself.

	if pidA == pidB {
		return fmt.Errorf("can not link to itself")
	}

	m.mutexLinks.RLock()
	linksA := m.links[pidA]
	if pidA.Node == etf.Atom(m.nodename) {
		// check if these processes are linked already (source)
		for i := range linksA {
			if linksA[i] == pidB {
				m.mutexLinks.RUnlock()
				return fmt.Errorf("already linked")
			}
		}
	}
	m.mutexLinks.RUnlock()

	// check if these processes are linked already (destination)
	m.mutexLinks.RLock()
	linksB := m.links[pidB]
	for i := range linksB {
		if linksB[i] == pidA {
			m.mutexLinks.RUnlock()
			return fmt.Errorf("already linked")
		}
	}
	m.mutexLinks.RUnlock()

	if pidB.Node == etf.Atom(m.nodename) {
		// for the local process we should make sure it is alive,
		// otherwise send an 'EXIT' message with 'noproc' as the reason
		if p := m.router.processByPid(pidB); p == nil {
			m.sendExit(pidA, pidB, "noproc")
			return lib.ErrProcessUnknown
		}
		m.mutexLinks.Lock()
		m.links[pidA] = append(linksA, pidB)
		m.links[pidB] = append(linksB, pidA)
		m.mutexLinks.Unlock()
		return nil
	}

	// linking with a remote process
	connection, err := m.router.getConnection(string(pidB.Node))
	if err != nil {
		m.sendExit(pidA, pidB, "noconnection")
		return nil
	}

	if err := connection.Link(pidA, pidB); err != nil {
		m.sendExit(pidA, pidB, err.Error())
		return nil
	}

	m.mutexLinks.Lock()
	m.links[pidA] = append(linksA, pidB)
	m.links[pidB] = append(linksB, pidA)
	m.mutexLinks.Unlock()
	return nil
}

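// RouteUnlink removes the link between pidA and pidB. For a remote pidB
// the unlink request is forwarded to the peer connection.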
func (m *monitor) RouteUnlink(pidA etf.Pid, pidB etf.Pid) error {
	m.mutexLinks.Lock()
	defer m.mutexLinks.Unlock()

	if pidA.Node == etf.Atom(m.nodename) {
		linksA := m.links[pidA]
		for i := range linksA {
			if linksA[i] != pidB {
				continue
			}

			linksA[i] = linksA[0]
			linksA = linksA[1:]
			if len(linksA) > 0 {
				m.links[pidA] = linksA
			} else {
				delete(m.links, pidA)
			}
			break
		}
	}

	linksB := m.links[pidB]
	for i := range linksB {
		if linksB[i] != pidA {
			continue
		}
		linksB[i] = linksB[0]
		linksB = linksB[1:]
		if len(linksB) > 0 {
			m.links[pidB] = linksB
		} else {
			delete(m.links, pidB)
		}
		break
	}

	if pidB.Node != etf.Atom(m.nodename) {
		connection, err := m.router.getConnection(string(pidB.Node))
		if err != nil {
			m.sendExit(pidA, pidB, "noconnection")
			return err
		}
		if err := connection.Unlink(pidA, pidB); err != nil {
			m.sendExit(pidA, pidB, err.Error())
			return err
		}
	}
	return nil
}

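// RouteExit propagates the exit signal of the terminated process to all
// processes linked to it and removes the links.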
func (m *monitor) RouteExit(to etf.Pid, terminated etf.Pid, reason string) error {
	m.mutexLinks.Lock()
	defer m.mutexLinks.Unlock()

	pidLinks, ok := m.links[terminated]
	if !ok {
		return nil
	}
	for i := range pidLinks {
		lib.Log("[%s] LINK process exited: %s. send notify to: %s", m.nodename, terminated, pidLinks[i])
		m.sendExit(pidLinks[i], terminated, reason)

		// remove the backward link
		pids, ok := m.links[pidLinks[i]]
		if !ok {
			continue
		}
		for k := range pids {
			if pids[k] != terminated {
				continue
			}
			pids[k] = pids[0]
			pids = pids[1:]
			break
		}

		if len(pids) > 0 {
			m.links[pidLinks[i]] = pids
		} else {
			delete(m.links, pidLinks[i])
		}
	}
	// remove link
	delete(m.links, terminated)
	return nil
}

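// RouteMonitor makes 'by' monitor the process with the given pid. If the
// target is local and does not exist, gen.MessageDown with reason "noproc"
// is delivered immediately; for a remote target the monitor request is
// forwarded to the peer connection first.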
func (m *monitor) RouteMonitor(by etf.Pid, pid etf.Pid, ref etf.Ref) error {
	lib.Log("[%s] MONITOR process: %s => %s", m.nodename, by, pid)

	// If 'pid' belongs to this node, we must make sure it is alive.
	// http://erlang.org/doc/reference_manual/processes.html#monitors
	// If Pid does not exist, a gen.MessageDown must be
	// sent immediately with Reason set to noproc.
	if p := m.router.processByPid(pid); string(pid.Node) == m.nodename && p == nil {
		return m.sendMonitorExit(by, pid, "noproc", ref)
	}

	if string(pid.Node) != m.nodename {
		connection, err := m.router.getConnection(string(pid.Node))
		if err != nil {
			m.sendMonitorExit(by, pid, "noconnection", ref)
			return err
		}

		if err := connection.Monitor(by, pid, ref); err != nil {
			switch err {
			case lib.ErrPeerUnsupported:
				m.sendMonitorExit(by, pid, "unsupported", ref)
			case lib.ErrProcessIncarnation:
				m.sendMonitorExit(by, pid, "incarnation", ref)
			default:
				m.sendMonitorExit(by, pid, "noconnection", ref)
			}
			return err
		}
	}

	m.mutexProcesses.Lock()
	l := m.processes[pid]
	item := monitorItem{
		pid: by,
		ref: ref,
	}
	m.processes[pid] = append(l, item)
	m.ref2pid[ref] = pid
	m.mutexProcesses.Unlock()

	return nil
}

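// RouteMonitorReg makes 'by' monitor the process registered under the given
// name. The semantics match RouteMonitor, with gen.MessageDown carrying the
// ProcessID instead of the pid.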
func (m *monitor) RouteMonitorReg(by etf.Pid, process gen.ProcessID, ref etf.Ref) error {
	// If 'process' belongs to this node and does not exist, a gen.MessageDown
	// must be sent immediately with Reason set to noproc.
	if p := m.router.ProcessByName(process.Name); process.Node == m.nodename && p == nil {
		return m.sendMonitorExitReg(by, process, "noproc", ref)
	}
	if process.Node != m.nodename {
		connection, err := m.router.getConnection(process.Node)
		if err != nil {
			m.sendMonitorExitReg(by, process, "noconnection", ref)
			return err
		}

		if err := connection.MonitorReg(by, process, ref); err != nil {
			if err == lib.ErrPeerUnsupported {
				m.sendMonitorExitReg(by, process, "unsupported", ref)
			} else {
				m.sendMonitorExitReg(by, process, "noconnection", ref)
			}
			return err
		}
	}

	m.mutexNames.Lock()
	l := m.names[process]
	item := monitorItem{
		pid: by,
		ref: ref,
	}
	m.names[process] = append(l, item)
	m.ref2name[ref] = process
	m.mutexNames.Unlock()

	return nil
}

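// RouteDemonitor removes the monitor identified by ref, whether it was
// created by pid or by registered name, and forwards the demonitor request
// to the peer connection for a remote target.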
func (m *monitor) RouteDemonitor(by etf.Pid, ref etf.Ref) error {
	m.mutexProcesses.RLock()
	pid, knownRefByPid := m.ref2pid[ref]
	m.mutexProcesses.RUnlock()

	if knownRefByPid == false {
		// monitor was created by process name
		m.mutexNames.Lock()
		defer m.mutexNames.Unlock()
		processID, knownRefByName := m.ref2name[ref]
		if knownRefByName == false {
			// unknown monitor reference
			return lib.ErrMonitorUnknown
		}
		items := m.names[processID]

		for i := range items {
			if items[i].pid != by {
				continue
			}
			if items[i].ref != ref {
				continue
			}

			items[i] = items[0]
			items = items[1:]

			if len(items) == 0 {
				delete(m.names, processID)
			} else {
				m.names[processID] = items
			}
			delete(m.ref2name, ref)

			if processID.Node != m.nodename {
				connection, err := m.router.getConnection(processID.Node)
				if err != nil {
					return err
				}
				return connection.DemonitorReg(by, processID, ref)
			}
			return nil
		}
		return nil
	}

	// monitor was created by pid

	// checking the monitorItem list
	m.mutexProcesses.Lock()
	defer m.mutexProcesses.Unlock()
	items := m.processes[pid]

	// remove PID from the monitoring processes list
	for i := range items {
		if items[i].pid != by {
			continue
		}
		if items[i].ref != ref {
			continue
		}

		items[i] = items[0]
		items = items[1:]

		if len(items) == 0 {
			delete(m.processes, pid)
		} else {
			m.processes[pid] = items
		}
		delete(m.ref2pid, ref)

		if string(pid.Node) != m.nodename {
			connection, err := m.router.getConnection(string(pid.Node))
			if err != nil {
				return err
			}
			return connection.Demonitor(by, pid, ref)
		}

		return nil
	}
	return nil
}

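// RouteMonitorExit handles a 'DOWN' signal for a monitor created by pid:
// it notifies the monitoring process matched by ref and removes the entry.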
func (m *monitor) RouteMonitorExit(terminated etf.Pid, reason string, ref etf.Ref) error {
	m.mutexProcesses.Lock()
	defer m.mutexProcesses.Unlock()

	items, ok := m.processes[terminated]
	if !ok {
		return nil
	}

	for i := range items {
		if items[i].ref != ref {
			continue
		}
		lib.Log("[%s] MONITOR process terminated: %s. send notify to: %s", m.nodename, terminated, items[i].pid)

		delete(m.ref2pid, items[i].ref)
		m.sendMonitorExit(items[i].pid, terminated, reason, items[i].ref)

		items[i] = items[0]
		items = items[1:]
		if len(items) == 0 {
			delete(m.processes, terminated)
			return nil
		}
		m.processes[terminated] = items
		return nil
	}

	return nil
}

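// RouteMonitorExitReg handles a 'DOWN' signal for a monitor created by
// registered process name.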
func (m *monitor) RouteMonitorExitReg(terminated gen.ProcessID, reason string, ref etf.Ref) error {
	m.mutexNames.Lock()
	defer m.mutexNames.Unlock()

	items, ok := m.names[terminated]
	if !ok {
		return nil
	}

	for i := range items {
		if items[i].ref != ref {
			continue
		}
		lib.Log("[%s] MONITOR process terminated: %s. send notify to: %s", m.nodename, terminated, items[i].pid)

		delete(m.ref2name, items[i].ref)
		m.sendMonitorExitReg(items[i].pid, terminated, reason, items[i].ref)

		items[i] = items[0]
		items = items[1:]
		if len(items) == 0 {
			delete(m.names, terminated)
			return nil
		}
		m.names[terminated] = items
		return nil
	}

	return nil
}

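// sendMonitorExit delivers gen.MessageDown to the monitoring process. For a
// remote monitor the signal is forwarded to the peer connection, except when
// the reason is "noconnection": the connection to that node is already lost,
// so there is nobody to forward it to.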
func (m *monitor) sendMonitorExit(to etf.Pid, terminated etf.Pid, reason string, ref etf.Ref) error {
	if string(to.Node) != m.nodename {
		// remote
		if reason == "noconnection" {
			// do nothing. it was a monitor created by the remote node we lost connection to.
			return nil
		}

		connection, err := m.router.getConnection(string(to.Node))
		if err != nil {
			return err
		}

		return connection.MonitorExit(to, terminated, reason, ref)
	}

	// local
	down := gen.MessageDown{
		Ref:    ref,
		Pid:    terminated,
		Reason: reason,
	}
	from := to
	return m.router.RouteSend(from, to, down)
}

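// sendMonitorExitReg is the sendMonitorExit counterpart for monitors
// created by registered process name.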
func (m *monitor) sendMonitorExitReg(to etf.Pid, terminated gen.ProcessID, reason string, ref etf.Ref) error {
	if string(to.Node) != m.nodename {
		// remote
		if reason == "noconnection" {
			// do nothing
			return nil
		}

		connection, err := m.router.getConnection(string(to.Node))
		if err != nil {
			return err
		}

		return connection.MonitorExitReg(to, terminated, reason, ref)
	}

	// local
	down := gen.MessageDown{
		Ref:       ref,
		ProcessID: terminated,
		Reason:    reason,
	}
	from := to
	return m.router.RouteSend(from, to, down)
}

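// sendExit delivers an exit signal to the linked process 'to'. A local
// process receives it directly; for a remote one the signal is forwarded
// to the peer connection.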
func (m *monitor) sendExit(to etf.Pid, terminated etf.Pid, reason string) error {
	// for remote: {3, FromPid, ToPid, Reason}
	if to.Node != etf.Atom(m.nodename) {
		if reason == "noconnection" {
			return nil
		}
		connection, err := m.router.getConnection(string(to.Node))
		if err != nil {
			return err
		}
		return connection.LinkExit(to, terminated, reason)
	}

	// check if 'to' process is still alive
	if p := m.router.processByPid(to); p != nil {
		p.exit(terminated, reason)
		return nil
	}
	return lib.ErrProcessUnknown
}

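// registerEvent registers a new event owned by 'by'. The given messages
// define the set of message types allowed for sendEvent; registering an
// already taken event returns lib.ErrTaken.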
func (m *monitor) registerEvent(by etf.Pid, event gen.Event, messages []gen.EventMessage) error {
	m.mutexEvents.Lock()
	defer m.mutexEvents.Unlock()
	if _, taken := m.events[event]; taken {
		return lib.ErrTaken
	}
	events := m.pid2events[by]
	events = append(events, event)
	m.pid2events[by] = events

	mt := make(map[string]bool)
	for _, msg := range messages {
		t := reflect.TypeOf(msg)
		st := t.PkgPath() + "/" + t.Name()
		mt[st] = true
	}
	item := eventItem{
		owner:        by,
		messageTypes: mt,
	}
	m.events[event] = item
	return nil
}

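// unregisterEvent removes the event registered by 'by' and notifies all of
// its subscribers with gen.MessageEventDown (reason "unregistered").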
func (m *monitor) unregisterEvent(by etf.Pid, event gen.Event) error {
	m.mutexEvents.Lock()
	defer m.mutexEvents.Unlock()

	item, exist := m.events[event]
	if exist == false {
		return lib.ErrEventUnknown
	}
	if item.owner != by {
		return lib.ErrEventOwner
	}
	message := gen.MessageEventDown{
		Event:  event,
		Reason: "unregistered",
	}

	monitors := append(item.monitors, by)
	for _, pid := range monitors {
		events := m.pid2events[pid]
		removed := 0
		for i := range events {
			if events[i] != event {
				continue
			}
			if pid != by {
				m.router.RouteSend(etf.Pid{}, pid, message)
			}
			events[i] = events[removed]
			removed++
		}
		events = events[removed:]

		if len(events) == 0 {
			delete(m.pid2events, pid)
		} else {
			m.pid2events[pid] = events
		}
	}

	delete(m.events, event)
	return nil
}

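// monitorEvent subscribes 'by' to the given event. The owner of an event
// can not subscribe to its own event (lib.ErrEventSelf).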
func (m *monitor) monitorEvent(by etf.Pid, event gen.Event) error {
	m.mutexEvents.Lock()
	defer m.mutexEvents.Unlock()

	item, exist := m.events[event]
	if exist == false {
		return lib.ErrEventUnknown
	}
	if item.owner == by {
		return lib.ErrEventSelf
	}
	item.monitors = append(item.monitors, by)
	m.events[event] = item

	events := m.pid2events[by]
	events = append(events, event)
	m.pid2events[by] = events
	return nil
}

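// demonitorEvent removes the subscription of 'by' from the given event.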
func (m *monitor) demonitorEvent(by etf.Pid, event gen.Event) error {
	m.mutexEvents.Lock()
	defer m.mutexEvents.Unlock()

	item, exist := m.events[event]
	if exist == false {
		return lib.ErrEventUnknown
	}
	removed := 0
	for i := range item.monitors {
		if item.monitors[i] != by {
			continue
		}

		item.monitors[i] = item.monitors[removed]
		removed++
	}
	item.monitors = item.monitors[removed:]
	m.events[event] = item

	events := m.pid2events[by]

	removed = 0
	for i := range events {
		if events[i] != event {
			continue
		}
		events[i] = events[removed]
		removed++
	}
	events = events[removed:]

	if len(events) == 0 {
		delete(m.pid2events, by)
	} else {
		m.pid2events[by] = events
	}

	return nil
}

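// sendEvent routes the event message from its owner to all subscribers.
// The message type must be one of the types declared at registerEvent,
// otherwise lib.ErrEventMismatch is returned.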
func (m *monitor) sendEvent(by etf.Pid, event gen.Event, message gen.EventMessage) error {
	m.mutexEvents.RLock()
	defer m.mutexEvents.RUnlock()

	item, exist := m.events[event]
	if exist == false {
		return lib.ErrEventUnknown
	}
	if item.owner != by {
		return lib.ErrEventOwner
	}

	t := reflect.TypeOf(message)
	st := t.PkgPath() + "/" + t.Name()
	if _, exist := item.messageTypes[st]; exist == false {
		return lib.ErrEventMismatch
	}

	// TODO clean up terminated subscribers
	for _, pid := range item.monitors {
		m.router.RouteSend(etf.Pid{}, pid, message)
	}

	return nil
}

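// monitorStats returns the current number of monitors (by pid, by name,
// of nodes) and links registered on this node.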
func (m *monitor) monitorStats() internalMonitorStats {
	stats := internalMonitorStats{}
	m.mutexProcesses.RLock()
	stats.monitorsByPid = len(m.processes)
	m.mutexProcesses.RUnlock()

	m.mutexNames.RLock()
	stats.monitorsByName = len(m.names)
	m.mutexNames.RUnlock()

	m.mutexNodes.RLock()
	stats.monitorsNodes = len(m.nodes)
	m.mutexNodes.RUnlock()

	m.mutexLinks.RLock()
	stats.links = len(m.links)
	m.mutexLinks.RUnlock()
	return stats
}