decred.org/dcrdex@v1.0.5/tatanka/client_messages.go (about)

     1  // This code is available on the terms of the project LICENSE.md file,
     2  // also available online at https://blueoakcouncil.org/license/1.0.0.
     3  
     4  package tatanka
     5  
     6  import (
     7  	"context"
     8  	"encoding/json"
     9  	"errors"
    10  	"time"
    11  
    12  	"decred.org/dcrdex/dex"
    13  	"decred.org/dcrdex/dex/msgjson"
    14  	"decred.org/dcrdex/dex/utils"
    15  	"decred.org/dcrdex/tatanka/mj"
    16  	"decred.org/dcrdex/tatanka/tanka"
    17  )
    18  
// clientJob is a job for the remote clients loop. All reads and writes of the
// remoteClients map are serialized through these jobs instead of a mutex.
type clientJob struct {
	// task is one of *clientJobNewRemote, *clientJobRemoteDisconnect, or
	// *clientJobFindRemotes.
	task interface{}
	// res receives the job's result once the loop has processed the task.
	res  chan interface{}
}
    24  
// clientJobNewRemote is a clientJob task that adds a remote client to the
// remoteClients map.
type clientJobNewRemote struct {
	// tankaID is the remote tatanka node reporting the client.
	tankaID  tanka.PeerID
	// clientID is the client connected to that remote node.
	clientID tanka.PeerID
}
    31  
// clientJobRemoteDisconnect is a clientJob task that removes a remote client
// from the remoteClients map. Same field semantics as clientJobNewRemote.
type clientJobRemoteDisconnect clientJobNewRemote
    35  
// clientJobFindRemotes is a clientJob that produces a list of remote tatanka
// nodes to which the client is thought to be connected. The job result is a
// copy of the client's entry in the remoteClients map.
type clientJobFindRemotes struct {
	clientID tanka.PeerID
}
    41  
    42  // runRemoteClientsLoop is a loop for reading and writing to the remoteClients
    43  // map. I don't know if it's more performant, I'm just tired of mutex patterns.
    44  func (t *Tatanka) runRemoteClientsLoop(ctx context.Context) {
    45  	for {
    46  		select {
    47  		case job := <-t.clientJobs:
    48  			switch task := job.task.(type) {
    49  			case *clientJobNewRemote:
    50  				srvs, found := t.remoteClients[task.clientID]
    51  				if !found {
    52  					srvs = make(map[tanka.PeerID]struct{})
    53  					t.remoteClients[task.clientID] = srvs
    54  				}
    55  				if _, found = srvs[task.tankaID]; !found {
    56  					srvs[task.tankaID] = struct{}{}
    57  					if t.log.Level() <= dex.LevelTrace {
    58  						t.log.Tracef("Indexing new remote client %s from %s", task.clientID, task.tankaID)
    59  					}
    60  				}
    61  				job.res <- true
    62  			case *clientJobRemoteDisconnect:
    63  				srvs, found := t.remoteClients[task.clientID]
    64  				if !found {
    65  					return
    66  				}
    67  				delete(srvs, task.tankaID)
    68  				if len(srvs) == 0 {
    69  					delete(t.remoteClients, task.clientID)
    70  				}
    71  				job.res <- true
    72  			case *clientJobFindRemotes:
    73  				job.res <- utils.CopyMap(t.remoteClients[task.clientID])
    74  			}
    75  		case <-ctx.Done():
    76  			return
    77  		}
    78  	}
    79  }
    80  
    81  // registerRemoteClient ensures the remote client is in the remoteClients map.
    82  func (t *Tatanka) registerRemoteClient(tankaID, clientID tanka.PeerID) {
    83  	job := &clientJob{
    84  		task: &clientJobNewRemote{
    85  			clientID: clientID,
    86  			tankaID:  tankaID,
    87  		},
    88  		res: make(chan interface{}, 1),
    89  	}
    90  	t.clientJobs <- job
    91  	select {
    92  	case <-job.res:
    93  	case <-t.ctx.Done():
    94  	}
    95  }
    96  
// handleClientConnect handles a new locally-connected client, checking
// reputation before adding the client to the map. On success the client's
// config is sent back as the result, and the rest of the mesh is notified.
func (t *Tatanka) handleClientConnect(cl tanka.Sender, msg *msgjson.Message) *msgjson.Error {
	var conn *mj.Connect
	if err := msg.Unmarshal(&conn); err != nil {
		return msgjson.NewError(mj.ErrBadRequest, "error unmarshaling client connection configuration from %q: %v", cl.PeerID(), err)
	}

	// Load the stored peer record and reputation before trusting anything
	// in the message.
	p, rrs, err := t.loadPeer(conn.ID)
	if err != nil {
		return msgjson.NewError(mj.ErrInternal, "error getting peer info for peer %q: %v", conn.ID, err)
	}

	// The message must be signed with the key on record for the claimed ID.
	if err := mj.CheckSig(msg, p.PubKey); err != nil {
		return msgjson.NewError(mj.ErrAuth, "signature error: %v", err)
	}

	cl.SetPeerID(p.ID)

	pp := &peer{Peer: p, Sender: cl, rrs: rrs}
	if pp.banned() {
		return msgjson.NewError(mj.ErrBanned, "your tier is <= 0. post some bonds")
	}

	bondTier := p.BondTier()

	// Swap the new connection in under the lock, then disconnect any
	// previous connection for the same peer ID outside the lock.
	t.clientMtx.Lock()
	oldClient := t.clients[conn.ID]
	t.clients[conn.ID] = &client{peer: pp}
	t.clientMtx.Unlock()

	if oldClient != nil {
		t.log.Debugf("new connection for already connected client %q", conn.ID)
		oldClient.Disconnect()
	}

	t.sendResult(cl, msg.ID, t.generateConfig(bondTier))

	// Share the new client with the rest of the mesh. Send failures are
	// logged but don't fail the connect.
	note := mj.MustNotification(mj.RouteNewClient, conn)
	for _, s := range t.tatankaNodes() {
		if err := t.send(s, note); err != nil {
			t.log.Errorf("error sharing new client info with tatanka node %q", s.ID)
		}
	}

	return nil
}
   144  
// handleClientMessage handles incoming message from locally-connected clients.
// All messages except for handleClientConnect and handlePostBond are handled
// here, with some common pre-processing and validation done before the
// subsequent route handler is called.
func (t *Tatanka) handleClientMessage(cl tanka.Sender, msg *msgjson.Message) *msgjson.Error {
	peerID := cl.PeerID()
	c := t.clientNode(peerID)
	if c == nil {
		// Unknown senders are dropped rather than answered.
		t.log.Errorf("Ignoring message from unknown client %s", peerID)
		cl.Disconnect()
		return nil
	}

	// Every client message must be signed with the client's registered key.
	if err := mj.CheckSig(msg, c.PubKey); err != nil {
		t.log.Errorf("Signature error for %q message from %q: %v", msg.Route, c.ID, err)
		return msgjson.NewError(mj.ErrSig, "signature doesn't check")
	}

	// NOTE(review): this second lookup appears redundant with clientNode
	// above — presumably guarding against a disconnect between the two
	// reads. Confirm before consolidating.
	t.clientMtx.RLock()
	c, found := t.clients[peerID]
	t.clientMtx.RUnlock()
	if !found {
		t.log.Errorf("client %s sent a message requiring tier before connecting", peerID)
		return msgjson.NewError(mj.ErrAuth, "not connected")
	}

	// Dispatch by message type, then by route.
	switch msg.Type {
	case msgjson.Request:
		switch msg.Route {
		case mj.RouteSubscribe:
			return t.handleSubscription(c, msg)
		// case mj.RouteUnsubscribe:
		// 	return t.handleUnsubscribe(c, msg)
		case mj.RouteBroadcast:
			return t.handleBroadcast(c.peer, msg, true)
		case mj.RouteTankagram:
			return t.handleTankagram(c, msg)
		default:
			return msgjson.NewError(mj.ErrBadRequest, "unknown request route %q", msg.Route)
		}
	case msgjson.Notification:
		switch msg.Route {
		default:
			// TODO: What? Can't let this happen too much.
			return msgjson.NewError(mj.ErrBadRequest, "unknown notification route %q", msg.Route)
		}
	default:
		return msgjson.NewError(mj.ErrBadRequest, "unknown message type %d", msg.Type)
	}
}
   195  
// handlePostBond handles a new bond sent from a locally connected client.
// handlePostBond is the only client route that can be invoked before the user
// is bonded. All bonds in the message must share one peer ID, be unexpired,
// and pass validation on a known chain before they are stored.
func (t *Tatanka) handlePostBond(cl tanka.Sender, msg *msgjson.Message) *msgjson.Error {
	var bonds []*tanka.Bond
	if err := msg.Unmarshal(&bonds); err != nil {
		t.log.Errorf("Bond-posting client sent a bad bond message: %v", err)
		return msgjson.NewError(mj.ErrBadRequest, "bad request")
	}

	if len(bonds) == 0 {
		t.log.Errorf("Bond-posting client sent zero bonds")
		return msgjson.NewError(mj.ErrBadRequest, "no bonds sent")
	}

	// All bonds must be for the same, non-zero peer ID.
	peerID := bonds[0].PeerID
	if peerID == (tanka.PeerID{}) {
		t.log.Errorf("Bond-posting client didn't provide a peer ID")
		return msgjson.NewError(mj.ErrBadRequest, "no peer ID")
	}
	for i := 1; i < len(bonds); i++ {
		if bonds[i].PeerID != bonds[0].PeerID {
			t.log.Errorf("Bond-posting client provided non uniform peer IDs")
			return msgjson.NewError(mj.ErrBadRequest, "mismatched peer IDs")
		}
	}

	// Validate and store each bond. StoreBond returns the client's full set
	// of stored bonds, so allBonds ends up holding the complete, final set.
	var allBonds []*tanka.Bond
	for _, b := range bonds {
		if b == nil {
			t.log.Errorf("Bond-posting client %s sent a nil bond", peerID)
			return msgjson.NewError(mj.ErrBadRequest, "nil bond")
		}
		if b.Expiration.Before(time.Now()) {
			t.log.Errorf("Bond-posting client %q sent an expired bond", peerID)
			return msgjson.NewError(mj.ErrBadRequest, "bond already expired")
		}

		// The bond's asset must correspond to a configured chain backend.
		t.chainMtx.RLock()
		ch := t.chains[b.AssetID]
		t.chainMtx.RUnlock()
		if ch == nil {
			t.log.Errorf("Bond-posting client %q sent a bond for an unknown chain %d", peerID, b.AssetID)
			return msgjson.NewError(mj.ErrBadRequest, "unsupported asset")
		}

		if err := ch.CheckBond(b); err != nil {
			t.log.Errorf("Bond-posting client %q with bond %s didn't pass validation for chain %d: %v", peerID, b.CoinID, b.AssetID, err)
			return msgjson.NewError(mj.ErrBadRequest, "failed validation")
		}

		var err error
		allBonds, err = t.db.StoreBond(b)
		if err != nil {
			t.log.Errorf("Error storing bond for client %s in db: %v", peerID, err)
			return msgjson.NewError(mj.ErrInternal, "internal error")
		}

	}

	if len(allBonds) > 0 { // Probably no way to get here with empty allBonds, but checking anyway.
		// If the client is currently connected, refresh their in-memory
		// bond set so tier takes effect immediately.
		if c := t.clientNode(peerID); c != nil {
			c.updateBonds(allBonds)
		}
	}

	t.sendResult(cl, msg.ID, true)

	return nil
}
   266  
   267  // handleSubscription handles a new subscription, adding the subject to the
   268  // map if it doesn't exist. It then distributes a NewSubscriber broadcast
   269  // to all current subscribers and remote tatankas.
   270  func (t *Tatanka) handleSubscription(c *client, msg *msgjson.Message) *msgjson.Error {
   271  	if t.skipRelay(msg) {
   272  		return nil
   273  	}
   274  
   275  	var sub *mj.Subscription
   276  	if err := msg.Unmarshal(&sub); err != nil || sub == nil || sub.Topic == "" {
   277  		t.log.Errorf("error unmarshaling subscription from %s: %w", c.ID, err)
   278  		return msgjson.NewError(mj.ErrBadRequest, "is this payload a subscription?")
   279  	}
   280  
   281  	bcast := &mj.Broadcast{
   282  		Topic:       sub.Topic,
   283  		Subject:     sub.Subject,
   284  		MessageType: mj.MessageTypeNewSubscriber,
   285  		PeerID:      c.ID,
   286  		Stamp:       time.Now(),
   287  	}
   288  
   289  	// Do a helper function here to keep things tidy below.
   290  	relay := func(subs map[tanka.PeerID]struct{}) {
   291  		clientMsg := mj.MustNotification(mj.RouteBroadcast, bcast)
   292  		mj.SignMessage(t.priv, clientMsg)
   293  		clientMsgB, _ := json.Marshal(clientMsg)
   294  		for peerID := range subs {
   295  			subscriber, found := t.clients[peerID]
   296  			if !found {
   297  				t.log.Errorf("client not found for subscriber %s on topic %q, subject %q", peerID, sub.Topic, sub.Subject)
   298  				continue
   299  			}
   300  
   301  			if err := subscriber.Sender.SendRaw(clientMsgB); err != nil {
   302  				// DRAFT TODO: Remove subscriber and client and disconnect?
   303  				// Or do that in (*Tatanka).send?
   304  				t.log.Errorf("Error relaying broadcast: %v", err)
   305  				continue
   306  			}
   307  		}
   308  	}
   309  
   310  	// Send it to all other remote tatankas.
   311  	t.relayBroadcast(bcast, c.ID)
   312  
   313  	// Find it and broadcast to locally-connected clients, or add the subject if
   314  	// it doesn't exist.
   315  	t.clientMtx.Lock()
   316  	defer t.clientMtx.Unlock()
   317  	topic, exists := t.topics[sub.Topic]
   318  	if exists {
   319  		// We have the topic. Do we have the subject?
   320  		topic.subscribers[c.ID] = struct{}{}
   321  		subs, exists := topic.subjects[sub.Subject]
   322  		if exists {
   323  			// We already have the subject, distribute the broadcast to existing
   324  			// subscribers.
   325  			relay(subs)
   326  			subs[c.ID] = struct{}{}
   327  		} else {
   328  			// Add the subject. Nothing to broadcast.
   329  			topic.subjects[sub.Subject] = map[tanka.PeerID]struct{}{
   330  				c.ID: {},
   331  			}
   332  		}
   333  	} else {
   334  		// New topic and subject.
   335  		t.log.Tracef("Adding new subscription topic and subject %s -> %s", sub.Topic, sub.Subject)
   336  		t.topics[sub.Topic] = &Topic{
   337  			subjects: map[tanka.Subject]map[tanka.PeerID]struct{}{
   338  				sub.Subject: {
   339  					c.ID: {},
   340  				},
   341  			},
   342  			subscribers: map[tanka.PeerID]struct{}{
   343  				c.ID: {},
   344  			},
   345  		}
   346  	}
   347  
   348  	t.sendResult(c, msg.ID, true)
   349  	t.replySubscription(c, sub.Topic)
   350  	return nil
   351  }
   352  
   353  // replySubscription sends a follow up reply to a sender's subscription after
   354  // their message has been processed successfully.
   355  func (t *Tatanka) replySubscription(cl tanka.Sender, topic tanka.Topic) {
   356  	switch topic {
   357  	case mj.TopicFiatRate:
   358  		if t.fiatOracleEnabled() {
   359  			rates := t.fiatRateOracle.Rates()
   360  			if len(rates) == 0 { // no data to send
   361  				return
   362  			}
   363  
   364  			reply := mj.MustNotification(mj.RouteRates, &mj.RateMessage{
   365  				Topic: mj.TopicFiatRate,
   366  				Rates: rates,
   367  			})
   368  
   369  			if err := t.send(cl, reply); err != nil {
   370  				peerID := cl.PeerID()
   371  				t.log.Errorf("error sending result to %q: %v", dex.Bytes(peerID[:]), err)
   372  			}
   373  		}
   374  	}
   375  }
   376  
   377  func (t *Tatanka) unsub(peerID tanka.PeerID, topicID tanka.Topic, subjectID tanka.Subject) *msgjson.Error {
   378  	t.clientMtx.Lock()
   379  	defer t.clientMtx.Unlock()
   380  	topic, exists := t.topics[topicID]
   381  	if !exists {
   382  		t.log.Errorf("client %q unsubscribed from an unknown topic %q", peerID, topicID)
   383  		return msgjson.NewError(mj.ErrBadRequest, "unknown topic")
   384  	}
   385  	if _, found := topic.subscribers[peerID]; !found {
   386  		t.log.Errorf("client %q unsubscribed from topic %q, to which they were not suscribed", peerID, topicID)
   387  		return msgjson.NewError(mj.ErrBadRequest, "unknown topic")
   388  	}
   389  	if subjectID == "" {
   390  		// Unsubbing all subjects.
   391  		for subID, subs := range topic.subjects {
   392  			delete(subs, peerID)
   393  			if len(subs) == 0 {
   394  				delete(topic.subjects, subID)
   395  			}
   396  		}
   397  	} else {
   398  		subs, exists := topic.subjects[subjectID]
   399  		if !exists {
   400  			t.log.Errorf("client %q unsubscribed subject %q, topic %q, to which they were not suscribed", peerID, subjectID, topicID)
   401  			return msgjson.NewError(mj.ErrBadRequest, "unknown subject")
   402  		}
   403  		delete(subs, peerID)
   404  		if len(subs) == 0 {
   405  			delete(topic.subjects, subjectID)
   406  		}
   407  	}
   408  
   409  	if len(topic.subscribers) == 0 {
   410  		delete(t.topics, topicID)
   411  	}
   412  	return nil
   413  }
   414  
   415  // skipRelay checks whether the message has already been handled. This function
   416  // may not be necessary with the version 0 whitelisted mesh net, since
   417  // it is expected to be highly connected and relays only go one hop.
   418  func (t *Tatanka) skipRelay(msg *msgjson.Message) bool {
   419  	bcastID := mj.MessageDigest(msg)
   420  	t.relayMtx.Lock()
   421  	defer t.relayMtx.Unlock()
   422  	_, exists := t.recentRelays[bcastID]
   423  	if !exists {
   424  		t.recentRelays[bcastID] = time.Now()
   425  	}
   426  	return exists
   427  }
   428  
   429  // distributeBroadcastedMessage distributes the broadcast to any
   430  // locally-connected subscribers.
   431  func (t *Tatanka) distributeBroadcastedMessage(bcast *mj.Broadcast, mustExist bool) *msgjson.Error {
   432  	relayedMsg := mj.MustNotification(mj.RouteBroadcast, bcast)
   433  	mj.SignMessage(t.priv, relayedMsg)
   434  	relayedMsgB, _ := json.Marshal(relayedMsg)
   435  
   436  	t.clientMtx.RLock()
   437  	defer t.clientMtx.RUnlock()
   438  	topic, found := t.topics[bcast.Topic]
   439  	if !found {
   440  		if mustExist {
   441  			t.log.Errorf("client %q broadcasted to an unknown topic %q", bcast.PeerID, bcast.Topic)
   442  			return msgjson.NewError(mj.ErrBadRequest, "unknown topic")
   443  		}
   444  		return nil
   445  	}
   446  
   447  	relay := func(subs map[tanka.PeerID]struct{}) {
   448  		for peerID := range subs {
   449  			subscriber, found := t.clients[peerID]
   450  			if !found {
   451  				t.log.Errorf("client not found for subscriber %s on topic %q, subject %q", peerID, bcast.Topic, bcast.Subject)
   452  				continue
   453  			}
   454  
   455  			if err := subscriber.Sender.SendRaw(relayedMsgB); err != nil {
   456  				// DRAFT TODO: Remove subscriber and client and disconnect?
   457  				// Or do that in (*Tatanka).send?
   458  				t.log.Errorf("Error relaying broadcast: %v", err)
   459  				continue
   460  			}
   461  		}
   462  	}
   463  
   464  	if bcast.Subject == "" {
   465  		relay(topic.subscribers)
   466  	} else {
   467  		subs, found := topic.subjects[bcast.Subject]
   468  		if !found {
   469  			if mustExist {
   470  				t.log.Errorf("client %s broadcasted to an unknown subject %q on topic %s", bcast.PeerID, bcast.Subject, bcast.Topic)
   471  			}
   472  			return msgjson.NewError(mj.ErrBadRequest, "unknown subject")
   473  		}
   474  		relay(subs)
   475  	}
   476  	return nil
   477  }
   478  
   479  // handleBroadcast handles a broadcast from a locally connected client,
   480  // forwarding the message to all remote tatankas and local subscribers.
   481  func (t *Tatanka) handleBroadcast(p *peer, msg *msgjson.Message, mustExist bool) *msgjson.Error {
   482  	if t.skipRelay(msg) {
   483  		return nil
   484  	}
   485  
   486  	var bcast *mj.Broadcast
   487  	if err := msg.Unmarshal(&bcast); err != nil || bcast == nil || bcast.Topic == "" {
   488  		t.log.Errorf("error unmarshaling broadcast from %s: %w", p.ID, err)
   489  		return msgjson.NewError(mj.ErrBadRequest, "is this payload a broadcast?")
   490  	}
   491  
   492  	if bcast.PeerID != p.ID {
   493  		t.log.Errorf("broadcast peer ID does not match connected client: %s != %s", bcast.PeerID, p.ID)
   494  		return msgjson.NewError(mj.ErrBadRequest, "who's broadcast is this?")
   495  	}
   496  
   497  	if time.Since(bcast.Stamp) > tanka.EpochLength || time.Until(bcast.Stamp) > tanka.EpochLength {
   498  		t.log.Errorf("Ignoring broadcast with old stamp received from %s", p.ID)
   499  		return msgjson.NewError(mj.ErrBadRequest, "too old")
   500  	}
   501  
   502  	// Relay to remote tatankas first.
   503  	t.relayBroadcast(bcast, p.ID)
   504  
   505  	// Send to local subscribers.
   506  	if msgErr := t.distributeBroadcastedMessage(bcast, mustExist); msgErr != nil {
   507  		return msgErr
   508  	}
   509  
   510  	t.sendResult(p, msg.ID, true)
   511  
   512  	// Handle unsubs.
   513  	switch bcast.MessageType {
   514  	case mj.MessageTypeUnsubTopic:
   515  		t.unsub(p.ID, bcast.Topic, "")
   516  	case mj.MessageTypeUnsubSubject:
   517  		t.unsub(p.ID, bcast.Topic, bcast.Subject)
   518  	}
   519  
   520  	return nil
   521  }
   522  
   523  // relayBroadcast sends a relay_broadcast message to all remote tatankas.
   524  func (t *Tatanka) relayBroadcast(bcast *mj.Broadcast, from tanka.PeerID) {
   525  	relayedMsg := mj.MustNotification(mj.RouteRelayBroadcast, bcast)
   526  	mj.SignMessage(t.priv, relayedMsg)
   527  	relayedMsgB, _ := json.Marshal(relayedMsg)
   528  
   529  	for _, tt := range t.tatankaNodes() {
   530  		if tt.ID == from {
   531  			// don't send back to sender
   532  			continue
   533  		}
   534  		if err := tt.Sender.SendRaw(relayedMsgB); err != nil {
   535  			t.log.Errorf("Error relaying broadcast to %s: %v", tt.ID, err)
   536  		}
   537  	}
   538  }
   539  
   540  // findPath finds remote tatankas that are hosting the specified peer.
   541  func (t *Tatanka) findPath(peerID tanka.PeerID) []*remoteTatanka {
   542  	job := &clientJob{
   543  		task: &clientJobFindRemotes{
   544  			clientID: peerID,
   545  		},
   546  		res: make(chan interface{}),
   547  	}
   548  	t.clientJobs <- job
   549  	ttIDs := (<-job.res).(map[tanka.PeerID]struct{})
   550  
   551  	nodes := make([]*remoteTatanka, 0, len(ttIDs))
   552  	t.tatankasMtx.RLock()
   553  	for ttID := range ttIDs {
   554  		if tt, found := t.tatankas[ttID]; found {
   555  			nodes = append(nodes, tt)
   556  		}
   557  	}
   558  	t.tatankasMtx.RUnlock()
   559  	return nodes
   560  }
   561  
   562  // handleTankagram forwards a tankagram from a locally connected client, sending
   563  // it to the recipient if the recipient is also locally connected, or else
   564  // bouncing it off of a remote tatanka if one is known.
   565  func (t *Tatanka) handleTankagram(c *client, msg *msgjson.Message) *msgjson.Error {
   566  	var gram mj.Tankagram
   567  	if err := msg.Unmarshal(&gram); err != nil {
   568  		t.log.Errorf("Error unmarshaling tankagram from %s: %w", c.ID, err)
   569  		return msgjson.NewError(mj.ErrBadRequest, "bad tankagram")
   570  	}
   571  
   572  	if gram.From != c.ID {
   573  		t.log.Errorf("Tankagram from %s has wrong sender %s", c.ID, gram.From)
   574  		return msgjson.NewError(mj.ErrBadRequest, "wrong sender")
   575  	}
   576  
   577  	// The TankagramResult is signed separately.
   578  	sendTankagramResult := func(r *mj.TankagramResult) {
   579  		r.Sign(t.priv)
   580  		t.sendResult(c, msg.ID, r)
   581  	}
   582  
   583  	t.clientMtx.RLock()
   584  	recip, foundLocally := t.clients[gram.To]
   585  	t.clientMtx.RUnlock()
   586  	if foundLocally {
   587  		var resB dex.Bytes
   588  		relayedMsg := mj.MustRequest(mj.RouteTankagram, gram)
   589  		sent, clientErr, err := t.requestAnyOne([]tanka.Sender{recip}, relayedMsg, &resB)
   590  		if sent {
   591  			sendTankagramResult(&mj.TankagramResult{Result: mj.TRTTransmitted, Response: resB})
   592  			return nil
   593  		}
   594  		if clientErr != nil {
   595  			sendTankagramResult(&mj.TankagramResult{Result: mj.TRTErrBadClient})
   596  			return nil
   597  		}
   598  		if err != nil {
   599  			t.log.Errorf("Error sending to local client %s: %v", recip.ID, err)
   600  		}
   601  	}
   602  
   603  	// Either it's not a locally-connected client, or the send attempt failed.
   604  	// See if we have any other routes.
   605  
   606  	tts := t.findPath(gram.To)
   607  	if len(tts) == 0 {
   608  		// TODO: Disconnect client?
   609  		t.log.Errorf("No local or remote client %s for tankagram from %s", gram.To, c.ID)
   610  		sendTankagramResult(&mj.TankagramResult{Result: mj.TRTNoPath})
   611  		return nil
   612  	}
   613  
   614  	relayedMsg := mj.MustRequest(mj.RouteRelayTankagram, gram)
   615  	var r mj.TankagramResult
   616  	sent, clientErr, err := t.requestAnyOne(tankasToSenders(tts), relayedMsg, &r)
   617  	if sent {
   618  		sendTankagramResult(&r)
   619  		return nil
   620  	}
   621  	if clientErr != nil {
   622  		// TODO: This means weren't able to communicate with the server
   623  		// node. We should probably disconnect this server.
   624  		sendTankagramResult(&mj.TankagramResult{Result: mj.TRTErrFromTanka})
   625  		return nil
   626  	}
   627  	if err != nil {
   628  		if errors.Is(err, ErrNoPath) {
   629  			sendTankagramResult(&mj.TankagramResult{Result: mj.TRTNoPath})
   630  		} else {
   631  			t.log.Errorf("Error relaying tankagram: %v", err)
   632  			sendTankagramResult(&mj.TankagramResult{Result: mj.TRTErrFromTanka})
   633  		}
   634  	}
   635  	return nil
   636  }
   637  
   638  const ErrNoPath = dex.ErrorKind("no path")
   639  
   640  // requestAnyOne tries to request from the senders in order until one succeeds.
   641  func (t *Tatanka) requestAnyOne(senders []tanka.Sender, msg *msgjson.Message, resp interface{}) (sent bool, clientErr, err error) {
   642  	mj.SignMessage(t.priv, msg)
   643  	rawMsg, err := json.Marshal(msg)
   644  	if err != nil {
   645  		return false, nil, err
   646  	}
   647  	return t.requestAnyOneRaw(senders, msg.ID, rawMsg, resp)
   648  }
   649  
   650  func (t *Tatanka) requestAnyOneRaw(senders []tanka.Sender, msgID uint64, rawMsg []byte, resp interface{}) (sent bool, clientErr, err error) {
   651  	for _, sender := range senders {
   652  		var errChan = make(chan error)
   653  		if err := sender.RequestRaw(msgID, rawMsg, func(msg *msgjson.Message) {
   654  			if err := msg.UnmarshalResult(&resp); err != nil {
   655  				errChan <- err
   656  				return
   657  			}
   658  			errChan <- nil
   659  		}); err != nil {
   660  			peerID := sender.PeerID()
   661  			t.log.Errorf("error sending message to %s. msg = %s, err = %v", dex.Bytes(peerID[:]), mj.Truncate(rawMsg), err)
   662  			continue
   663  		}
   664  		select {
   665  		case err := <-errChan:
   666  			if err == nil {
   667  				return true, nil, nil
   668  			}
   669  			// If we get here, that means we got a result from the client, but
   670  			// it didn't parse. This is a client error.
   671  			return false, err, nil
   672  		case <-t.ctx.Done():
   673  			return false, nil, nil
   674  		}
   675  	}
   676  	return false, nil, ErrNoPath
   677  }
   678  
   679  func tankasToSenders(tts []*remoteTatanka) []tanka.Sender {
   680  	senders := make([]tanka.Sender, len(tts))
   681  	for i, tt := range tts {
   682  		senders[i] = tt
   683  	}
   684  	return senders
   685  }