github.com/keltia/go-ipfs@v0.3.8-0.20150909044612-210793031c63/routing/dht/handlers.go

package dht

import (
	"errors"
	"fmt"

	proto "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/gogo/protobuf/proto"
	ds "github.com/ipfs/go-ipfs/Godeps/_workspace/src/github.com/jbenet/go-datastore"
	context "github.com/ipfs/go-ipfs/Godeps/_workspace/src/golang.org/x/net/context"
	key "github.com/ipfs/go-ipfs/blocks/key"
	peer "github.com/ipfs/go-ipfs/p2p/peer"
	pb "github.com/ipfs/go-ipfs/routing/dht/pb"
	lgbl "github.com/ipfs/go-ipfs/util/eventlog/loggables"
)

// CloserPeerCount is the number of closer peers to send on requests.
var CloserPeerCount = 4

// dhtHandler specifies the signature of functions that handle DHT messages.
type dhtHandler func(context.Context, peer.ID, *pb.Message) (*pb.Message, error)

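// handlerForMsgType returns the handler for the given message type, or nil if the type is not recognized.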
func (dht *IpfsDHT) handlerForMsgType(t pb.Message_MessageType) dhtHandler {
	switch t {
	case pb.Message_GET_VALUE:
		return dht.handleGetValue
	case pb.Message_PUT_VALUE:
		return dht.handlePutValue
	case pb.Message_FIND_NODE:
		return dht.handleFindPeer
	case pb.Message_ADD_PROVIDER:
		return dht.handleAddProvider
	case pb.Message_GET_PROVIDERS:
		return dht.handleGetProviders
	case pb.Message_PING:
		return dht.handlePing
	default:
		return nil
	}
}

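// handleGetValue answers a GET_VALUE request with any record we hold locally for the key, along with closer peers to continue the search.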
func (dht *IpfsDHT) handleGetValue(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	defer log.EventBegin(ctx, "handleGetValue", p).Done()
	log.Debugf("%s handleGetValue for key: %s", dht.self, pmes.GetKey())

	// setup response
	resp := pb.NewMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())

	// first, is there even a key?
	k := pmes.GetKey()
	if k == "" {
		return nil, errors.New("handleGetValue but no key was provided")
		// TODO: send back an error response? could be bad, but without one the other node is left hanging.
	}

	// let's first check if we have the value locally.
	log.Debugf("%s handleGetValue looking into ds", dht.self)
	dskey := key.Key(k).DsKey()
	iVal, err := dht.datastore.Get(dskey)
	log.Debugf("%s handleGetValue looking into ds GOT %v", dht.self, iVal)

	// if we got an unexpected error, bail.
	if err != nil && err != ds.ErrNotFound {
		return nil, err
	}

	// if we have the value, send it back
	if err == nil {
		log.Debugf("%s handleGetValue success!", dht.self)

		byts, ok := iVal.([]byte)
		if !ok {
			return nil, fmt.Errorf("datastore had non byte-slice value for %v", dskey)
		}

		rec := new(pb.Record)
		err := proto.Unmarshal(byts, rec)
		if err != nil {
			log.Debug("Failed to unmarshal dht record from datastore")
			return nil, err
		}

		resp.Record = rec
	}

	// Find the closest peers on the given cluster to the desired key and reply with that info
	closer := dht.betterPeersToQuery(pmes, p, CloserPeerCount)
	if len(closer) > 0 {
		closerinfos := peer.PeerInfos(dht.peerstore, closer)
		for _, pi := range closerinfos {
			log.Debugf("handleGetValue returning closer peer: '%s'", pi.ID)
			if len(pi.Addrs) < 1 {
				log.Errorf(`no addresses on peer being sent!
					[local:%s]
					[sending:%s]
					[remote:%s]`, dht.self, pi.ID, p)
			}
		}

		resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), closerinfos)
	}

	return resp, nil
}

// handlePutValue stores a value in this peer's local storage.
func (dht *IpfsDHT) handlePutValue(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	defer log.EventBegin(ctx, "handlePutValue", p).Done()
	dskey := key.Key(pmes.GetKey()).DsKey()

	if err := dht.verifyRecordLocally(pmes.GetRecord()); err != nil {
		log.Debugf("Bad dht record in PUT from: %s. %s", key.Key(pmes.GetRecord().GetAuthor()), err)
		return nil, err
	}

	data, err := proto.Marshal(pmes.GetRecord())
	if err != nil {
		return nil, err
	}

	err = dht.datastore.Put(dskey, data)
	log.Debugf("%s handlePutValue %v", dht.self, dskey)
	return pmes, err
}

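// handlePing answers a PING request by echoing the message back to the sender.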
func (dht *IpfsDHT) handlePing(_ context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	log.Debugf("%s Responding to ping from %s!\n", dht.self, p)
	return pmes, nil
}

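// handleFindPeer answers a FIND_NODE request with the closest peers we know of that have addresses.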
func (dht *IpfsDHT) handleFindPeer(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	defer log.EventBegin(ctx, "handleFindPeer", p).Done()
	resp := pb.NewMessage(pmes.GetType(), "", pmes.GetClusterLevel())
	var closest []peer.ID

	// if looking for self... special case where we send it back in CloserPeers.
	if peer.ID(pmes.GetKey()) == dht.self {
		closest = []peer.ID{dht.self}
	} else {
		closest = dht.betterPeersToQuery(pmes, p, CloserPeerCount)
	}

	if closest == nil {
		log.Infof("%s handleFindPeer %s: could not find anything.", dht.self, p)
		return resp, nil
	}

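	// only send back peers we actually have addresses for; the requester cannot dial the others.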
	var withAddresses []peer.PeerInfo
	closestinfos := peer.PeerInfos(dht.peerstore, closest)
	for _, pi := range closestinfos {
		if len(pi.Addrs) > 0 {
			withAddresses = append(withAddresses, pi)
			log.Debugf("handleFindPeer: sending back '%s'", pi.ID)
		}
	}

	resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), withAddresses)
	return resp, nil
}

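// handleGetProviders answers a GET_PROVIDERS request with the providers we know for the key (including ourselves if we hold the value), plus closer peers.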
func (dht *IpfsDHT) handleGetProviders(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	lm := make(lgbl.DeferredMap)
	lm["peer"] = func() interface{} { return p.Pretty() }
	defer log.EventBegin(ctx, "handleGetProviders", lm).Done()

	resp := pb.NewMessage(pmes.GetType(), pmes.GetKey(), pmes.GetClusterLevel())
	key := key.Key(pmes.GetKey())
	lm["key"] = func() interface{} { return key.Pretty() }

	// debug logging niceness.
	reqDesc := fmt.Sprintf("%s handleGetProviders(%s, %s): ", dht.self, p, key)
	log.Debugf("%s begin", reqDesc)
	defer log.Debugf("%s end", reqDesc)

	// check if we have this value, to add ourselves as provider.
	has, err := dht.datastore.Has(key.DsKey())
	if err != nil && err != ds.ErrNotFound {
		log.Debugf("unexpected datastore error: %v\n", err)
		has = false
	}

	// setup providers
	providers := dht.providers.GetProviders(ctx, key)
	if has {
		providers = append(providers, dht.self)
		log.Debugf("%s have the value. added self as provider", reqDesc)
	}

	if len(providers) > 0 {
		infos := peer.PeerInfos(dht.peerstore, providers)
		resp.ProviderPeers = pb.PeerInfosToPBPeers(dht.host.Network(), infos)
		log.Debugf("%s have %d providers: %s", reqDesc, len(providers), infos)
	}

	// Also send closer peers.
	closer := dht.betterPeersToQuery(pmes, p, CloserPeerCount)
	if closer != nil {
		infos := peer.PeerInfos(dht.peerstore, closer)
		resp.CloserPeers = pb.PeerInfosToPBPeers(dht.host.Network(), infos)
		log.Debugf("%s have %d closer peers: %s", reqDesc, len(closer), infos)
	}

	return resp, nil
}

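// handleAddProvider records the sender as a provider for the given key, ignoring provider records that did not originate from the sender.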
func (dht *IpfsDHT) handleAddProvider(ctx context.Context, p peer.ID, pmes *pb.Message) (*pb.Message, error) {
	lm := make(lgbl.DeferredMap)
	lm["peer"] = func() interface{} { return p.Pretty() }

	defer log.EventBegin(ctx, "handleAddProvider", lm).Done()
	key := key.Key(pmes.GetKey())
	lm["key"] = func() interface{} { return key.Pretty() }

	log.Debugf("%s adding %s as a provider for '%s'\n", dht.self, p, key)

	// add provider should use the addresses given in the message
	pinfos := pb.PBPeersToPeerInfos(pmes.GetProviderPeers())
	for _, pi := range pinfos {
		if pi.ID != p {
			// we should ignore this provider record! not from originator.
			// (we could sign them and check the signature later...)
			log.Debugf("handleAddProvider received provider %s from %s. Ignore.", pi.ID, p)
			continue
		}

		if len(pi.Addrs) < 1 {
			log.Debugf("%s got no valid addresses for provider %s. Ignore.", dht.self, p)
			continue
		}

		log.Infof("received provider %s for %s (addrs: %s)", p, key, pi.Addrs)
		if pi.ID != dht.self { // don't add our own addrs.
			// add the received addresses to our peerstore.
			dht.peerstore.AddAddrs(pi.ID, pi.Addrs, peer.ProviderAddrTTL)
		}
		dht.providers.AddProvider(ctx, key, p)
	}

	return nil, nil
}