github.com/turingchain2020/turingchain@v1.1.21/system/p2p/dht/p2p_test.go (about) 1 package dht 2 3 import ( 4 "context" 5 "crypto/rand" 6 "encoding/hex" 7 "encoding/json" 8 "fmt" 9 "net" 10 "os" 11 "strings" 12 "testing" 13 "time" 14 15 "github.com/turingchain2020/turingchain/client" 16 l "github.com/turingchain2020/turingchain/common/log" 17 p2p2 "github.com/turingchain2020/turingchain/p2p" 18 "github.com/turingchain2020/turingchain/queue" 19 p2pty "github.com/turingchain2020/turingchain/system/p2p/dht/types" 20 "github.com/turingchain2020/turingchain/types" 21 "github.com/turingchain2020/turingchain/util" 22 "github.com/turingchain2020/turingchain/wallet" 23 "github.com/libp2p/go-libp2p" 24 core "github.com/libp2p/go-libp2p-core" 25 "github.com/libp2p/go-libp2p-core/crypto" 26 "github.com/libp2p/go-libp2p-core/host" 27 "github.com/libp2p/go-libp2p-core/metrics" 28 "github.com/libp2p/go-libp2p-core/peer" 29 "github.com/libp2p/go-libp2p-core/protocol" 30 "github.com/multiformats/go-multiaddr" 31 "github.com/stretchr/testify/require" 32 ) 33 34 func init() { 35 l.SetLogLevel("err") 36 } 37 38 func processMsg(q queue.Queue) { 39 go func() { 40 cfg := q.GetConfig() 41 wcli := wallet.New(cfg) 42 client := q.Client() 43 wcli.SetQueueClient(client) 44 //导入种子,解锁钱包 45 password := "a12345678" 46 seed := "cushion canal bitter result harvest sentence ability time steel basket useful ask depth sorry area course purpose search exile chapter mountain project ranch buffalo" 47 saveSeedByPw := &types.SaveSeedByPw{Seed: seed, Passwd: password} 48 _, err := wcli.GetAPI().ExecWalletFunc("wallet", "SaveSeed", saveSeedByPw) 49 if err != nil { 50 return 51 } 52 walletUnLock := &types.WalletUnLock{ 53 Passwd: password, 54 Timeout: 0, 55 WalletOrTicket: false, 56 } 57 58 _, err = wcli.GetAPI().ExecWalletFunc("wallet", "WalletUnLock", walletUnLock) 59 if err != nil { 60 return 61 } 62 }() 63 64 go func() { 65 blockchainKey := "blockchain" 66 client := q.Client() 67 client.Sub(blockchainKey) 68 for msg := range client.Recv() { 69 switch msg.Ty { 70 case types.EventGetBlocks: 71 if req, ok := msg.GetData().(*types.ReqBlocks); ok { 72 if req.Start == 1 { 73 msg.Reply(client.NewMessage(blockchainKey, types.EventBlocks, &types.Transaction{})) 74 } else { 75 msg.Reply(client.NewMessage(blockchainKey, types.EventBlocks, &types.BlockDetails{})) 76 } 77 } else { 78 msg.ReplyErr("Do not support", types.ErrInvalidParam) 79 } 80 81 case types.EventGetHeaders: 82 if req, ok := msg.GetData().(*types.ReqBlocks); ok { 83 if req.Start == 10 { 84 msg.Reply(client.NewMessage(blockchainKey, types.EventHeaders, &types.Transaction{})) 85 } else { 86 msg.Reply(client.NewMessage(blockchainKey, types.EventHeaders, &types.Headers{})) 87 } 88 } else { 89 msg.ReplyErr("Do not support", types.ErrInvalidParam) 90 } 91 92 case types.EventGetLastHeader: 93 msg.Reply(client.NewMessage("p2p", types.EventHeader, &types.Header{Height: 2019})) 94 case types.EventGetBlockHeight: 95 msg.Reply(client.NewMessage("p2p", types.EventReplyBlockHeight, &types.ReplyBlockHeight{Height: 2019})) 96 } 97 } 98 }() 99 100 go func() { 101 mempoolKey := "mempool" 102 client := q.Client() 103 client.Sub(mempoolKey) 104 for msg := range client.Recv() { 105 switch msg.Ty { 106 case types.EventGetMempoolSize: 107 msg.Reply(client.NewMessage("p2p", types.EventMempoolSize, &types.MempoolSize{Size: 0})) 108 } 109 } 110 }() 111 } 112 113 func NewP2p(cfg *types.TuringchainConfig, cli queue.Client) p2p2.IP2P { 114 p2pmgr := p2p2.NewP2PMgr(cfg) 115 p2pmgr.SysAPI, _ = client.New(cli, nil) 116 p2pmgr.Client = cli 117 subCfg := p2pmgr.ChainCfg.GetSubConfig().P2P 118 mcfg := &p2pty.P2PSubConfig{} 119 types.MustDecode(subCfg[p2pty.DHTTypeName], mcfg) 120 mcfg.RelayEnable = true 121 122 dhtcfg, _ := json.Marshal(mcfg) 123 p2p := New(p2pmgr, dhtcfg) 124 p2p.StartP2P() 125 return p2p 126 } 127 128 func testP2PEvent(t *testing.T, qcli queue.Client) { 129 130 msg := qcli.NewMessage("p2p", types.EventBlockBroadcast, &types.Block{}) 131 require.True(t, qcli.Send(msg, false) == nil) 132 133 msg = qcli.NewMessage("p2p", types.EventTxBroadcast, &types.Transaction{}) 134 qcli.Send(msg, false) 135 require.True(t, qcli.Send(msg, false) == nil) 136 137 msg = qcli.NewMessage("p2p", types.EventFetchBlocks, &types.ReqBlocks{}) 138 qcli.Send(msg, false) 139 require.True(t, qcli.Send(msg, false) == nil) 140 141 msg = qcli.NewMessage("p2p", types.EventGetMempool, nil) 142 qcli.Send(msg, false) 143 require.True(t, qcli.Send(msg, false) == nil) 144 145 msg = qcli.NewMessage("p2p", types.EventPeerInfo, &types.P2PGetPeerReq{P2PType: "DHT"}) 146 qcli.Send(msg, false) 147 require.True(t, qcli.Send(msg, false) == nil) 148 149 msg = qcli.NewMessage("p2p", types.EventGetNetInfo, nil) 150 qcli.Send(msg, false) 151 require.True(t, qcli.Send(msg, false) == nil) 152 153 msg = qcli.NewMessage("p2p", types.EventFetchBlockHeaders, &types.ReqBlocks{}) 154 qcli.Send(msg, false) 155 require.True(t, qcli.Send(msg, false) == nil) 156 157 } 158 159 func newHost(subcfg *p2pty.P2PSubConfig, priv crypto.PrivKey, bandwidthTracker metrics.Reporter, maddr multiaddr.Multiaddr) host.Host { 160 p := &P2P{ctx: context.Background(), subCfg: subcfg} 161 options := p.buildHostOptions(priv, bandwidthTracker, maddr, nil) 162 h, err := libp2p.New(context.Background(), options...) 163 if err != nil { 164 return nil 165 } 166 return h 167 } 168 169 func testStreamEOFReSet(t *testing.T) { 170 171 r := rand.Reader 172 prvKey1, _, err := crypto.GenerateKeyPairWithReader(crypto.RSA, 2048, r) 173 if err != nil { 174 panic(err) 175 } 176 r = rand.Reader 177 prvKey2, _, err := crypto.GenerateKeyPairWithReader(crypto.RSA, 2048, r) 178 if err != nil { 179 panic(err) 180 } 181 182 r = rand.Reader 183 prvKey3, _, err := crypto.GenerateKeyPairWithReader(crypto.RSA, 2048, r) 184 if err != nil { 185 panic(err) 186 } 187 188 msgID := "/streamTest" 189 190 var subcfg, subcfg2, subcfg3 p2pty.P2PSubConfig 191 subcfg.Port = 22345 192 maddr, err := multiaddr.NewMultiaddr(fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", subcfg.Port)) 193 if err != nil { 194 panic(err) 195 } 196 h1 := newHost(&subcfg, prvKey1, nil, maddr) 197 t.Log("h1", h1.ID()) 198 //------------------------- 199 200 subcfg2.Port = 22346 201 maddr2, err := multiaddr.NewMultiaddr(fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", subcfg2.Port)) 202 if err != nil { 203 panic(err) 204 } 205 h2 := newHost(&subcfg2, prvKey2, nil, maddr2) 206 t.Log("h2", h2.ID()) 207 //------------------------------------- 208 209 subcfg3.Port = 22347 210 maddr3, err := multiaddr.NewMultiaddr(fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", subcfg3.Port)) 211 if err != nil { 212 panic(err) 213 } 214 h3 := newHost(&subcfg3, prvKey3, nil, maddr3) 215 t.Log("h3", h3.ID()) 216 h1.SetStreamHandler(protocol.ID(msgID), func(s core.Stream) { 217 t.Log("Meow! It worked!") 218 var buf []byte 219 _, err := s.Read(buf) 220 if err != nil { 221 t.Log("readStreamErr", err) 222 } 223 s.Close() 224 }) 225 226 h2.SetStreamHandler(protocol.ID(msgID), func(s core.Stream) { 227 t.Log("H2 Stream! It worked!") 228 s.Close() 229 }) 230 231 h3.SetStreamHandler(protocol.ID(msgID), func(s core.Stream) { 232 t.Log("H3 Stream! It worked!") 233 s.Conn().Close() 234 }) 235 236 h2info := peer.AddrInfo{ 237 ID: h2.ID(), 238 Addrs: h2.Addrs(), 239 } 240 var retrycount int 241 for { 242 err = h1.Connect(context.Background(), h2info) 243 if err != nil { 244 retrycount++ 245 if retrycount > 3 { 246 break 247 } 248 continue 249 } 250 251 break 252 } 253 254 require.NoError(t, err) 255 256 s, err := h1.NewStream(context.Background(), h2.ID(), protocol.ID(msgID)) 257 require.NoError(t, err) 258 259 s.Write([]byte("hello")) 260 var buf = make([]byte, 128) 261 _, err = s.Read(buf) 262 require.True(t, err != nil) 263 if err != nil { 264 //在stream关闭的时候返回EOF 265 t.Log("readStream from H2 Err", err) 266 require.Equal(t, err.Error(), "EOF") 267 } 268 269 h3info := peer.AddrInfo{ 270 ID: h3.ID(), 271 Addrs: h3.Addrs(), 272 } 273 274 err = h1.Connect(context.Background(), h3info) 275 require.NoError(t, err) 276 s, err = h1.NewStream(context.Background(), h3.ID(), protocol.ID(msgID)) 277 require.NoError(t, err) 278 279 s.Write([]byte("hello")) 280 _, err = s.Read(buf) 281 require.True(t, err != nil) 282 if err != nil { 283 //在连接断开的时候,返回 stream reset 284 t.Log("readStream from H3 Err", err) 285 require.Equal(t, err.Error(), "stream reset") 286 } 287 288 } 289 290 func Test_pubkey(t *testing.T) { 291 priv, pub, err := GenPrivPubkey() 292 require.Nil(t, err) 293 require.NotNil(t, priv, pub) 294 pubstr, err := GenPubkey(hex.EncodeToString(priv)) 295 require.Nil(t, err) 296 require.Equal(t, pubstr, hex.EncodeToString(pub)) 297 } 298 299 func testHost(t *testing.T) { 300 mcfg := &p2pty.P2PSubConfig{} 301 302 _, err := GenPubkey("123456") 303 require.NotNil(t, err) 304 305 priv, pub, err := GenPrivPubkey() 306 require.Nil(t, err) 307 t.Log("priv size", len(priv)) 308 cpriv, err := crypto.UnmarshalSecp256k1PrivateKey(priv) 309 require.Nil(t, err) 310 311 maddr, err := multiaddr.NewMultiaddr(fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", 26666)) 312 313 if err != nil { 314 return 315 } 316 mcfg.RelayHop = true 317 mcfg.MaxConnectNum = 10000 318 host := newHost(mcfg, cpriv, nil, maddr) 319 hpub := host.Peerstore().PubKey(host.ID()) 320 hpb, err := hpub.Raw() 321 require.Nil(t, err) 322 require.Equal(t, hpb, pub) 323 host.Close() 324 } 325 326 func testAddrbook(t *testing.T, cfg *types.P2P) { 327 cfg.DbPath = cfg.DbPath + "test/" 328 addrbook := NewAddrBook(cfg) 329 priv, pub := addrbook.GetPrivPubKey() 330 require.Equal(t, priv, "") 331 require.Equal(t, pub, "") 332 var paddrinfos []peer.AddrInfo 333 paddrinfos = append(paddrinfos, peer.AddrInfo{}) 334 addrbook.SaveAddr(paddrinfos) 335 addrbook.Randkey() 336 priv, pub = addrbook.GetPrivPubKey() 337 addrbook.saveKey(priv, pub) 338 ok := addrbook.loadDb() 339 require.True(t, ok) 340 341 } 342 343 func Test_LocalAddr(t *testing.T) { 344 seedip := "120.24.92.123:13803" 345 t.Log("seedip", seedip) 346 spliteIP := strings.Split(seedip, ":") 347 conn, err := net.DialTimeout("tcp4", net.JoinHostPort(spliteIP[0], spliteIP[1]), time.Second) 348 if err != nil { 349 t.Log("Could not dial remote peer") 350 return 351 } 352 353 defer conn.Close() 354 localAddr := conn.LocalAddr().String() 355 t.Log("test localAddr", localAddr) 356 } 357 func Test_Id(t *testing.T) { 358 encodeIDStr := "16Uiu2HAm7vDB7XDuEv8XNPcoPqumVngsjWoogGXENNDXVYMiCJHM" 359 pubkey, err := PeerIDToPubkey(encodeIDStr) 360 require.Nil(t, err) 361 require.Equal(t, pubkey, "02b99bc73bfb522110634d5644d476b21b3171eefab517da0646ef2aba39dbf4a0") 362 363 } 364 func Test_genAddrInfos(t *testing.T) { 365 peer := fmt.Sprintf("/ip4/%s/tcp/%d/p2p/%v", "192.168.105.123", 3001, "16Uiu2HAmK9PAPYoTzHnobzB5nQFnY7p9ZVcJYQ1BgzKCr7izAhbJ") 366 addrinfos := genAddrInfos([]string{peer}) 367 require.NotNil(t, addrinfos) 368 require.Equal(t, 1, len(addrinfos)) 369 peer = fmt.Sprintf("/ip4/%s/tcp/%d/p2p/%v", "afasfase", 3001, "16Uiu2HAmK9PAPYoTzHnobzB5nQFnY7p9ZVcJYQ1BgzKCr7izAhbJ") 370 addrinfos = genAddrInfos([]string{peer}) 371 require.Nil(t, addrinfos) 372 peer2 := fmt.Sprintf("/ip4/%s/tcp/%d/p2p/%v", "192.168.105.124", 3001, "16Uiu2HAkxg1xyu3Ja2MERb3KyBev2CAKXvZwoe6EQgyg2tstg9wr") 373 peer3 := fmt.Sprintf("%v:%d", "192.168.105.124", 3001) 374 addrinfos = genAddrInfos([]string{peer, peer2, peer3}) 375 require.NotNil(t, addrinfos) 376 require.Equal(t, 1, len(addrinfos)) 377 t.Log(addrinfos[0].Addrs[0].String()) 378 } 379 func Test_p2p(t *testing.T) { 380 381 cfg := types.NewTuringchainConfig(types.ReadFile("../../../cmd/turingchain/turingchain.test.toml")) 382 q := queue.New("channel") 383 datadir := util.ResetDatadir(cfg.GetModuleConfig(), "$TEMP/") 384 q.SetConfig(cfg) 385 processMsg(q) 386 387 defer func(path string) { 388 389 if err := os.RemoveAll(path); err != nil { 390 log.Error("removeTestDatadir", "err", err.Error()) 391 } 392 t.Log("removed path", path) 393 }(datadir) 394 395 var tcfg types.P2P 396 tcfg.Driver = "leveldb" 397 tcfg.DbCache = 4 398 tcfg.DbPath = datadir 399 testAddrbook(t, &tcfg) 400 401 var mcfg p2pty.P2PSubConfig 402 types.MustDecode(cfg.GetSubConfig().P2P[p2pty.DHTTypeName], &mcfg) 403 mcfg.DHTDataPath = datadir 404 jcfg, err := json.Marshal(mcfg) 405 require.Nil(t, err) 406 cfg.GetSubConfig().P2P[p2pty.DHTTypeName] = jcfg 407 p2p := NewP2p(cfg, q.Client()) 408 dhtp2p := p2p.(*P2P) 409 t.Log("listpeer", dhtp2p.discovery.ListPeers()) 410 411 err = dhtp2p.discovery.Update(dhtp2p.host.ID()) 412 t.Log("discovery update", err) 413 pinfo := dhtp2p.discovery.FindLocalPeers([]peer.ID{dhtp2p.host.ID()}) 414 t.Log("findlocalPeers", pinfo) 415 //netw := swarmt.GenSwarm(t, context.Background()) 416 //h2 := bhost.NewBlankHost(netw) 417 //dhtp2p.pruePeers(h2.ID(), true) 418 dhtp2p.discovery.Remove(dhtp2p.host.ID()) 419 testP2PEvent(t, q.Client()) 420 421 testStreamEOFReSet(t) 422 testHost(t) 423 p2p.CloseP2P() 424 time.Sleep(time.Second) 425 }