github.com/kaisenlinux/docker.io@v0.0.0-20230510090727-ea55db55fac7/swarmkit/manager/state/raft/util.go

package raft

import (
	"context"
	"net"
	"time"

	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/manager/state"
	"github.com/docker/swarmkit/manager/state/store"
	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

// dial returns a gRPC client connection to the given address.
func dial(addr string, protocol string, creds credentials.TransportCredentials, timeout time.Duration) (*grpc.ClientConn, error) {
	// The gRPC dialer connects to a proxy first. Provide a custom dialer here to avoid that.
	grpcOptions := []grpc.DialOption{
		grpc.WithBackoffMaxDelay(2 * time.Second),
		grpc.WithTransportCredentials(creds),
		grpc.WithUnaryInterceptor(grpc_prometheus.UnaryClientInterceptor),
		grpc.WithStreamInterceptor(grpc_prometheus.StreamClientInterceptor),
		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
			return net.DialTimeout("tcp", addr, timeout)
		}),
	}

	if timeout != 0 {
		grpcOptions = append(grpcOptions, grpc.WithTimeout(timeout))
	}

	return grpc.Dial(addr, grpcOptions...)
}

// Register registers the node's raft server and raft membership server on the
// given gRPC server.
func Register(server *grpc.Server, node *Node) {
	api.RegisterRaftServer(server, node)
	api.RegisterRaftMembershipServer(server, node)
}

// WaitForLeader waits until the node observes a leader in the cluster. It
// returns an error if ctx is cancelled before a leader appears.
func WaitForLeader(ctx context.Context, n *Node) error {
	_, err := n.Leader()
	if err == nil {
		return nil
	}
	ticker := time.NewTicker(50 * time.Millisecond)
	defer ticker.Stop()
	for err != nil {
		select {
		case <-ticker.C:
		case <-ctx.Done():
			return ctx.Err()
		}
		_, err = n.Leader()
	}
	return nil
}

// WaitForCluster waits until the node observes that the cluster-wide config is
// committed to raft. This ensures that we can see and serve information
// related to the cluster.
func WaitForCluster(ctx context.Context, n *Node) (cluster *api.Cluster, err error) {
	watch, cancel := state.Watch(n.MemoryStore().WatchQueue(), api.EventCreateCluster{})
	defer cancel()

	var clusters []*api.Cluster
	n.MemoryStore().View(func(readTx store.ReadTx) {
		clusters, err = store.FindClusters(readTx, store.ByName(store.DefaultClusterName))
	})

	if err != nil {
		return nil, err
	}

	if len(clusters) == 1 {
		cluster = clusters[0]
	} else {
		select {
		case e := <-watch:
			cluster = e.(api.EventCreateCluster).Cluster
		case <-ctx.Done():
			return nil, ctx.Err()
		}
	}

	return cluster, nil
}
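
// Usage sketch (illustrative only, not part of the original file): a minimal
// flow that dials a raft peer, registers the local node's raft services, and
// blocks until a leader and the cluster object are visible. The joinAndWait
// name and the caller-supplied server, node, peerAddr, and creds values are
// assumptions for this example; in practice the dialed connection would also
// be used for membership RPCs against the peer.
//
//	func joinAndWait(ctx context.Context, server *grpc.Server, node *Node, peerAddr string, creds credentials.TransportCredentials) error {
//		// Dial the peer with a bounded timeout; dial always uses TCP internally.
//		conn, err := dial(peerAddr, "tcp", creds, 10*time.Second)
//		if err != nil {
//			return err
//		}
//		defer conn.Close()
//
//		// Expose this node's raft and raft membership services.
//		Register(server, node)
//
//		// Block until a leader is elected and the cluster object is committed.
//		if err := WaitForLeader(ctx, node); err != nil {
//			return err
//		}
//		if _, err := WaitForCluster(ctx, node); err != nil {
//			return err
//		}
//		return nil
//	}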