github.com/cilium/cilium@v1.16.2/pkg/hubble/relay/server/server_test.go

// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium

package server

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net"
	"net/netip"
	"path/filepath"
	"testing"
	"time"

	"github.com/cilium/fake"
	"github.com/google/gopacket/layers"
	"github.com/sirupsen/logrus"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/reflection"
	"google.golang.org/protobuf/types/known/fieldmaskpb"

	flowpb "github.com/cilium/cilium/api/v1/flow"
	observerpb "github.com/cilium/cilium/api/v1/observer"
	"github.com/cilium/cilium/pkg/hubble/container"
	"github.com/cilium/cilium/pkg/hubble/observer"
	"github.com/cilium/cilium/pkg/hubble/observer/observeroption"
	observerTypes "github.com/cilium/cilium/pkg/hubble/observer/types"
	"github.com/cilium/cilium/pkg/hubble/parser"
	"github.com/cilium/cilium/pkg/hubble/parser/getters"
	peerTypes "github.com/cilium/cilium/pkg/hubble/peer/types"
	"github.com/cilium/cilium/pkg/hubble/relay/defaults"
	relayObserver "github.com/cilium/cilium/pkg/hubble/relay/observer"
	"github.com/cilium/cilium/pkg/hubble/relay/pool"
	poolTypes "github.com/cilium/cilium/pkg/hubble/relay/pool/types"
	"github.com/cilium/cilium/pkg/hubble/server"
	"github.com/cilium/cilium/pkg/hubble/server/serveroption"
	"github.com/cilium/cilium/pkg/hubble/testutils"
	"github.com/cilium/cilium/pkg/monitor"
	monitorAPI "github.com/cilium/cilium/pkg/monitor/api"
)

var log *logrus.Logger

func init() {
	log = logrus.New()
	log.SetOutput(io.Discard)
}

// noopParser returns a flow parser backed by fake getters, so that payloads
// can be decoded without a running agent. Endpoint lookups are answered from
// the endpoints map below.
func noopParser(t testing.TB) *parser.Parser {
	pp, err := parser.New(
		log,
		&testutils.FakeEndpointGetter{
			OnGetEndpointInfo: func(ip netip.Addr) (endpoint getters.EndpointInfo, ok bool) {
				endpoint, ok = endpoints[ip.String()]
				return
			},
			OnGetEndpointInfoByID: func(id uint16) (endpoint getters.EndpointInfo, ok bool) {
				return nil, false
			},
		},
		&testutils.NoopIdentityGetter,
		&testutils.NoopDNSGetter,
		&testutils.NoopIPGetter,
		&testutils.NoopServiceGetter,
		&testutils.NoopLinkGetter,
		&testutils.NoopPodMetadataGetter,
	)
	require.NoError(t, err)
	return pp
}

// endpoints maps fake IPv4 addresses to fake endpoint metadata.
var endpoints map[string]*testutils.FakeEndpointInfo

func init() {
	endpoints = make(map[string]*testutils.FakeEndpointInfo, 254)
	for i := 0; i < 254; i++ {
		ip := fake.IP(fake.WithIPv4())
		endpoints[ip] = &testutils.FakeEndpointInfo{
			ID:           uint64(i),
			IPv4:         net.ParseIP(ip),
			PodNamespace: fake.K8sNamespace(),
			PodName:      fake.K8sPodName(),
			Labels:       fake.K8sLabels(),
		}
	}
}

// getRandomEndpoint returns an arbitrary endpoint; Go's unspecified map
// iteration order provides enough randomness for this test.
func getRandomEndpoint() *testutils.FakeEndpointInfo {
	for _, v := range endpoints {
		return v
	}
	return nil
}

// newHubbleObserver creates a local observer server for nodeName and enqueues
// numFlows synthetic TCP trace events on its monitor channel.
func newHubbleObserver(t testing.TB, nodeName string, numFlows int) *observer.LocalObserverServer {
	queueSize := numFlows

	pp := noopParser(t)
	nsMgr := observer.NewNamespaceManager()
	s, err := observer.NewLocalServer(pp, nsMgr, log,
		observeroption.WithMaxFlows(container.Capacity65535),
		observeroption.WithMonitorBuffer(queueSize),
	)
	require.NoError(t, err)

	m := s.GetEventsChannel()

	for i := 0; i < numFlows; i++ {
		tn := monitor.TraceNotifyV0{Type: byte(monitorAPI.MessageTypeTrace)}
		src := getRandomEndpoint()
		dst := getRandomEndpoint()
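		// Build a decodable Ethernet/IPv4/TCP payload between the two fake
		// endpoints; the parser resolves pod name and namespace from these
		// addresses via the endpoints map.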
		srcMAC, _ := net.ParseMAC(fake.MAC())
		dstMAC, _ := net.ParseMAC(fake.MAC())
		data := testutils.MustCreateL3L4Payload(tn,
			&layers.Ethernet{
				SrcMAC:       srcMAC,
				DstMAC:       dstMAC,
				EthernetType: layers.EthernetTypeIPv4,
			},
			&layers.IPv4{
				SrcIP:    src.IPv4,
				DstIP:    dst.IPv4,
				Protocol: layers.IPProtocolTCP,
			},
			&layers.TCP{
				SrcPort: layers.TCPPort(fake.Port()),
				DstPort: layers.TCPPort(fake.Port()),
				ACK:     true,
				PSH:     i%4 == 0,
			})
		event := &observerTypes.MonitorEvent{
			Timestamp: time.Unix(int64(i+1), 0),
			NodeName:  nodeName,
			Payload: &observerTypes.PerfEvent{
				Data: data,
				CPU:  0,
			},
		}
		m <- event
	}

	return s
}

// newHubblePeer serves hubbleObserver over an insecure unix socket and tears
// the server down once ctx is cancelled.
func newHubblePeer(t testing.TB, ctx context.Context, address string, hubbleObserver *observer.LocalObserverServer) {
	options := []serveroption.Option{
		serveroption.WithInsecure(),
		serveroption.WithUnixSocketListener(address),
		serveroption.WithObserverService(hubbleObserver),
	}

	srv, err := server.NewServer(log, options...)
	require.NoError(t, err)

	go func() {
		if err := srv.Serve(); err != nil {
			t.Log(err)
			t.Fail()
		}
	}()

	go func() {
		<-ctx.Done()
		close(hubbleObserver.GetEventsChannel())
		<-hubbleObserver.GetStopped()
		srv.Stop()
	}()
}

// benchmarkRelayGetFlows measures end-to-end GetFlows throughput through
// hubble-relay across several fake peers, optionally applying a field mask.
func benchmarkRelayGetFlows(b *testing.B, withFieldMask bool) {
	tmp := b.TempDir()
	root := "unix://" + filepath.Join(tmp, "peer-")
	ctx := context.Background()
	numFlows := b.N
	numPeers := 2

	// FIXME: number of peers should be constant so that it scales linearly with b.N
	if numFlows > 65535*2 {
		numPeers = numFlows/65535 + 1
	}

	// Create hubble servers listening on unix sockets in a temporary directory.
	type peer struct {
		name     string
		address  string
		observer *observer.LocalObserverServer
	}
	peers := make([]peer, numPeers)
	flowsScheduled := 0
	for i := range peers {
		address := fmt.Sprintf("%s%d.sock", root, i)
		name := fake.K8sNodeName()
		numFlowsPerPeer := numFlows / len(peers)
		if i == len(peers)-1 {
			numFlowsPerPeer = numFlows - flowsScheduled
		}
		// Schedule one extra flow per peer: the last flow written to the
		// buffer cannot be retrieved.
		hubbleObserver := newHubbleObserver(b, name, numFlowsPerPeer+1)
		newHubblePeer(b, ctx, address, hubbleObserver)
		flowsScheduled += numFlowsPerPeer
		peers[i] = peer{name, address, hubbleObserver}
		go hubbleObserver.Start()
	}

	// Create hubble relay server and connect to all peers from the previous step.
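	// The builder dials each peer synchronously (WithBlock) and fails fast on
	// non-temporary dial errors, so a missing peer socket aborts setup
	// instead of hanging until the test timeout.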
	ccb := pool.GRPCClientConnBuilder{
		DialTimeout: defaults.DialTimeout,
		Options: []grpc.DialOption{
			grpc.WithTransportCredentials(insecure.NewCredentials()),
			grpc.WithBlock(),
			grpc.FailOnNonTempDialError(true),
			grpc.WithReturnConnectionError(),
		},
	}
	plr := &testutils.FakePeerLister{
		OnList: func() []poolTypes.Peer {
			ret := make([]poolTypes.Peer, len(peers))
			for i := range peers {
				conn, err := ccb.ClientConn(peers[i].address, "")
				require.NoError(b, err)
				ret[i] = poolTypes.Peer{
					Peer: peerTypes.Peer{
						Name: peers[i].name,
					},
					Conn: conn,
				}
			}
			return ret
		},
	}
	observerSrv, err := relayObserver.NewServer(
		plr,
		relayObserver.WithLogger(log),
	)
	require.NoError(b, err)

	grpcServer := grpc.NewServer()
	observerpb.RegisterObserverServer(grpcServer, observerSrv)
	reflection.Register(grpcServer)

	socket, err := net.Listen("tcp", "localhost:0")
	require.NoError(b, err)

	go grpcServer.Serve(socket)
	defer grpcServer.Stop()

	conn, err := ccb.ClientConn(socket.Addr().String(), "")
	require.NoError(b, err)
	client := observerpb.NewObserverClient(conn)

	// Make sure that all peers are connected.
	nodesResp, err := client.GetNodes(ctx, &observerpb.GetNodesRequest{})
	require.NoError(b, err)
	require.Equal(b, numPeers, len(nodesResp.Nodes))

	getFlowsReq := new(observerpb.GetFlowsRequest)
	if withFieldMask {
		fieldmask, err := fieldmaskpb.New(&flowpb.Flow{}, "time",
			"verdict", "drop_reason",
			"traffic_direction", "trace_observation_point", "Summary",
			"source.ID", "source.pod_name", "source.namespace",
			"destination.ID", "destination.pod_name", "destination.namespace",
			"l4.TCP.source_port",
		)
		require.NoError(b, err)
		getFlowsReq.FieldMask = fieldmask
		getFlowsReq.Experimental = &observerpb.GetFlowsRequest_Experimental{
			FieldMask: fieldmask,
		}
	}
	found := make([]*observerpb.Flow, 0, numFlows)
	b.StartTimer()
	c, err := client.GetFlows(ctx, getFlowsReq)
	require.NoError(b, err)

	for {
		flow, err := c.Recv()
		if errors.Is(err, io.EOF) {
			break
		}
		require.NoError(b, err)
		switch f := flow.ResponseTypes.(type) {
		case *observerpb.GetFlowsResponse_Flow:
			found = append(found, f.Flow)
		case *observerpb.GetFlowsResponse_NodeStatus:
		}
	}
	assert.Equal(b, numFlows, len(found))
	b.StopTimer()

	for _, f := range found {
		assert.NotEmpty(b, f.Source.PodName)
		assert.NotEmpty(b, f.Destination.PodName)
		assert.NotZero(b, f.Time)
		assert.NotEmpty(b, f.Summary)
		assert.NotZero(b, f.L4.GetTCP().SourcePort)
	}
}

func BenchmarkRelayGetFlowsWithFieldMask(b *testing.B) {
	benchmarkRelayGetFlows(b, true)
}

func BenchmarkRelayGetFlowsWithoutFieldMask(b *testing.B) {
	benchmarkRelayGetFlows(b, false)
}
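// The two benchmarks above differ only in whether a field mask trims each
// flow before it crosses the wire. A typical local invocation (the flags are
// illustrative, not mandated by this file): skip unit tests with -run and pin
// the iteration count with -benchtime so both variants stream the same number
// of flows, e.g.
//
//	go test -run='^$' -bench=BenchmarkRelayGetFlows -benchtime=10000x ./pkg/hubble/relay/server/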