github.com/cilium/cilium@v1.16.2/pkg/hubble/observer/local_observer_test.go

// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Hubble

package observer

import (
	"context"
	"fmt"
	"io"
	"net"
	"testing"
	"time"

	"github.com/cilium/fake"
	"github.com/google/gopacket/layers"
	"github.com/sirupsen/logrus"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/fieldmaskpb"
	"google.golang.org/protobuf/types/known/timestamppb"

	flowpb "github.com/cilium/cilium/api/v1/flow"
	observerpb "github.com/cilium/cilium/api/v1/observer"
	hubv1 "github.com/cilium/cilium/pkg/hubble/api/v1"
	"github.com/cilium/cilium/pkg/hubble/container"
	"github.com/cilium/cilium/pkg/hubble/observer/observeroption"
	observerTypes "github.com/cilium/cilium/pkg/hubble/observer/types"
	"github.com/cilium/cilium/pkg/hubble/parser"
	"github.com/cilium/cilium/pkg/hubble/testutils"
	"github.com/cilium/cilium/pkg/monitor"
	monitorAPI "github.com/cilium/cilium/pkg/monitor/api"
	"github.com/cilium/cilium/pkg/node"
	"github.com/cilium/cilium/pkg/node/types"
)

var (
	log       *logrus.Logger
	nsManager = NewNamespaceManager()
)

func init() {
	log = logrus.New()
	log.SetOutput(io.Discard)
}

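// noopParser builds a parser wired with the no-op getters from testutils, so
// events decode successfully without attaching any endpoint, identity, DNS,
// IP, service, link, or pod metadata.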
func noopParser(t *testing.T) *parser.Parser {
	pp, err := parser.New(
		log,
		&testutils.NoopEndpointGetter,
		&testutils.NoopIdentityGetter,
		&testutils.NoopDNSGetter,
		&testutils.NoopIPGetter,
		&testutils.NoopServiceGetter,
		&testutils.NoopLinkGetter,
		&testutils.NoopPodMetadataGetter,
	)
	require.NoError(t, err)
	return pp
}

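// TestNewLocalServer checks that a freshly constructed local observer server
// exposes a non-nil stop channel, payload parser, ring buffer, logger, and
// events channel.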
func TestNewLocalServer(t *testing.T) {
	pp := noopParser(t)
	s, err := NewLocalServer(pp, nsManager, log)
	require.NoError(t, err)
	assert.NotNil(t, s.GetStopped())
	assert.NotNil(t, s.GetPayloadParser())
	assert.NotNil(t, s.GetRingBuffer())
	assert.NotNil(t, s.GetLogger())
	assert.NotNil(t, s.GetEventsChannel())
}

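// TestLocalObserverServer_ServerStatus checks that a new server backed by a
// single-flow ring buffer reports zero seen and stored flows, a capacity of
// one, and a zero flow rate.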
func TestLocalObserverServer_ServerStatus(t *testing.T) {
	pp := noopParser(t)
	s, err := NewLocalServer(pp, nsManager, log, observeroption.WithMaxFlows(container.Capacity1))
	require.NoError(t, err)
	res, err := s.ServerStatus(context.Background(), &observerpb.ServerStatusRequest{})
	require.NoError(t, err)
	assert.Equal(t, uint64(0), res.SeenFlows)
	assert.Equal(t, uint64(0), res.NumFlows)
	assert.Equal(t, uint64(1), res.MaxFlows)
	assert.Equal(t, float64(0), res.FlowsRate)
}

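// TestGetFlowRate exercises getFlowRate against rings pre-populated with
// events at known time offsets. Only flows from the last minute contribute
// to the rate, and non-flow events (agent and debug events) are ignored.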
func TestGetFlowRate(t *testing.T) {
	type event struct {
		offset int
		event  interface{}
	}

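	// Each test case describes events by their age: offset is the number of
	// milliseconds before the reference time at which the event was written,
	// and a nil event defaults to a flow.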
	tcs := map[string]struct {
		ringCap container.Capacity
		events  []event
		rate    float64
	}{
		"0.5 Flow/s": {
			events: []event{
				{offset: 2000},
				{offset: 4000},
				{offset: 6000},
				{offset: 8000},
				{offset: 10000},
				{offset: 12000},
				{offset: 14000},
				{offset: 16000},
			},
			rate: 0.5,
		},
		"2 Flow/s": {
			events: []event{
				{offset: 500},
				{offset: 1000},
				{offset: 1500},
				{offset: 2000},
				{offset: 2500},
				{offset: 3000},
				{offset: 3500},
				{offset: 4000},
			},
			rate: 2,
		},
		"1 Flow/s  Full buffer": {
			ringCap: container.Capacity7,
			events: []event{
				{offset: 1000},
				{offset: 2000},
				{offset: 3000},
				{offset: 4000},
				{offset: 5000},
				{offset: 6000},
				{offset: 7000},
				{offset: 8000},
				{offset: 9000},
				{offset: 10000},
			},
			rate: 1,
		},
		"0.15 Flow/s  with flows older than 1 min": {
			events: []event{
				{offset: 1000},
				{offset: 2000},
				{offset: 3000},
				{offset: 4000},
				{offset: 5000},
				{offset: 6000},
				{offset: 7000},
				{offset: 8000},
				{offset: 9000},
				{offset: 61000},
			},
			rate: 0.15,
		},
		"1 Flow/s  with non flow events": {
			events: []event{
				{offset: 1000},
				{offset: 2000},
				{
					offset: 2500,
					event:  &flowpb.AgentEvent{},
				},
				{offset: 3000},
				{offset: 4000},
				{
					offset: 2500,
					event:  &flowpb.DebugEvent{},
				},
				{offset: 5000},
				{offset: 6000},
				{offset: 7000},
			},
			rate: 1,
		},
	}
	now := time.Now()

	for name, tc := range tcs {
		t.Run(name, func(t *testing.T) {
			var c container.Capacity = container.Capacity63
			if tc.ringCap != nil {
				c = tc.ringCap
			}
			ring := container.NewRing(c)
			for i := len(tc.events) - 1; i >= 0; i-- {
				ev := tc.events[i].event
				if ev == nil {
					// Default is flow
					ev = &flowpb.Flow{}
				}
				ring.Write(&hubv1.Event{
					Timestamp: timestamppb.New(now.Add(-1 * time.Duration(tc.events[i].offset) * time.Millisecond)),
					Event:     ev,
				})
			}
			// Dummy value so that we can actually read all flows
			ring.Write(&hubv1.Event{
				Timestamp: timestamppb.New(now.Add(time.Second)),
			})
			rate, err := getFlowRate(ring, now)
			assert.NoError(t, err)
			assert.Equal(t, tc.rate, rate)
		})
	}
}

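// TestLocalObserverServer_GetFlows feeds 100 trace events through the
// observer, then exercises GetFlows three ways: fetching the most recent
// flows, fetching the earliest flows, and fetching a subset of fields via a
// field mask. It also checks that an invalid field mask is rejected.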
func TestLocalObserverServer_GetFlows(t *testing.T) {
	numFlows := 100
	queueSize := 0
	i := 0

	var output []*observerpb.Flow
	fakeServer := &testutils.FakeGetFlowsServer{
		OnSend: func(response *observerpb.GetFlowsResponse) error {
			assert.Equal(t, response.GetTime(), response.GetFlow().GetTime())
			assert.Equal(t, response.GetNodeName(), response.GetFlow().GetNodeName())
			output = append(output, proto.Clone(response.GetFlow()).(*flowpb.Flow))
			i++
			return nil
		},
		FakeGRPCServerStream: &testutils.FakeGRPCServerStream{
			OnContext: func() context.Context {
				return context.Background()
			},
		},
	}

	pp := noopParser(t)
	s, err := NewLocalServer(pp, nsManager, log,
		observeroption.WithMaxFlows(container.Capacity127),
		observeroption.WithMonitorBuffer(queueSize),
	)
	require.NoError(t, err)
	go s.Start()

	m := s.GetEventsChannel()
	input := make([]*observerpb.Flow, numFlows)

	for i := 0; i < numFlows; i++ {
		tn := monitor.TraceNotifyV0{Type: byte(monitorAPI.MessageTypeTrace)}
		macOnly := func(mac string) net.HardwareAddr {
			m, _ := net.ParseMAC(mac)
			return m
		}
		data := testutils.MustCreateL3L4Payload(tn, &layers.Ethernet{
			SrcMAC: macOnly(fake.MAC()),
			DstMAC: macOnly(fake.MAC()),
		})

		event := &observerTypes.MonitorEvent{
			Timestamp: time.Unix(int64(i), 0),
			NodeName:  fmt.Sprintf("node #%03d", i),
			Payload: &observerTypes.PerfEvent{
				Data: data,
				CPU:  0,
			},
		}
		m <- event
		ev, err := pp.Decode(event)
		require.NoError(t, err)
		input[i] = ev.GetFlow()
	}
	close(s.GetEventsChannel())
	<-s.GetStopped()

	// testing getting recent events
	req := &observerpb.GetFlowsRequest{Number: uint64(10)}
	err = s.GetFlows(req, fakeServer)
	assert.NoError(t, err)
	assert.Equal(t, req.Number, uint64(i))

	// instead of looking at exactly the last 10, we look at the last 10, minus
	// 1, because the last event is inaccessible due to how the ring buffer
	// works.
	last10Input := input[numFlows-11 : numFlows-1]
	for i := range output {
		assert.True(t, proto.Equal(last10Input[i], output[i]))
	}

	// Clear out the output slice, as we're making another request
	output = nil
	i = 0
	// testing getting earliest events
	req = &observerpb.GetFlowsRequest{Number: uint64(10), First: true}
	err = s.GetFlows(req, fakeServer)
	assert.NoError(t, err)
	assert.Equal(t, req.Number, uint64(i))

	first10Input := input[0:10]
	for i := range output {
		assert.True(t, proto.Equal(first10Input[i], output[i]))
	}

	// Clear out the output slice, as we're making another request
	output = nil
	i = 0
	// testing getting subset of fields with field mask
	fmPaths := []string{"trace_observation_point", "ethernet.source"}
	req = &observerpb.GetFlowsRequest{
		Number:    uint64(10),
		FieldMask: &fieldmaskpb.FieldMask{Paths: fmPaths},
		Experimental: &observerpb.GetFlowsRequest_Experimental{
			FieldMask: &fieldmaskpb.FieldMask{Paths: fmPaths},
		},
	}
	err = s.GetFlows(req, fakeServer)
	assert.NoError(t, err)
	assert.Equal(t, req.Number, uint64(i))

	for i, out := range output {
		assert.Equal(t, last10Input[i].TraceObservationPoint, out.TraceObservationPoint)
		assert.Equal(t, last10Input[i].Ethernet.Source, out.Ethernet.Source)
		assert.Empty(t, out.Ethernet.Destination)
		assert.Empty(t, out.Verdict)
		assert.Empty(t, out.Summary)
		// Keeps original as is
		assert.NotEmpty(t, last10Input[i].Summary)
	}

	// Clear out the output slice, as we're making another request
	output = nil
	i = 0
	// testing that an empty field mask path is rejected as invalid
	req = &observerpb.GetFlowsRequest{
		Number:    uint64(10),
		FieldMask: &fieldmaskpb.FieldMask{Paths: []string{""}},
		Experimental: &observerpb.GetFlowsRequest_Experimental{
			FieldMask: &fieldmaskpb.FieldMask{Paths: []string{""}},
		},
	}
	err = s.GetFlows(req, fakeServer)
	assert.EqualError(t, err, "invalid fieldmask")
}

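// TestLocalObserverServer_GetAgentEvents streams an agent-start notification
// followed by alternating IPCache upsert and service-delete notifications
// through the observer, and verifies that GetAgentEvents returns them with
// the expected payloads.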
func TestLocalObserverServer_GetAgentEvents(t *testing.T) {
	numEvents := 100
	queueSize := 0
	req := &observerpb.GetAgentEventsRequest{
		Number: uint64(numEvents),
	}
	cidr := "10.0.0.0/8"
	agentEventsReceived := 0
	agentStartedReceived := 0
	fakeServer := &testutils.FakeGetAgentEventsServer{
		OnSend: func(response *observerpb.GetAgentEventsResponse) error {
			switch ev := response.GetAgentEvent(); ev.GetType() {
			case flowpb.AgentEventType_AGENT_STARTED:
				startEvent := response.GetAgentEvent().GetAgentStart()
				assert.NotNil(t, startEvent)
				assert.Equal(t, startEvent.GetTime().GetSeconds(), int64(42))
				assert.Equal(t, startEvent.GetTime().GetNanos(), int32(1))
				agentStartedReceived++
			case flowpb.AgentEventType_IPCACHE_UPSERTED:
				ipcacheUpdate := response.GetAgentEvent().GetIpcacheUpdate()
				assert.NotNil(t, ipcacheUpdate)
				assert.Equal(t, cidr, ipcacheUpdate.GetCidr())
			case flowpb.AgentEventType_SERVICE_DELETED:
				serviceDelete := response.GetAgentEvent().GetServiceDelete()
				assert.NotNil(t, serviceDelete)
			default:
				assert.Fail(t, "unexpected agent event", ev)
			}
			agentEventsReceived++
			return nil
		},
		FakeGRPCServerStream: &testutils.FakeGRPCServerStream{
			OnContext: func() context.Context {
				return context.Background()
			},
		},
	}

	pp := noopParser(t)
	s, err := NewLocalServer(pp, nsManager, log,
		observeroption.WithMonitorBuffer(queueSize),
	)
	require.NoError(t, err)
	go s.Start()

	m := s.GetEventsChannel()
	for i := 0; i < numEvents; i++ {
		ts := time.Unix(int64(i), 0)
		node := fmt.Sprintf("node #%03d", i)
		var msg monitorAPI.AgentNotifyMessage
		if i == 0 {
			msg = monitorAPI.StartMessage(time.Unix(42, 1))
		} else if i%2 == 1 {
			msg = monitorAPI.IPCacheUpsertedMessage(cidr, uint32(i), nil, net.ParseIP("10.1.5.4"), nil, 0xff, "default", "foobar")
		} else {
			msg = monitorAPI.ServiceDeleteMessage(uint32(i))
		}
		m <- &observerTypes.MonitorEvent{
			Timestamp: ts,
			NodeName:  node,
			Payload: &observerTypes.AgentEvent{
				Type:    monitorAPI.MessageTypeAgent,
				Message: msg,
			},
		}
	}
	close(s.GetEventsChannel())
	<-s.GetStopped()
	err = s.GetAgentEvents(req, fakeServer)
	assert.NoError(t, err)
	assert.Equal(t, 1, agentStartedReceived)
	// FIXME:
	// This should be assert.Equal(t, numEvents, agentEventsReceived)
	// A bug in the ring buffer prevents this from succeeding
	assert.Greater(t, agentEventsReceived, 0)
}

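// TestLocalObserverServer_GetFlows_Follow_Since verifies that a follow
// request with a Since timestamp first replays the buffered flows newer than
// Since, then keeps delivering flows produced while the request is running.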
func TestLocalObserverServer_GetFlows_Follow_Since(t *testing.T) {
	numFlows := 100
	queueSize := 0

	since := time.Unix(5, 0)
	sinceProto := timestamppb.New(since)
	assert.NoError(t, sinceProto.CheckValid())
	req := &observerpb.GetFlowsRequest{
		Since:  sinceProto,
		Follow: true,
	}

	pp := noopParser(t)
	s, err := NewLocalServer(pp, nsManager, log,
		observeroption.WithMaxFlows(container.Capacity127),
		observeroption.WithMonitorBuffer(queueSize),
	)
	require.NoError(t, err)
	go s.Start()

	generateFlows := func(from, to int, m chan<- *observerTypes.MonitorEvent) {
		for i := from; i < to; i++ {
			tn := monitor.TraceNotifyV0{Type: byte(monitorAPI.MessageTypeTrace)}
			data := testutils.MustCreateL3L4Payload(tn)
			m <- &observerTypes.MonitorEvent{
				Timestamp: time.Unix(int64(i), 0),
				NodeName:  fmt.Sprintf("node #%03d", i),
				Payload: &observerTypes.PerfEvent{
					Data: data,
					CPU:  0,
				},
			}
		}
	}

	// produce first half of flows before request and second half during request
	m := s.GetEventsChannel()
	generateFlows(0, numFlows/2, m)

	receivedFlows := 0
	fakeServer := &testutils.FakeGetFlowsServer{
		OnSend: func(response *observerpb.GetFlowsResponse) error {
			receivedFlows++
			assert.Equal(t, response.GetTime(), response.GetFlow().GetTime())
			assert.Equal(t, response.GetNodeName(), response.GetFlow().GetNodeName())

			assert.NoError(t, response.GetTime().CheckValid())
			ts := response.GetTime().AsTime()
			assert.True(t, !ts.Before(since), "flow had invalid timestamp. ts=%s, since=%s", ts, since)

			// start producing flows once we have seen the most recent one.
			// Most recently produced flow has timestamp (numFlows/2)-1, but is
			// inaccessible to readers due to the way the ring buffer works
			if int(ts.Unix()) == (numFlows/2)-2 {
				go func() {
					generateFlows(numFlows/2, numFlows, m)
					close(m)
				}()
			}
			// terminate the request once we have seen enough flows.
			// we expect to see all generated flows, minus the ones filtered
			// out by 'since', minus the one inaccessible in the ring buffer
			if receivedFlows == numFlows-int(since.Unix())-1 {
				// this will terminate the follow request
				return io.EOF
			}

			return nil
		},
		FakeGRPCServerStream: &testutils.FakeGRPCServerStream{
			OnContext: func() context.Context {
				return context.Background()
			},
		},
	}

	err = s.GetFlows(req, fakeServer)
	<-s.GetStopped()
	assert.Equal(t, err, io.EOF)
}

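// TestHooks verifies the OnMonitorEvent and OnDecodedFlow hook contract:
// when a hook returns true for an event, subsequent OnMonitorEvent hooks are
// skipped and the event is not decoded.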
func TestHooks(t *testing.T) {
	numFlows := 10
	queueSize := 0

	seenFlows := int64(0)
	skipEveryNFlows := int64(2)
	onMonitorEventFirst := func(ctx context.Context, event *observerTypes.MonitorEvent) (bool, error) {
		seenFlows++

		assert.Equal(t, event.Timestamp.Unix(), seenFlows-1)
		if seenFlows%skipEveryNFlows == 0 {
			return true, nil
		}
		return false, nil
	}
	onMonitorEventSecond := func(ctx context.Context, event *observerTypes.MonitorEvent) (bool, error) {
		if seenFlows%skipEveryNFlows == 0 {
			assert.Fail(t, "server did not break loop after onMonitorEventFirst")
		}
		return false, nil
	}
	onDecodedFlow := func(ctx context.Context, f *flowpb.Flow) (bool, error) {
		if seenFlows%skipEveryNFlows == 0 {
			assert.Fail(t, "server did not stop decoding after onMonitorEventFirst")
		}
		return false, nil
	}

	pp := noopParser(t)
	s, err := NewLocalServer(pp, nsManager, log,
		observeroption.WithMaxFlows(container.Capacity15),
		observeroption.WithMonitorBuffer(queueSize),
		observeroption.WithOnMonitorEventFunc(onMonitorEventFirst),
		observeroption.WithOnMonitorEventFunc(onMonitorEventSecond),
		observeroption.WithOnDecodedFlowFunc(onDecodedFlow),
	)
	require.NoError(t, err)
	go s.Start()

	m := s.GetEventsChannel()
	for i := 0; i < numFlows; i++ {
		tn := monitor.TraceNotifyV0{Type: byte(monitorAPI.MessageTypeTrace)}
		data := testutils.MustCreateL3L4Payload(tn)
		m <- &observerTypes.MonitorEvent{
			Timestamp: time.Unix(int64(i), 0),
			NodeName:  fmt.Sprintf("node #%03d", i),
			Payload: &observerTypes.PerfEvent{
				Data: data,
				CPU:  0,
			},
		}
	}
	close(s.GetEventsChannel())
	<-s.GetStopped()
	assert.Equal(t, int64(numFlows), seenFlows)
}

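// TestLocalObserverServer_OnFlowDelivery verifies that flows for which an
// OnFlowDelivery hook returns true are dropped from the response stream;
// here the hook filters out every second flow.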
func TestLocalObserverServer_OnFlowDelivery(t *testing.T) {
	numFlows := 100
	queueSize := 0
	req := &observerpb.GetFlowsRequest{Number: uint64(100)}
	flowsReceived := 0
	fakeServer := &testutils.FakeGetFlowsServer{
		OnSend: func(response *observerpb.GetFlowsResponse) error {
			assert.Equal(t, response.GetTime(), response.GetFlow().GetTime())
			assert.Equal(t, response.GetNodeName(), response.GetFlow().GetNodeName())
			flowsReceived++
			return nil
		},
		FakeGRPCServerStream: &testutils.FakeGRPCServerStream{
			OnContext: func() context.Context {
				return context.Background()
			},
		},
	}

	count := 0
	onFlowDelivery := func(ctx context.Context, f *flowpb.Flow) (bool, error) {
		count++
		if count%2 == 0 {
			return true, nil
		}
		return false, nil
	}

	pp := noopParser(t)
	s, err := NewLocalServer(pp, nsManager, log,
		observeroption.WithMaxFlows(container.Capacity127),
		observeroption.WithMonitorBuffer(queueSize),
		observeroption.WithOnFlowDeliveryFunc(onFlowDelivery),
	)
	require.NoError(t, err)
	go s.Start()

	m := s.GetEventsChannel()
	for i := 0; i < numFlows; i++ {
		tn := monitor.TraceNotifyV0{Type: byte(monitorAPI.MessageTypeTrace)}
		data := testutils.MustCreateL3L4Payload(tn)
		m <- &observerTypes.MonitorEvent{
			Timestamp: time.Unix(int64(i), 0),
			NodeName:  fmt.Sprintf("node #%03d", i),
			Payload: &observerTypes.PerfEvent{
				Data: data,
				CPU:  0,
			},
		}
	}
	close(s.GetEventsChannel())
	<-s.GetStopped()
	err = s.GetFlows(req, fakeServer)
	assert.NoError(t, err)
	// Only every second flow should have been received
	assert.Equal(t, flowsReceived, numFlows/2)
}

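// TestLocalObserverServer_OnGetFlows verifies that the context returned by
// an OnGetFlows hook is propagated to OnFlowDelivery hooks for the same
// request; flows are only delivered when the context value is visible.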
func TestLocalObserverServer_OnGetFlows(t *testing.T) {
	numFlows := 100
	queueSize := 0
	req := &observerpb.GetFlowsRequest{Number: uint64(100)}
	flowsReceived := 0
	fakeServer := &testutils.FakeGetFlowsServer{
		OnSend: func(response *observerpb.GetFlowsResponse) error {
			assert.Equal(t, response.GetTime(), response.GetFlow().GetTime())
			assert.Equal(t, response.GetNodeName(), response.GetFlow().GetNodeName())
			flowsReceived++
			return nil
		},
		FakeGRPCServerStream: &testutils.FakeGRPCServerStream{
			OnContext: func() context.Context {
				return context.Background()
			},
		},
	}

	type contextKey string
	key := contextKey("foo")
	onGetFlows := func(ctx context.Context, req *observerpb.GetFlowsRequest) (context.Context, error) {
		return context.WithValue(ctx, key, 10), nil
	}

	onFlowDelivery := func(ctx context.Context, f *flowpb.Flow) (bool, error) {
		// Pass if context is available
		if ctx.Value(key) != nil {
			return false, nil
		}
		return true, nil
	}

	pp := noopParser(t)
	s, err := NewLocalServer(pp, nsManager, log,
		observeroption.WithMaxFlows(container.Capacity127),
		observeroption.WithMonitorBuffer(queueSize),
		observeroption.WithOnFlowDeliveryFunc(onFlowDelivery),
		observeroption.WithOnGetFlowsFunc(onGetFlows),
	)
	require.NoError(t, err)
	go s.Start()

	m := s.GetEventsChannel()
	for i := 0; i < numFlows; i++ {
		tn := monitor.TraceNotifyV0{Type: byte(monitorAPI.MessageTypeTrace)}
		data := testutils.MustCreateL3L4Payload(tn)
		m <- &observerTypes.MonitorEvent{
			Timestamp: time.Unix(int64(i), 0),
			NodeName:  fmt.Sprintf("node #%03d", i),
			Payload: &observerTypes.PerfEvent{
				Data: data,
				CPU:  0,
			},
		}
	}
	close(s.GetEventsChannel())
	<-s.GetStopped()
	err = s.GetFlows(req, fakeServer)
	assert.NoError(t, err)
	// FIXME:
	// This should be assert.Equal(t, numFlows, flowsReceived)
	// A bug in the ring buffer prevents this from succeeding
	assert.Greater(t, flowsReceived, 0)
}

// TestLocalObserverServer_NodeLabels tests the LocalNodeWatcher integration
// with the observer.
func TestLocalObserverServer_NodeLabels(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// local node setup.
	localNode := node.LocalNode{
		Node: types.Node{
			Name: "ip-1-2-3-4.us-west-2.compute.internal",
			Labels: map[string]string{
				"kubernetes.io/arch":            "amd64",
				"kubernetes.io/os":              "linux",
				"kubernetes.io/hostname":        "ip-1-2-3-4.us-west-2.compute.internal",
				"topology.kubernetes.io/region": "us-west-2",
				"topology.kubernetes.io/zone":   "us-west-2d",
			},
		},
	}
	localNodeWatcher, err := NewLocalNodeWatcher(ctx, node.NewTestLocalNodeStore(localNode))
	require.NoError(t, err)
	require.NotNil(t, localNodeWatcher)

	// fake hubble server setup.
	flowsReceived := 0
	req := &observerpb.GetFlowsRequest{Number: uint64(1)}
	fakeServer := &testutils.FakeGetFlowsServer{
		OnSend: func(response *observerpb.GetFlowsResponse) error {
			// NOTE: it is a bit hacky to access the localNodeWatcher cache
			// directly, but we have no use yet for an accessor method beyond
			// this package-local test.
			localNodeWatcher.mu.Lock()
			expected := localNodeWatcher.cache.labels
			localNodeWatcher.mu.Unlock()
			assert.Equal(t, expected, response.GetFlow().GetNodeLabels())
			flowsReceived++
			return nil
		},
		FakeGRPCServerStream: &testutils.FakeGRPCServerStream{
			OnContext: func() context.Context {
				return ctx
			},
		},
	}

	// local hubble observer setup.
	s, err := NewLocalServer(noopParser(t), nsManager, log,
		observeroption.WithOnDecodedFlow(localNodeWatcher),
	)
	require.NoError(t, err)
	go s.Start()

	// simulate a new monitor event.
	m := s.GetEventsChannel()
	tn := monitor.TraceNotifyV0{Type: byte(monitorAPI.MessageTypeTrace)}
	data := testutils.MustCreateL3L4Payload(tn)
	// NOTE: we need to send an extra event into Hubble's ring buffer to see
	// the first one sent.
	for range 2 {
		m <- &observerTypes.MonitorEvent{
			Timestamp: time.Now(),
			NodeName:  localNode.Name,
			Payload: &observerTypes.PerfEvent{
				Data: data,
				CPU:  0,
			},
		}
	}
	close(s.GetEventsChannel())
	<-s.GetStopped()

	// ensure that we've seen a flow.
	err = s.GetFlows(req, fakeServer)
	assert.NoError(t, err)
	assert.Equal(t, 1, flowsReceived)
}

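// TestLocalObserverServer_GetNamespaces verifies that GetNamespaces returns
// the namespaces known to the namespace manager ordered by cluster and then
// by namespace name.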
func TestLocalObserverServer_GetNamespaces(t *testing.T) {
	pp := noopParser(t)
	nsManager := NewNamespaceManager()
	nsManager.AddNamespace(&observerpb.Namespace{
		Namespace: "zzz",
	})
	nsManager.AddNamespace(&observerpb.Namespace{
		Namespace: "bbb",
		Cluster:   "some-cluster",
	})
	nsManager.AddNamespace(&observerpb.Namespace{
		Namespace: "aaa",
		Cluster:   "some-cluster",
	})
	s, err := NewLocalServer(pp, nsManager, log, observeroption.WithMaxFlows(container.Capacity1))
	require.NoError(t, err)
	res, err := s.GetNamespaces(context.Background(), &observerpb.GetNamespacesRequest{})
	require.NoError(t, err)
	expected := &observerpb.GetNamespacesResponse{
		Namespaces: []*observerpb.Namespace{
			{
				Namespace: "zzz",
			},
			{
				Namespace: "aaa",
				Cluster:   "some-cluster",
			},
			{
				Namespace: "bbb",
				Cluster:   "some-cluster",
			},
		},
	}
	assert.Equal(t, expected, res)
}

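// Benchmark_TrackNamespaces measures the per-flow cost of recording the
// source and destination namespaces of a flow.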
func Benchmark_TrackNamespaces(b *testing.B) {
	pp, err := parser.New(
		log,
		&testutils.NoopEndpointGetter,
		&testutils.NoopIdentityGetter,
		&testutils.NoopDNSGetter,
		&testutils.NoopIPGetter,
		&testutils.NoopServiceGetter,
		&testutils.NoopLinkGetter,
		&testutils.NoopPodMetadataGetter,
	)
	if err != nil {
		b.Fatal(err)
	}

	nsManager := NewNamespaceManager()
	s, err := NewLocalServer(pp, nsManager, log, observeroption.WithMaxFlows(container.Capacity1))
	if err != nil {
		b.Fatal(err)
	}
	f := &flowpb.Flow{
		Source:      &flowpb.Endpoint{Namespace: "foo"},
		Destination: &flowpb.Endpoint{Namespace: "bar"},
	}

	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		s.trackNamespaces(f)
	}
}