github.com/cilium/cilium@v1.16.2/pkg/proxy/logger/logger_test.go

// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium

package logger

import (
	"bytes"
	"context"
	"encoding/gob"
	"net"
	"strings"
	"sync"
	"testing"

	"github.com/cilium/dns"
	"github.com/cilium/hive/cell"
	"github.com/cilium/hive/hivetest"

	"github.com/cilium/cilium/pkg/hive"
	"github.com/cilium/cilium/pkg/maps/eventsmap"
	"github.com/cilium/cilium/pkg/monitor/agent"
	"github.com/cilium/cilium/pkg/monitor/agent/listener"
	"github.com/cilium/cilium/pkg/monitor/api"
	"github.com/cilium/cilium/pkg/monitor/payload"
	"github.com/cilium/cilium/pkg/node"
	"github.com/cilium/cilium/pkg/proxy/accesslog"
	"github.com/cilium/cilium/pkg/u8proto"
)

// mockLogRecord returns a log record similar to the one emitted in fqdn.go
// when notifying about DNS-related events.
func mockLogRecord() *LogRecord {
	return NewLogRecord(
		accesslog.TypeResponse,
		false,
		func(lr *LogRecord) {
			lr.LogRecord.TransportProtocol = accesslog.TransportProtocol(
				u8proto.ProtoIDs[strings.ToLower("udp")],
			)
		},
		LogTags.Verdict(
			accesslog.VerdictForwarded,
			"just a benchmark",
		),
		LogTags.Addressing(context.Background(), AddressingInfo{
			DstIPPort:   "15478",
			DstIdentity: 16,
			SrcIPPort:   "53",
			SrcIdentity: 1,
		}),
		LogTags.DNS(&accesslog.LogRecordDNS{
			Query: "data.test.svc.cluster.local",
			IPs: []net.IP{
				net.IPv4(1, 1, 1, 1),
				net.IPv4(2, 2, 2, 2),
				net.IPv4(3, 3, 3, 3),
			},
			TTL: 43200,
			CNAMEs: []string{
				"alt1.test.svc.cluster.local",
				"alt2.test.svc.cluster.local",
			},
			ObservationSource: accesslog.DNSSourceProxy,
			RCode:             dns.RcodeSuccess,
			QTypes:            []uint16{dns.TypeA, dns.TypeAAAA},
			AnswerTypes:       []uint16{dns.TypeA, dns.TypeAAAA},
		}),
	)
}

// MockMonitorListener is a mock type implementing the listener.MonitorListener
// interface for benchmarking purposes.
// Specifically, it mimics the behavior of agent.listenerv1_2.
type MockMonitorListener struct {
	queue chan *payload.Payload
}

// NewMockMonitorListener returns a MockMonitorListener ready to be used in the benchmarks below.
func NewMockMonitorListener(queueSize int) *MockMonitorListener {
	return &MockMonitorListener{
		queue: make(chan *payload.Payload, queueSize),
	}
}

// Drain starts draining the listener's internal queue in a separate goroutine.
func (ml *MockMonitorListener) Drain(ctx context.Context, wg *sync.WaitGroup) {
	go func() {
		defer wg.Done()

		for {
			select {
			case <-ctx.Done():
				return
			case pl := <-ml.queue:
				var bb bytes.Buffer
				_ = pl.EncodeBinary(gob.NewEncoder(&bb))
			}
		}
	}()
}

// Enqueue sends the given payload to the listener's internal queue for processing.
func (ml *MockMonitorListener) Enqueue(pl *payload.Payload) {
	select {
	case ml.queue <- pl:
	default:
		// listener queue is full, dropping message
	}
}

// Version returns the API version supported by this listener.
func (ml *MockMonitorListener) Version() listener.Version {
	return listener.Version1_2
}

// Close stops the listener. It is a no-op for MockMonitorListener.
func (ml *MockMonitorListener) Close() {
}

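// TestMockMonitorListenerLifecycle is an illustrative sketch added alongside
// the mocks; it is not part of the upstream benchmark suite. It walks through
// the intended lifecycle of MockMonitorListener as exercised by
// benchWithListeners below: start draining, enqueue a payload, then cancel the
// context and wait for the drain goroutine to exit. The queue size of 16 is an
// arbitrary value chosen for the sketch.
func TestMockMonitorListenerLifecycle(t *testing.T) {
	ml := NewMockMonitorListener(16)

	ctx, cancel := context.WithCancel(context.Background())
	var wg sync.WaitGroup
	wg.Add(1)
	ml.Drain(ctx, &wg)

	// Payloads are handed to the drain goroutine; Enqueue never blocks and
	// silently drops the payload if the queue is full.
	ml.Enqueue(&payload.Payload{})

	// Stop draining and make sure the goroutine has exited.
	cancel()
	wg.Wait()
}
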
// MockLogNotifier is a mock type used to implement the LogRecordNotifier interface for
// benchmarking purposes.
// Specifically, it mimics the behavior of the Daemon and its implementation of the
// NewProxyLogRecord method.
type MockLogNotifier struct {
	monitorAgent agent.Agent
}

// NewMockLogNotifier returns a MockLogNotifier ready to be used in the benchmarks below.
func NewMockLogNotifier(monitor agent.Agent) *MockLogNotifier {
	return &MockLogNotifier{monitor}
}

// NewProxyLogRecord sends the event to the monitor agent to notify the listeners.
func (n *MockLogNotifier) NewProxyLogRecord(l *LogRecord) error {
	return n.monitorAgent.SendEvent(api.MessageTypeAccessLog, l.LogRecord)
}

// RegisterNewListener adds a listener to the MockLogNotifier.
func (n *MockLogNotifier) RegisterNewListener(l listener.MonitorListener) {
	n.monitorAgent.RegisterNewListener(l)
}

var benchCases = []struct {
	name     string
	nRecords int
}{
	{
		name:     "OneRecord",
		nRecords: 1,
	},
	{
		name:     "TenRecords",
		nRecords: 10,
	},
	{
		name:     "HundredRecords",
		nRecords: 100,
	},
	{
		name:     "ThousandRecords",
		nRecords: 1000,
	},
}

func benchWithoutListeners(b *testing.B) {
	node.WithTestLocalNodeStore(func() {
		record := mockLogRecord()
		for _, bm := range benchCases {
			b.Run(bm.name, func(b *testing.B) {
				b.ReportAllocs()
				for i := 0; i < b.N; i++ {
					// Each goroutine will deliver a single notification concurrently.
					// This is done to simulate what happens when a high rate of DNS
					// related events trigger one `notifyOnDNSMsg` callback each and
					// consequently the event logging.
					var wg sync.WaitGroup
					for j := 0; j < bm.nRecords; j++ {
						wg.Add(1)
						go func() {
							defer wg.Done()
							record.Log()
						}()
					}
					wg.Wait()
				}
			})
		}
	})
}

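// Note (not in the upstream file): the benchmarks in this file can be run on
// their own with the standard Go tooling. An assumed invocation, to be
// adjusted to the local checkout, is:
//
//	go test -run='^$' -bench=BenchmarkLogNotifier -benchmem ./pkg/proxy/logger/
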
func benchWithListeners(listener *MockMonitorListener, b *testing.B) {
	node.WithTestLocalNodeStore(func() {
		record := mockLogRecord()
		for _, bm := range benchCases {
			b.Run(bm.name, func(b *testing.B) {
				ctx, cancel := context.WithCancel(context.Background())

				var wg sync.WaitGroup
				wg.Add(1)
				listener.Drain(ctx, &wg)

				b.ReportAllocs()
				b.ResetTimer()
				for i := 0; i < b.N; i++ {
					// Each goroutine will deliver a single notification concurrently.
					// This is done to simulate what happens when a high rate of DNS
					// related events trigger one `notifyOnDNSMsg` callback each and
					// consequently the event logging.
					var logWg sync.WaitGroup
					for j := 0; j < bm.nRecords; j++ {
						logWg.Add(1)
						go func() {
							defer logWg.Done()
							record.Log()
						}()
					}
					logWg.Wait()
				}
				b.StopTimer()

				// wait for listener cleanup
				cancel()
				wg.Wait()
			})
		}
	})
}

func BenchmarkLogNotifierWithNoListeners(b *testing.B) {
	bench := cell.Invoke(func(lc cell.Lifecycle, monitor agent.Agent) error {
		notifier := NewMockLogNotifier(monitor)
		SetNotifier(notifier)

		lc.Append(cell.Hook{
			OnStart: func(ctx cell.HookContext) error {
				benchWithoutListeners(b)
				return nil
			},
			OnStop: func(ctx cell.HookContext) error { return nil },
		})

		return nil
	})

	h := hive.New(
		cell.Provide(func() eventsmap.Map { return nil }),
		agent.Cell,
		bench,
	)

	tlog := hivetest.Logger(b)
	if err := h.Start(tlog, context.TODO()); err != nil {
		b.Fatalf("failed to start hive: %v", err)
	}
	if err := h.Stop(tlog, context.TODO()); err != nil {
		b.Fatalf("failed to stop hive: %v", err)
	}
}

func BenchmarkLogNotifierWithListeners(b *testing.B) {
	bench := cell.Invoke(func(lc cell.Lifecycle, monitor agent.Agent, cfg agent.AgentConfig, em eventsmap.Map) error {
		listener := NewMockMonitorListener(cfg.MonitorQueueSize)
		notifier := NewMockLogNotifier(monitor)
		notifier.RegisterNewListener(listener)
		SetNotifier(notifier)

		lc.Append(cell.Hook{
			OnStart: func(ctx cell.HookContext) error {
				benchWithListeners(listener, b)
				return nil
			},
			OnStop: func(ctx cell.HookContext) error { return nil },
		})

		return nil
	})

	h := hive.New(
		cell.Provide(func() eventsmap.Map { return nil }),
		agent.Cell,
		bench,
	)

	tlog := hivetest.Logger(b)
	if err := h.Start(tlog, context.TODO()); err != nil {
		b.Fatalf("failed to start hive: %v", err)
	}
	if err := h.Stop(tlog, context.TODO()); err != nil {
		b.Fatalf("failed to stop hive: %v", err)
	}
}

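// TestMockMonitorListenerEnqueueDrops is an illustrative sketch, not part of
// the upstream file: it checks that Enqueue drops payloads once the internal
// queue is full instead of blocking, mirroring the non-blocking select in
// Enqueue above. The assertion peeks at the unexported queue field, which is
// possible because the sketch lives in the same package.
func TestMockMonitorListenerEnqueueDrops(t *testing.T) {
	ml := NewMockMonitorListener(1)

	// No Drain goroutine is started, so nothing consumes from the queue.
	ml.Enqueue(&payload.Payload{})
	ml.Enqueue(&payload.Payload{}) // queue is full: expected to be dropped

	if got := len(ml.queue); got != 1 {
		t.Fatalf("expected 1 queued payload after overflow, got %d", got)
	}
}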