github.com/cilium/cilium@v1.16.2/pkg/datapath/linux/config/config_test.go

// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium

package config

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"io"
	"net/netip"
	"strings"
	"testing"

	"github.com/cilium/ebpf/rlimit"
	"github.com/cilium/hive/cell"
	"github.com/cilium/hive/hivetest"
	"github.com/spf13/afero"
	"github.com/stretchr/testify/require"
	"github.com/vishvananda/netlink"

	"github.com/cilium/cilium/pkg/cidr"
	fakeTypes "github.com/cilium/cilium/pkg/datapath/fake/types"
	dpdef "github.com/cilium/cilium/pkg/datapath/linux/config/defines"
	"github.com/cilium/cilium/pkg/datapath/linux/sysctl"
	"github.com/cilium/cilium/pkg/datapath/loader"
	"github.com/cilium/cilium/pkg/datapath/tables"
	datapath "github.com/cilium/cilium/pkg/datapath/types"
	"github.com/cilium/cilium/pkg/hive"
	"github.com/cilium/cilium/pkg/maps/nodemap"
	"github.com/cilium/cilium/pkg/maps/nodemap/fake"
	"github.com/cilium/cilium/pkg/option"
	"github.com/cilium/cilium/pkg/testutils"
)

var (
	dummyNodeCfg = datapath.LocalNodeConfiguration{
		NodeIPv4:           ipv4DummyAddr.AsSlice(),
		NodeIPv6:           ipv6DummyAddr.AsSlice(),
		CiliumInternalIPv4: ipv4DummyAddr.AsSlice(),
		CiliumInternalIPv6: ipv6DummyAddr.AsSlice(),
		AllocCIDRIPv4:      cidr.MustParseCIDR("10.147.0.0/16"),
		LoopbackIPv4:       ipv4DummyAddr.AsSlice(),
		Devices:            []*tables.Device{},
		NodeAddresses:      []tables.NodeAddress{},
		HostEndpointID:     1,
	}
	dummyDevCfg   = testutils.NewTestEndpoint()
	dummyEPCfg    = testutils.NewTestEndpoint()
	ipv4DummyAddr = netip.MustParseAddr("192.0.2.3")
	ipv6DummyAddr = netip.MustParseAddr("2001:db08:0bad:cafe:600d:bee2:0bad:cafe")
)

func setupConfigSuite(tb testing.TB) {
	testutils.PrivilegedTest(tb)

	tb.Helper()

	require.NoError(tb, rlimit.RemoveMemlock(), "Failed to remove memory limits")

	option.Config.EnableHostLegacyRouting = true // Disable obtaining direct routing device.
}

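// badWriter is an io.Writer whose Write call always fails; the tests below use
// it to exercise the error paths of the config writers.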
type badWriter struct{}

func (b *badWriter) Write(p []byte) (int, error) {
	return 0, errors.New("bad write :(")
}

type writeFn func(io.Writer, datapath.ConfigWriter) error

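// writeConfig builds a ConfigWriter in a fresh hive for each test case and runs
// the given write function once against an in-memory buffer, which must
// succeed, and once against badWriter, which must fail.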
func writeConfig(t *testing.T, header string, write writeFn) {
	tests := []struct {
		description string
		output      io.Writer
		wantErr     bool
	}{
		{
			description: "successful write to an in-memory buffer",
			output:      &bytes.Buffer{},
			wantErr:     false,
		},
		{
			description: "write to a failing writer",
			output:      &badWriter{},
			wantErr:     true,
		},
	}
	for _, test := range tests {
		var writer datapath.ConfigWriter
		t.Logf(" Testing %s configuration: %s", header, test.description)
		h := hive.New(
			provideNodemap,
			cell.Provide(
				fakeTypes.NewNodeAddressing,
				func() sysctl.Sysctl { return sysctl.NewDirectSysctl(afero.NewOsFs(), "/proc") },
				NewHeaderfileWriter,
			),
			cell.Invoke(func(writer_ datapath.ConfigWriter) {
				writer = writer_
			}),
		)

		tlog := hivetest.Logger(t)
		require.NoError(t, h.Start(tlog, context.TODO()))
		t.Cleanup(func() { require.Nil(t, h.Stop(tlog, context.TODO())) })
		err := write(test.output, writer)
		require.True(t, test.wantErr == (err != nil), "wantErr=%v, err=%s", test.wantErr, err)
	}
}

func TestWriteNodeConfig(t *testing.T) {
	setupConfigSuite(t)
	writeConfig(t, "node", func(w io.Writer, dp datapath.ConfigWriter) error {
		return dp.WriteNodeConfig(w, &dummyNodeCfg)
	})
}

func TestWriteNetdevConfig(t *testing.T) {
	writeConfig(t, "netdev", func(w io.Writer, dp datapath.ConfigWriter) error {
		return dp.WriteNetdevConfig(w, dummyDevCfg.GetOptions())
	})
}

func TestWriteEndpointConfig(t *testing.T) {
	writeConfig(t, "endpoint", func(w io.Writer, dp datapath.ConfigWriter) error {
		return dp.WriteEndpointConfig(w, &dummyNodeCfg, &dummyEPCfg)
	})

	// Create copy of config option so that it can be restored at the end of
	// this test. In the future, we'd like to parallelize running unit tests.
	// As it stands, this test would not be ready to parallelize until we
	// remove our dependency on globals (e.g. option.Config).
	oldEnableIPv6 := option.Config.EnableIPv6
	defer func() {
		option.Config.EnableIPv6 = oldEnableIPv6
	}()

	testRun := func(te *testutils.TestEndpoint) ([]byte, map[string]uint64) {
		cfg := &HeaderfileWriter{}
		varSub := loader.ELFVariableSubstitutions(te)

		var buf bytes.Buffer
		cfg.writeStaticData(nil, &buf, te)

		return buf.Bytes(), varSub
	}

	lxcIPs := []string{"LXC_IP_1", "LXC_IP_2"}

	tests := []struct {
		description string
		template    testutils.TestEndpoint // Represents template bpf prog
		endpoint    testutils.TestEndpoint // Represents normal endpoint bpf prog
		preTestRun  func(t *testutils.TestEndpoint, e *testutils.TestEndpoint)
		templateExp bool
		endpointExp bool
	}{
		{
			description: "IPv6 is disabled, endpoint does not have an IPv6 addr",
			template:    testutils.NewTestEndpoint(),
			endpoint:    testutils.NewTestEndpoint(),
			preTestRun: func(t *testutils.TestEndpoint, e *testutils.TestEndpoint) {
				option.Config.EnableIPv6 = false
				t.IPv6 = ipv6DummyAddr // Template bpf prog always has dummy IPv6
				e.IPv6 = netip.Addr{}  // This endpoint does not have an IPv6 addr
			},
			templateExp: true,
			endpointExp: false,
		},
		{
			description: "IPv6 is disabled, endpoint does have an IPv6 addr",
			template:    testutils.NewTestEndpoint(),
			endpoint:    testutils.NewTestEndpoint(),
			preTestRun: func(t *testutils.TestEndpoint, e *testutils.TestEndpoint) {
				option.Config.EnableIPv6 = false
				t.IPv6 = ipv6DummyAddr // Template bpf prog always has dummy IPv6
				e.IPv6 = ipv6DummyAddr // This endpoint does have an IPv6 addr
			},
			templateExp: true,
			endpointExp: true,
		},
		{
			description: "IPv6 is enabled",
			template:    testutils.NewTestEndpoint(),
			endpoint:    testutils.NewTestEndpoint(),
			preTestRun: func(t *testutils.TestEndpoint, e *testutils.TestEndpoint) {
				option.Config.EnableIPv6 = true
				t.IPv6 = ipv6DummyAddr
				e.IPv6 = ipv6DummyAddr
			},
			templateExp: true,
			endpointExp: true,
		},
		{
			description: "IPv6 is enabled, endpoint does not have IPv6 address",
			template:    testutils.NewTestEndpoint(),
			endpoint:    testutils.NewTestEndpoint(),
			preTestRun: func(t *testutils.TestEndpoint, e *testutils.TestEndpoint) {
				option.Config.EnableIPv6 = true
				t.IPv6 = ipv6DummyAddr
				e.IPv6 = netip.Addr{}
			},
			templateExp: true,
			endpointExp: false,
		},
	}
	for _, test := range tests {
		t.Logf("Testing %s", test.description)
		test.preTestRun(&test.template, &test.endpoint)

		b, vsub := testRun(&test.template)
		require.Equal(t, test.templateExp, bytes.Contains(b, []byte("DEFINE_IPV6")))
		assertKeysInsideMap(t, vsub, lxcIPs, test.templateExp)

		b, vsub = testRun(&test.endpoint)
		require.Equal(t, test.endpointExp, bytes.Contains(b, []byte("DEFINE_IPV6")))
		assertKeysInsideMap(t, vsub, lxcIPs, test.endpointExp)
	}
}

func TestWriteStaticData(t *testing.T) {
	cfg := &HeaderfileWriter{}
	ep := &dummyEPCfg

	varSub := loader.ELFVariableSubstitutions(ep)
	mapSub := loader.ELFMapSubstitutions(ep)

	var buf bytes.Buffer
	cfg.writeStaticData(nil, &buf, ep)
	b := buf.Bytes()
	for k := range varSub {
		for _, suffix := range []string{"_1", "_2"} {
			// Variables with these suffixes are implemented via
			// multiple 64-bit values. The header define doesn't
			// include these numbers though, so strip them.
			if strings.HasSuffix(k, suffix) {
				k = strings.TrimSuffix(k, suffix)
				break
			}
		}
		require.Equal(t, true, bytes.Contains(b, []byte(k)))
	}
	for _, v := range mapSub {
		t.Logf("Ensuring config has %s", v)
		require.Equal(t, true, bytes.Contains(b, []byte(v)))
	}
}

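// assertKeysInsideMap asserts that every key in keys is present in m when want
// is true, and absent when want is false.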
func assertKeysInsideMap(t *testing.T, m map[string]uint64, keys []string, want bool) {
	for _, v := range keys {
		_, ok := m[v]
		require.Equal(t, want, ok)
	}
}

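// createMainLink and createVlanLink add real dummy and 802.1Q VLAN links via
// netlink for TestVLANBypassConfig; the test removes them again with deferred
// netlink.LinkDel calls.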
func createMainLink(name string, t *testing.T) *netlink.Dummy {
	link := &netlink.Dummy{
		LinkAttrs: netlink.LinkAttrs{
			Name: name,
		},
	}
	err := netlink.LinkAdd(link)
	require.NoError(t, err)

	return link
}

func createVlanLink(vlanId int, mainLink *netlink.Dummy, t *testing.T) *netlink.Vlan {
	link := &netlink.Vlan{
		LinkAttrs: netlink.LinkAttrs{
			Name:        fmt.Sprintf("%s.%d", mainLink.Name, vlanId),
			ParentIndex: mainLink.Index,
		},
		VlanProtocol: netlink.VLAN_PROTOCOL_8021Q,
		VlanId:       vlanId,
	}
	err := netlink.LinkAdd(link)
	require.Nil(t, err)

	return link
}

func TestVLANBypassConfig(t *testing.T) {
	setupConfigSuite(t)

	var devs []*tables.Device

	main1 := createMainLink("dummy0", t)
	devs = append(devs, &tables.Device{Name: main1.Name, Index: main1.Index})
	defer func() {
		netlink.LinkDel(main1)
	}()

	// Define set of vlans which we want to allow.
	allow := map[int]bool{
		4000: true,
		4001: true,
		4003: true,
	}

	for i := 4000; i < 4003; i++ {
		vlan := createVlanLink(i, main1, t)
		if allow[i] {
			devs = append(devs, &tables.Device{Index: vlan.Index, Name: vlan.Name})
		}
		defer func() {
			netlink.LinkDel(vlan)
		}()
	}

	main2 := createMainLink("dummy1", t)
	devs = append(devs, &tables.Device{Name: main2.Name, Index: main2.Index})
	defer func() {
		netlink.LinkDel(main2)
	}()

	for i := 4003; i < 4006; i++ {
		vlan := createVlanLink(i, main2, t)
		if allow[i] {
			devs = append(devs, &tables.Device{Index: vlan.Index, Name: vlan.Name})
		}
		defer func() {
			netlink.LinkDel(vlan)
		}()
	}

	option.Config.VLANBPFBypass = []int{4004}
	m, err := vlanFilterMacros(devs)
	require.Equal(t, nil, err)
	require.Equal(t, fmt.Sprintf(`switch (ifindex) { \
case %d: \
switch (vlan_id) { \
case 4000: \
case 4001: \
return true; \
} \
break; \
case %d: \
switch (vlan_id) { \
case 4003: \
case 4004: \
return true; \
} \
break; \
} \
return false;`, main1.Index, main2.Index), m)

	option.Config.VLANBPFBypass = []int{4002, 4004, 4005}
	_, err = vlanFilterMacros(devs)
	require.Error(t, err)

	option.Config.VLANBPFBypass = []int{0}
	m, err = vlanFilterMacros(devs)
	require.Nil(t, err)
	require.Equal(t, "return true", m)
}

func TestWriteNodeConfigExtraDefines(t *testing.T) {
	testutils.PrivilegedTest(t)
	setupConfigSuite(t)

	var (
		na datapath.NodeAddressing
	)
	h := hive.New(
		cell.Provide(
			fakeTypes.NewNodeAddressing,
		),
		cell.Invoke(func(
			nodeaddressing datapath.NodeAddressing,
		) {
			na = nodeaddressing
		}),
	)

	tlog := hivetest.Logger(t)
	require.NoError(t, h.Start(tlog, context.TODO()))
	t.Cleanup(func() { h.Stop(tlog, context.TODO()) })

	var buffer bytes.Buffer

	// Assert that configurations are propagated when all generated extra defines are valid
	cfg, err := NewHeaderfileWriter(WriterParams{
		NodeAddressing:   na,
		NodeExtraDefines: nil,
		NodeExtraDefineFns: []dpdef.Fn{
			func() (dpdef.Map, error) { return dpdef.Map{"FOO": "0x1", "BAR": "0x2"}, nil },
			func() (dpdef.Map, error) { return dpdef.Map{"BAZ": "0x3"}, nil },
		},
		Sysctl:  sysctl.NewDirectSysctl(afero.NewOsFs(), "/proc"),
		NodeMap: fake.NewFakeNodeMapV2(),
	})
	require.NoError(t, err)

	buffer.Reset()
	require.NoError(t, cfg.WriteNodeConfig(&buffer, &dummyNodeCfg))

	output := buffer.String()
	require.Contains(t, output, "define FOO 0x1\n")
	require.Contains(t, output, "define BAR 0x2\n")
	require.Contains(t, output, "define BAZ 0x3\n")

	// Assert that an error is returned when one extra define function returns an error
	cfg, err = NewHeaderfileWriter(WriterParams{
		NodeAddressing:   fakeTypes.NewNodeAddressing(),
		NodeExtraDefines: nil,
		NodeExtraDefineFns: []dpdef.Fn{
			func() (dpdef.Map, error) { return nil, errors.New("failing on purpose") },
		},
		Sysctl:  sysctl.NewDirectSysctl(afero.NewOsFs(), "/proc"),
		NodeMap: fake.NewFakeNodeMapV2(),
	})
	require.NoError(t, err)

	buffer.Reset()
	require.Error(t, cfg.WriteNodeConfig(&buffer, &dummyNodeCfg))

	// Assert that an error is returned when one extra define would overwrite an already existing entry
	cfg, err = NewHeaderfileWriter(WriterParams{
		NodeAddressing:   fakeTypes.NewNodeAddressing(),
		NodeExtraDefines: nil,
		NodeExtraDefineFns: []dpdef.Fn{
			func() (dpdef.Map, error) { return dpdef.Map{"FOO": "0x1", "BAR": "0x2"}, nil },
			func() (dpdef.Map, error) { return dpdef.Map{"FOO": "0x3"}, nil },
		},
		Sysctl:  sysctl.NewDirectSysctl(afero.NewOsFs(), "/proc"),
		NodeMap: fake.NewFakeNodeMapV2(),
	})
	require.NoError(t, err)

	buffer.Reset()
	require.Error(t, cfg.WriteNodeConfig(&buffer, &dummyNodeCfg))
}

func TestNewHeaderfileWriter(t *testing.T) {
	testutils.PrivilegedTest(t)
	setupConfigSuite(t)

	a := dpdef.Map{"A": "1"}
	var buffer bytes.Buffer

	_, err := NewHeaderfileWriter(WriterParams{
		NodeAddressing:     fakeTypes.NewNodeAddressing(),
		NodeExtraDefines:   []dpdef.Map{a, a},
		NodeExtraDefineFns: nil,
		Sysctl:             sysctl.NewDirectSysctl(afero.NewOsFs(), "/proc"),
		NodeMap:            fake.NewFakeNodeMapV2(),
	})

	require.Error(t, err, "duplicate keys should be rejected")

	cfg, err := NewHeaderfileWriter(WriterParams{
		NodeAddressing:     fakeTypes.NewNodeAddressing(),
		NodeExtraDefines:   []dpdef.Map{a},
		NodeExtraDefineFns: nil,
		Sysctl:             sysctl.NewDirectSysctl(afero.NewOsFs(), "/proc"),
		NodeMap:            fake.NewFakeNodeMapV2(),
	})
	require.NoError(t, err)
	require.NoError(t, cfg.WriteNodeConfig(&buffer, &dummyNodeCfg))
	require.Contains(t, buffer.String(), "define A 1\n")
}

var provideNodemap = cell.Provide(func() nodemap.MapV2 {
	return fake.NewFakeNodeMapV2()
})