github.com/cilium/ebpf@v0.16.0/map_test.go (about) 1 package ebpf 2 3 import ( 4 "bytes" 5 "errors" 6 "fmt" 7 "math" 8 "os" 9 "path/filepath" 10 "sort" 11 "testing" 12 "unsafe" 13 14 "github.com/cilium/ebpf/asm" 15 "github.com/cilium/ebpf/btf" 16 "github.com/cilium/ebpf/internal" 17 "github.com/cilium/ebpf/internal/sys" 18 "github.com/cilium/ebpf/internal/testutils" 19 "github.com/cilium/ebpf/internal/unix" 20 21 "github.com/go-quicktest/qt" 22 ) 23 24 var ( 25 spec1 = &MapSpec{ 26 Name: "foo", 27 Type: Hash, 28 KeySize: 4, 29 ValueSize: 4, 30 MaxEntries: 1, 31 Pinning: PinByName, 32 } 33 ) 34 35 // newHash returns a new Map of type Hash. Cleanup is handled automatically. 36 func newHash(t *testing.T) *Map { 37 hash, err := NewMap(&MapSpec{ 38 Type: Hash, 39 KeySize: 5, 40 ValueSize: 4, 41 MaxEntries: 10, 42 }) 43 if err != nil { 44 t.Fatal(err) 45 } 46 t.Cleanup(func() { hash.Close() }) 47 return hash 48 } 49 50 func TestMap(t *testing.T) { 51 m := createArray(t) 52 53 t.Log(m) 54 55 if err := m.Put(uint32(0), uint32(42)); err != nil { 56 t.Fatal("Can't put:", err) 57 } 58 if err := m.Put(uint32(1), uint32(4242)); err != nil { 59 t.Fatal("Can't put:", err) 60 } 61 62 m2, err := m.Clone() 63 if err != nil { 64 t.Fatal("Can't clone map:", err) 65 } 66 defer m2.Close() 67 68 m.Close() 69 m = m2 70 71 var v uint32 72 if err := m.Lookup(uint32(0), &v); err != nil { 73 t.Fatal("Can't lookup 0:", err) 74 } 75 if v != 42 { 76 t.Error("Want value 42, got", v) 77 } 78 79 sliceVal := make([]uint32, 1) 80 qt.Assert(t, qt.IsNil(m.Lookup(uint32(0), sliceVal))) 81 qt.Assert(t, qt.DeepEquals(sliceVal, []uint32{42})) 82 83 var slice []byte 84 qt.Assert(t, qt.IsNil(m.Lookup(uint32(0), &slice))) 85 qt.Assert(t, qt.DeepEquals(slice, internal.NativeEndian.AppendUint32(nil, 42))) 86 87 var k uint32 88 if err := m.NextKey(uint32(0), &k); err != nil { 89 t.Fatal("Can't get:", err) 90 } 91 if k != 1 { 92 t.Error("Want key 1, got", k) 93 } 94 } 95 96 func TestMapSpecCopy(t *testing.T) { 97 a := &MapSpec{ 98 "foo", 99 Hash, 100 4, 101 4, 102 1, 103 1, 104 PinByName, 105 1, 106 []MapKV{{1, 2}}, // Can't copy Contents, use value types 107 true, 108 nil, // InnerMap 109 bytes.NewReader(nil), 110 &btf.Int{}, 111 &btf.Int{}, 112 } 113 a.InnerMap = a 114 115 qt.Check(t, qt.IsNil((*MapSpec)(nil).Copy())) 116 qt.Assert(t, testutils.IsDeepCopy(a.Copy(), a)) 117 } 118 119 func TestMapBatch(t *testing.T) { 120 if err := haveBatchAPI(); err != nil { 121 t.Skipf("batch api not available: %v", err) 122 } 123 124 contents := []uint32{ 125 42, 4242, 23, 2323, 126 } 127 128 mustNewMap := func(t *testing.T, mapType MapType, max uint32) *Map { 129 m, err := NewMap(&MapSpec{ 130 Type: mapType, 131 KeySize: 4, 132 ValueSize: 4, 133 MaxEntries: max, 134 }) 135 if err != nil { 136 t.Fatal(err) 137 } 138 t.Cleanup(func() { m.Close() }) 139 return m 140 } 141 142 keysAndValuesForMap := func(m *Map, contents []uint32) (keys, values []uint32, stride int) { 143 possibleCPU := 1 144 if m.Type().hasPerCPUValue() { 145 possibleCPU = MustPossibleCPU() 146 } 147 148 keys = make([]uint32, 0, len(contents)) 149 values = make([]uint32, 0, len(contents)*possibleCPU) 150 for key, value := range contents { 151 keys = append(keys, uint32(key)) 152 for i := 0; i < possibleCPU; i++ { 153 values = append(values, value*uint32((i+1))) 154 } 155 } 156 157 return keys, values, possibleCPU 158 } 159 160 for _, typ := range []MapType{Array, PerCPUArray} { 161 t.Run(typ.String(), func(t *testing.T) { 162 if typ == PerCPUArray { 163 // 
https://lore.kernel.org/bpf/20210424214510.806627-2-pctammela@mojatatu.com/ 164 testutils.SkipOnOldKernel(t, "5.13", "batched ops support for percpu array") 165 } 166 167 m := mustNewMap(t, typ, uint32(len(contents))) 168 keys, values, _ := keysAndValuesForMap(m, contents) 169 count, err := m.BatchUpdate(keys, values, nil) 170 qt.Assert(t, qt.IsNil(err)) 171 qt.Assert(t, qt.Equals(count, len(contents))) 172 173 lookupKeys := make([]uint32, len(keys)) 174 lookupValues := make([]uint32, len(values)) 175 176 var cursor MapBatchCursor 177 count, err = m.BatchLookup(&cursor, lookupKeys, lookupValues, nil) 178 qt.Assert(t, qt.IsNil(err)) 179 qt.Assert(t, qt.Equals(count, len(contents))) 180 qt.Assert(t, qt.ContentEquals(lookupKeys, keys)) 181 qt.Assert(t, qt.ContentEquals(lookupValues, values)) 182 183 count, err = m.BatchLookup(&cursor, lookupKeys, lookupValues, nil) 184 qt.Assert(t, qt.ErrorIs(err, ErrKeyNotExist)) 185 qt.Assert(t, qt.Equals(count, 0)) 186 }) 187 } 188 189 for _, typ := range []MapType{Hash, PerCPUHash} { 190 t.Run(typ.String(), func(t *testing.T) { 191 m := mustNewMap(t, typ, uint32(len(contents))) 192 keys, values, stride := keysAndValuesForMap(m, contents) 193 count, err := m.BatchUpdate(keys, values, nil) 194 qt.Assert(t, qt.IsNil(err)) 195 qt.Assert(t, qt.Equals(count, len(contents))) 196 197 // BPF hash tables seem to have lots of collisions when keys 198 // are following a sequence. 199 // This causes ENOSPC since a single large bucket may be larger 200 // than the batch size. We work around this by making the batch size 201 // equal to the map size. 202 lookupKeys := make([]uint32, len(keys)) 203 lookupValues := make([]uint32, len(values)) 204 205 var cursor MapBatchCursor 206 count, err = m.BatchLookup(&cursor, lookupKeys, lookupValues, nil) 207 qt.Assert(t, qt.ErrorIs(err, ErrKeyNotExist)) 208 qt.Assert(t, qt.Equals(count, len(contents))) 209 210 qt.Assert(t, qt.ContentEquals(lookupKeys, keys)) 211 qt.Assert(t, qt.ContentEquals(lookupValues, values)) 212 213 cursor = MapBatchCursor{} 214 count, err = m.BatchLookupAndDelete(&cursor, lookupKeys, lookupValues, nil) 215 qt.Assert(t, qt.ErrorIs(err, ErrKeyNotExist)) 216 qt.Assert(t, qt.Equals(count, len(contents))) 217 218 qt.Assert(t, qt.ContentEquals(lookupKeys, keys)) 219 qt.Assert(t, qt.ContentEquals(lookupValues, values)) 220 221 if stride > 1 { 222 values := make([]uint32, stride) 223 qt.Assert(t, qt.ErrorIs(m.Lookup(uint32(0), values), ErrKeyNotExist)) 224 } else { 225 var v uint32 226 qt.Assert(t, qt.ErrorIs(m.Lookup(uint32(0), &v), ErrKeyNotExist)) 227 } 228 }) 229 } 230 } 231 232 func TestMapBatchCursorReuse(t *testing.T) { 233 spec := &MapSpec{ 234 Type: Array, 235 KeySize: 4, 236 ValueSize: 4, 237 MaxEntries: 4, 238 } 239 240 arr1, err := NewMap(spec) 241 if err != nil { 242 t.Fatal(err) 243 } 244 defer arr1.Close() 245 246 arr2, err := NewMap(spec) 247 if err != nil { 248 t.Fatal(err) 249 } 250 defer arr2.Close() 251 252 tmp := make([]uint32, 2) 253 254 var cursor MapBatchCursor 255 _, err = arr1.BatchLookup(&cursor, tmp, tmp, nil) 256 testutils.SkipIfNotSupported(t, err) 257 qt.Assert(t, qt.IsNil(err)) 258 259 _, err = arr2.BatchLookup(&cursor, tmp, tmp, nil) 260 qt.Assert(t, qt.IsNotNil(err)) 261 } 262 263 func TestMapLookupKeyTooSmall(t *testing.T) { 264 m := createArray(t) 265 defer m.Close() 266 267 var small uint16 268 qt.Assert(t, qt.IsNil(m.Put(uint32(0), uint32(1234)))) 269 qt.Assert(t, qt.IsNotNil(m.Lookup(uint32(0), &small))) 270 } 271 272 func TestMapLookupKeyNotFoundAllocations(t *testing.T) { 
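	// A lookup that misses, with key and value passed as pointers, must not allocate.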
273 m := createArray(t) 274 defer m.Close() 275 var key, out uint32 = 3, 0 276 277 allocs := testing.AllocsPerRun(5, func() { 278 _ = m.Lookup(&key, &out) 279 }) 280 qt.Assert(t, qt.Equals(allocs, float64(0))) 281 } 282 283 func TestBatchAPIMapDelete(t *testing.T) { 284 if err := haveBatchAPI(); err != nil { 285 t.Skipf("batch api not available: %v", err) 286 } 287 m, err := NewMap(&MapSpec{ 288 Type: Hash, 289 KeySize: 4, 290 ValueSize: 4, 291 MaxEntries: 10, 292 }) 293 if err != nil { 294 t.Fatal(err) 295 } 296 defer m.Close() 297 298 var ( 299 keys = []uint32{0, 1} 300 values = []uint32{42, 4242} 301 ) 302 303 count, err := m.BatchUpdate(keys, values, nil) 304 if err != nil { 305 t.Fatalf("BatchUpdate: %v", err) 306 } 307 if count != len(keys) { 308 t.Fatalf("BatchUpdate: expected count, %d, to be %d", count, len(keys)) 309 } 310 311 var v uint32 312 if err := m.Lookup(uint32(0), &v); err != nil { 313 t.Fatal("Can't lookup 0:", err) 314 } 315 if v != 42 { 316 t.Error("Want value 42, got", v) 317 } 318 319 count, err = m.BatchDelete(keys, nil) 320 if err != nil { 321 t.Fatalf("BatchDelete: %v", err) 322 } 323 if count != len(keys) { 324 t.Fatalf("BatchDelete: expected %d deletions got %d", len(keys), count) 325 } 326 327 if err := m.Lookup(uint32(0), &v); !errors.Is(err, ErrKeyNotExist) { 328 t.Fatalf("Lookup should have failed with error, %v, instead error is %v", ErrKeyNotExist, err) 329 } 330 } 331 332 func TestMapClose(t *testing.T) { 333 m := createArray(t) 334 335 if err := m.Close(); err != nil { 336 t.Fatal("Can't close map:", err) 337 } 338 339 if err := m.Put(uint32(0), uint32(42)); !errors.Is(err, sys.ErrClosedFd) { 340 t.Fatal("Put doesn't check for closed fd", err) 341 } 342 343 if _, err := m.LookupBytes(uint32(0)); !errors.Is(err, sys.ErrClosedFd) { 344 t.Fatal("Get doesn't check for closed fd", err) 345 } 346 } 347 348 func TestBatchMapWithLock(t *testing.T) { 349 testutils.SkipOnOldKernel(t, "5.13", "MAP BATCH BPF_F_LOCK") 350 file := testutils.NativeFile(t, "testdata/map_spin_lock-%s.elf") 351 spec, err := LoadCollectionSpec(file) 352 if err != nil { 353 t.Fatal("Can't parse ELF:", err) 354 } 355 356 coll, err := NewCollection(spec) 357 if err != nil { 358 t.Fatal("Can't parse ELF:", err) 359 } 360 defer coll.Close() 361 362 type spinLockValue struct { 363 Cnt uint32 364 Padding uint32 365 } 366 367 m, ok := coll.Maps["spin_lock_map"] 368 if !ok { 369 t.Fatal(err) 370 } 371 372 keys := []uint32{0, 1} 373 values := []spinLockValue{{Cnt: 42}, {Cnt: 4242}} 374 count, err := m.BatchUpdate(keys, values, &BatchOptions{ElemFlags: uint64(UpdateLock)}) 375 if err != nil { 376 t.Fatalf("BatchUpdate: %v", err) 377 } 378 if count != len(keys) { 379 t.Fatalf("BatchUpdate: expected count, %d, to be %d", count, len(keys)) 380 } 381 382 var cursor MapBatchCursor 383 lookupKeys := make([]uint32, 2) 384 lookupValues := make([]spinLockValue, 2) 385 count, err = m.BatchLookup(&cursor, lookupKeys, lookupValues, &BatchOptions{ElemFlags: uint64(LookupLock)}) 386 if !errors.Is(err, ErrKeyNotExist) { 387 t.Fatalf("BatchLookup: %v", err) 388 } 389 if count != 2 { 390 t.Fatalf("BatchLookup: expected two keys, got %d", count) 391 } 392 393 cursor = MapBatchCursor{} 394 deleteKeys := []uint32{0, 1} 395 deleteValues := make([]spinLockValue, 2) 396 count, err = m.BatchLookupAndDelete(&cursor, deleteKeys, deleteValues, nil) 397 if !errors.Is(err, ErrKeyNotExist) { 398 t.Fatalf("BatchLookupAndDelete: %v", err) 399 } 400 if count != 2 { 401 t.Fatalf("BatchLookupAndDelete: expected two keys, got %d", 
count) 402 } 403 } 404 405 func TestMapWithLock(t *testing.T) { 406 testutils.SkipOnOldKernel(t, "5.13", "MAP BPF_F_LOCK") 407 file := testutils.NativeFile(t, "testdata/map_spin_lock-%s.elf") 408 spec, err := LoadCollectionSpec(file) 409 if err != nil { 410 t.Fatal("Can't parse ELF:", err) 411 } 412 413 coll, err := NewCollection(spec) 414 if err != nil { 415 t.Fatal("Can't parse ELF:", err) 416 } 417 defer coll.Close() 418 419 type spinLockValue struct { 420 Cnt uint32 421 Padding uint32 422 } 423 424 m, ok := coll.Maps["spin_lock_map"] 425 if !ok { 426 t.Fatal(err) 427 } 428 429 key := uint32(1) 430 value := spinLockValue{Cnt: 5} 431 err = m.Update(key, value, UpdateLock) 432 if err != nil { 433 t.Fatal(err) 434 } 435 436 value.Cnt = 0 437 err = m.LookupWithFlags(&key, &value, LookupLock) 438 if err != nil { 439 t.Fatal(err) 440 } 441 if value.Cnt != 5 { 442 t.Fatalf("Want value 5, got %d", value.Cnt) 443 } 444 445 t.Run("LookupAndDelete", func(t *testing.T) { 446 testutils.SkipOnOldKernel(t, "5.14", "LOOKUP_AND_DELETE flags") 447 448 value.Cnt = 0 449 err = m.LookupAndDeleteWithFlags(&key, &value, LookupLock) 450 if err != nil { 451 t.Fatal(err) 452 } 453 if value.Cnt != 5 { 454 t.Fatalf("Want value 5, got %d", value.Cnt) 455 } 456 457 err = m.LookupWithFlags(&key, &value, LookupLock) 458 if err != nil && !errors.Is(err, ErrKeyNotExist) { 459 t.Fatal(err) 460 } 461 }) 462 } 463 464 func TestMapCloneNil(t *testing.T) { 465 m, err := (*Map)(nil).Clone() 466 if err != nil { 467 t.Fatal(err) 468 } 469 470 if m != nil { 471 t.Fatal("Cloning a nil map doesn't return nil") 472 } 473 } 474 475 func TestMapPin(t *testing.T) { 476 m := createArray(t) 477 478 if err := m.Put(uint32(0), uint32(42)); err != nil { 479 t.Fatal("Can't put:", err) 480 } 481 482 tmp := testutils.TempBPFFS(t) 483 path := filepath.Join(tmp, "map") 484 485 if err := m.Pin(path); err != nil { 486 testutils.SkipIfNotSupported(t, err) 487 t.Fatal(err) 488 } 489 490 pinned := m.IsPinned() 491 qt.Assert(t, qt.IsTrue(pinned)) 492 493 m.Close() 494 495 m, err := LoadPinnedMap(path, nil) 496 testutils.SkipIfNotSupported(t, err) 497 if err != nil { 498 t.Fatal(err) 499 } 500 defer m.Close() 501 502 var v uint32 503 if err := m.Lookup(uint32(0), &v); err != nil { 504 t.Fatal("Can't lookup 0:", err) 505 } 506 if v != 42 { 507 t.Error("Want value 42, got", v) 508 } 509 } 510 511 func TestNestedMapPin(t *testing.T) { 512 m, err := NewMap(&MapSpec{ 513 Type: ArrayOfMaps, 514 KeySize: 4, 515 ValueSize: 4, 516 MaxEntries: 2, 517 InnerMap: &MapSpec{ 518 Type: Array, 519 KeySize: 4, 520 ValueSize: 4, 521 MaxEntries: 1, 522 }, 523 }) 524 testutils.SkipIfNotSupported(t, err) 525 if err != nil { 526 t.Fatal(err) 527 } 528 defer m.Close() 529 530 tmp, err := os.MkdirTemp("/sys/fs/bpf", "ebpf-test") 531 if err != nil { 532 t.Fatal(err) 533 } 534 defer os.RemoveAll(tmp) 535 536 path := filepath.Join(tmp, "nested") 537 if err := m.Pin(path); err != nil { 538 t.Fatal(err) 539 } 540 m.Close() 541 542 m, err = LoadPinnedMap(path, nil) 543 testutils.SkipIfNotSupported(t, err) 544 if err != nil { 545 t.Fatal(err) 546 } 547 defer m.Close() 548 } 549 550 func TestNestedMapPinNested(t *testing.T) { 551 if _, err := NewMap(&MapSpec{ 552 Type: ArrayOfMaps, 553 KeySize: 4, 554 ValueSize: 4, 555 MaxEntries: 2, 556 InnerMap: &MapSpec{ 557 Name: "inner", 558 Type: Array, 559 KeySize: 4, 560 ValueSize: 4, 561 MaxEntries: 1, 562 Pinning: PinByName, 563 }, 564 }); err == nil { 565 t.Error("Inner maps should not be pinnable") 566 } 567 } 568 569 func 
TestMapPinMultiple(t *testing.T) { 570 testutils.SkipOnOldKernel(t, "4.9", "atomic re-pinning was introduced in 4.9 series") 571 572 tmp := testutils.TempBPFFS(t) 573 574 spec := spec1.Copy() 575 576 m1, err := NewMapWithOptions(spec, MapOptions{PinPath: tmp}) 577 if err != nil { 578 t.Fatal("Can't create map:", err) 579 } 580 defer m1.Close() 581 pinned := m1.IsPinned() 582 qt.Assert(t, qt.IsTrue(pinned)) 583 584 newPath := filepath.Join(tmp, "bar") 585 err = m1.Pin(newPath) 586 testutils.SkipIfNotSupported(t, err) 587 qt.Assert(t, qt.IsNil(err)) 588 oldPath := filepath.Join(tmp, spec.Name) 589 if _, err := os.Stat(oldPath); err == nil { 590 t.Fatal("Previous pinned map path still exists:", err) 591 } 592 m2, err := LoadPinnedMap(newPath, nil) 593 qt.Assert(t, qt.IsNil(err)) 594 pinned = m2.IsPinned() 595 qt.Assert(t, qt.IsTrue(pinned)) 596 defer m2.Close() 597 } 598 599 func TestMapPinWithEmptyPath(t *testing.T) { 600 m := createArray(t) 601 602 err := m.Pin("") 603 604 qt.Assert(t, qt.Not(qt.IsNil(err))) 605 } 606 607 func TestMapPinFailReplace(t *testing.T) { 608 tmp := testutils.TempBPFFS(t) 609 spec := spec1.Copy() 610 spec2 := spec1.Copy() 611 spec2.Name = spec1.Name + "bar" 612 613 m, err := NewMapWithOptions(spec, MapOptions{PinPath: tmp}) 614 if err != nil { 615 t.Fatal("Failed to create map:", err) 616 } 617 defer m.Close() 618 m2, err := NewMapWithOptions(spec2, MapOptions{PinPath: tmp}) 619 if err != nil { 620 t.Fatal("Failed to create map2:", err) 621 } 622 defer m2.Close() 623 qt.Assert(t, qt.IsTrue(m.IsPinned())) 624 newPath := filepath.Join(tmp, spec2.Name) 625 626 qt.Assert(t, qt.Not(qt.IsNil(m.Pin(newPath))), qt.Commentf("Pin didn't"+ 627 " fail new path from replacing an existing path")) 628 } 629 630 func TestMapUnpin(t *testing.T) { 631 tmp := testutils.TempBPFFS(t) 632 spec := spec1.Copy() 633 634 m, err := NewMapWithOptions(spec, MapOptions{PinPath: tmp}) 635 if err != nil { 636 t.Fatal("Failed to create map:", err) 637 } 638 defer m.Close() 639 640 pinned := m.IsPinned() 641 qt.Assert(t, qt.IsTrue(pinned)) 642 path := filepath.Join(tmp, spec.Name) 643 m2, err := LoadPinnedMap(path, nil) 644 testutils.SkipIfNotSupported(t, err) 645 qt.Assert(t, qt.IsNil(err)) 646 defer m2.Close() 647 648 if err = m.Unpin(); err != nil { 649 t.Fatal("Failed to unpin map:", err) 650 } 651 if _, err := os.Stat(path); err == nil { 652 t.Fatal("Pinned map path still exists after unpinning:", err) 653 } 654 } 655 656 func TestMapLoadPinned(t *testing.T) { 657 tmp := testutils.TempBPFFS(t) 658 659 spec := spec1.Copy() 660 661 m1, err := NewMapWithOptions(spec, MapOptions{PinPath: tmp}) 662 qt.Assert(t, qt.IsNil(err)) 663 defer m1.Close() 664 pinned := m1.IsPinned() 665 qt.Assert(t, qt.IsTrue(pinned)) 666 667 path := filepath.Join(tmp, spec.Name) 668 m2, err := LoadPinnedMap(path, nil) 669 testutils.SkipIfNotSupported(t, err) 670 qt.Assert(t, qt.IsNil(err)) 671 defer m2.Close() 672 pinned = m2.IsPinned() 673 qt.Assert(t, qt.IsTrue(pinned)) 674 } 675 676 func TestMapLoadReusePinned(t *testing.T) { 677 for _, typ := range []MapType{Array, Hash, DevMap, DevMapHash} { 678 t.Run(typ.String(), func(t *testing.T) { 679 if typ == DevMap { 680 testutils.SkipOnOldKernel(t, "4.14", "devmap") 681 } 682 if typ == DevMapHash { 683 testutils.SkipOnOldKernel(t, "5.4", "devmap_hash") 684 } 685 tmp := testutils.TempBPFFS(t) 686 spec := &MapSpec{ 687 Name: "pinmap", 688 Type: typ, 689 KeySize: 4, 690 ValueSize: 4, 691 MaxEntries: 1, 692 Pinning: PinByName, 693 } 694 695 m1, err := NewMapWithOptions(spec, 
MapOptions{PinPath: tmp}) 696 qt.Assert(t, qt.IsNil(err)) 697 defer m1.Close() 698 699 m2, err := NewMapWithOptions(spec, MapOptions{PinPath: tmp}) 700 qt.Assert(t, qt.IsNil(err)) 701 defer m2.Close() 702 }) 703 } 704 } 705 706 func TestMapLoadPinnedUnpin(t *testing.T) { 707 tmp := testutils.TempBPFFS(t) 708 709 spec := spec1.Copy() 710 711 m1, err := NewMapWithOptions(spec, MapOptions{PinPath: tmp}) 712 qt.Assert(t, qt.IsNil(err)) 713 defer m1.Close() 714 pinned := m1.IsPinned() 715 qt.Assert(t, qt.IsTrue(pinned)) 716 717 path := filepath.Join(tmp, spec.Name) 718 m2, err := LoadPinnedMap(path, nil) 719 testutils.SkipIfNotSupported(t, err) 720 qt.Assert(t, qt.IsNil(err)) 721 defer m2.Close() 722 err = m1.Unpin() 723 qt.Assert(t, qt.IsNil(err)) 724 err = m2.Unpin() 725 qt.Assert(t, qt.IsNil(err)) 726 } 727 728 func TestMapLoadPinnedWithOptions(t *testing.T) { 729 // Introduced in commit 6e71b04a8224. 730 testutils.SkipOnOldKernel(t, "4.15", "file_flags in BPF_OBJ_GET") 731 732 array := createArray(t) 733 734 tmp := testutils.TempBPFFS(t) 735 736 path := filepath.Join(tmp, "map") 737 if err := array.Pin(path); err != nil { 738 t.Fatal(err) 739 } 740 if err := array.Put(uint32(0), uint32(123)); err != nil { 741 t.Fatal(err) 742 } 743 array.Close() 744 745 t.Run("read-only", func(t *testing.T) { 746 array, err := LoadPinnedMap(path, &LoadPinOptions{ 747 ReadOnly: true, 748 }) 749 testutils.SkipIfNotSupported(t, err) 750 if err != nil { 751 t.Fatal("Can't load map:", err) 752 } 753 defer array.Close() 754 755 if err := array.Put(uint32(0), uint32(1)); !errors.Is(err, unix.EPERM) { 756 t.Fatal("Expected EPERM from Put, got", err) 757 } 758 }) 759 760 t.Run("write-only", func(t *testing.T) { 761 array, err := LoadPinnedMap(path, &LoadPinOptions{ 762 WriteOnly: true, 763 }) 764 testutils.SkipIfNotSupported(t, err) 765 if err != nil { 766 t.Fatal("Can't load map:", err) 767 } 768 defer array.Close() 769 770 var value uint32 771 if err := array.Lookup(uint32(0), &value); !errors.Is(err, unix.EPERM) { 772 t.Fatal("Expected EPERM from Lookup, got", err) 773 } 774 }) 775 } 776 777 func TestMapPinFlags(t *testing.T) { 778 tmp := testutils.TempBPFFS(t) 779 780 spec := &MapSpec{ 781 Name: "map", 782 Type: Array, 783 KeySize: 4, 784 ValueSize: 4, 785 MaxEntries: 1, 786 Pinning: PinByName, 787 } 788 789 m, err := NewMapWithOptions(spec, MapOptions{ 790 PinPath: tmp, 791 }) 792 qt.Assert(t, qt.IsNil(err)) 793 m.Close() 794 795 _, err = NewMapWithOptions(spec, MapOptions{ 796 PinPath: tmp, 797 LoadPinOptions: LoadPinOptions{ 798 Flags: math.MaxUint32, 799 }, 800 }) 801 if !errors.Is(err, unix.EINVAL) { 802 t.Fatal("Invalid flags should trigger EINVAL:", err) 803 } 804 } 805 806 func createArray(t *testing.T) *Map { 807 t.Helper() 808 809 m, err := NewMap(&MapSpec{ 810 Type: Array, 811 KeySize: 4, 812 ValueSize: 4, 813 MaxEntries: 2, 814 }) 815 if err != nil { 816 t.Fatal(err) 817 } 818 t.Cleanup(func() { m.Close() }) 819 return m 820 } 821 822 func TestMapQueue(t *testing.T) { 823 testutils.SkipOnOldKernel(t, "4.20", "map type queue") 824 825 m, err := NewMap(&MapSpec{ 826 Type: Queue, 827 ValueSize: 4, 828 MaxEntries: 2, 829 }) 830 if err != nil { 831 t.Fatal(err) 832 } 833 defer m.Close() 834 835 for _, v := range []uint32{42, 4242} { 836 if err := m.Put(nil, v); err != nil { 837 t.Fatalf("Can't put %d: %s", v, err) 838 } 839 } 840 841 var v uint32 842 if err := m.LookupAndDelete(nil, &v); err != nil { 843 t.Fatal("Can't lookup and delete element:", err) 844 } 845 if v != 42 { 846 t.Error("Want value 42, 
got", v) 847 } 848 849 v = 0 850 if err := m.LookupAndDelete(nil, unsafe.Pointer(&v)); err != nil { 851 t.Fatal("Can't lookup and delete element using unsafe.Pointer:", err) 852 } 853 if v != 4242 { 854 t.Error("Want value 4242, got", v) 855 } 856 857 if err := m.LookupAndDelete(nil, &v); !errors.Is(err, ErrKeyNotExist) { 858 t.Fatal("Lookup and delete on empty Queue:", err) 859 } 860 } 861 862 func TestMapInMap(t *testing.T) { 863 for _, typ := range []MapType{ArrayOfMaps, HashOfMaps} { 864 t.Run(typ.String(), func(t *testing.T) { 865 spec := &MapSpec{ 866 Type: typ, 867 KeySize: 4, 868 MaxEntries: 2, 869 InnerMap: &MapSpec{ 870 Type: Array, 871 KeySize: 4, 872 ValueSize: 4, 873 MaxEntries: 2, 874 }, 875 } 876 877 inner, err := NewMap(spec.InnerMap) 878 if err != nil { 879 t.Fatal(err) 880 } 881 if err := inner.Put(uint32(1), uint32(4242)); err != nil { 882 t.Fatal(err) 883 } 884 defer inner.Close() 885 886 outer, err := NewMap(spec) 887 testutils.SkipIfNotSupported(t, err) 888 if err != nil { 889 t.Fatal(err) 890 } 891 defer outer.Close() 892 893 if err := outer.Put(uint32(0), inner); err != nil { 894 t.Fatal("Can't put inner map:", err) 895 } 896 897 var inner2 *Map 898 if err := outer.Lookup(uint32(0), &inner2); err != nil { 899 t.Fatal("Can't lookup 0:", err) 900 } 901 defer inner2.Close() 902 903 var v uint32 904 if err := inner2.Lookup(uint32(1), &v); err != nil { 905 t.Fatal("Can't lookup 1 in inner2:", err) 906 } 907 908 if v != 4242 { 909 t.Error("Expected value 4242, got", v) 910 } 911 912 inner2.Close() 913 914 // Make sure we can still access the original map 915 if err := inner.Lookup(uint32(1), &v); err != nil { 916 t.Fatal("Can't lookup 1 in inner:", err) 917 } 918 919 if v != 4242 { 920 t.Error("Expected value 4242, got", v) 921 } 922 }) 923 } 924 } 925 926 func TestNewMapInMapFromFD(t *testing.T) { 927 nested, err := NewMap(&MapSpec{ 928 Type: ArrayOfMaps, 929 KeySize: 4, 930 MaxEntries: 2, 931 InnerMap: &MapSpec{ 932 Type: Array, 933 KeySize: 4, 934 ValueSize: 4, 935 MaxEntries: 2, 936 }, 937 }) 938 testutils.SkipIfNotSupported(t, err) 939 if err != nil { 940 t.Fatal(err) 941 } 942 defer nested.Close() 943 944 // Do not copy this, use Clone instead. 
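	// The fd is duplicated first because NewMapFromFD takes ownership of the descriptor it receives.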
945 another, err := NewMapFromFD(dupFD(t, nested.FD())) 946 if err != nil { 947 t.Fatal("Can't create a new nested map from an FD") 948 } 949 another.Close() 950 } 951 952 func TestPerfEventArray(t *testing.T) { 953 specs := []*MapSpec{ 954 {Type: PerfEventArray}, 955 {Type: PerfEventArray, KeySize: 4}, 956 {Type: PerfEventArray, ValueSize: 4}, 957 } 958 959 for _, spec := range specs { 960 m, err := NewMap(spec) 961 if err != nil { 962 t.Errorf("Can't create perf event array from %v: %s", spec, err) 963 } else { 964 m.Close() 965 } 966 } 967 } 968 969 func createMapInMap(t *testing.T, typ MapType) *Map { 970 t.Helper() 971 972 spec := &MapSpec{ 973 Type: typ, 974 KeySize: 4, 975 MaxEntries: 2, 976 InnerMap: &MapSpec{ 977 Type: Array, 978 KeySize: 4, 979 ValueSize: 4, 980 MaxEntries: 2, 981 }, 982 } 983 984 m, err := NewMap(spec) 985 testutils.SkipIfNotSupported(t, err) 986 if err != nil { 987 t.Fatal(err) 988 } 989 return m 990 } 991 992 func TestMapInMapValueSize(t *testing.T) { 993 spec := &MapSpec{ 994 Type: ArrayOfMaps, 995 KeySize: 4, 996 ValueSize: 0, 997 MaxEntries: 2, 998 InnerMap: &MapSpec{ 999 Type: Array, 1000 KeySize: 4, 1001 ValueSize: 4, 1002 MaxEntries: 2, 1003 }, 1004 } 1005 1006 m, err := NewMap(spec) 1007 testutils.SkipIfNotSupported(t, err) 1008 if err != nil { 1009 t.Fatal(err) 1010 } 1011 m.Close() 1012 1013 spec.ValueSize = 4 1014 m, err = NewMap(spec) 1015 if err != nil { 1016 t.Fatal(err) 1017 } 1018 m.Close() 1019 1020 spec.ValueSize = 1 1021 if _, err := NewMap(spec); err == nil { 1022 t.Fatal("Expected an error") 1023 } 1024 } 1025 1026 func TestIterateEmptyMap(t *testing.T) { 1027 makeMap := func(t *testing.T, mapType MapType) *Map { 1028 m, err := NewMap(&MapSpec{ 1029 Type: mapType, 1030 KeySize: 4, 1031 ValueSize: 8, 1032 MaxEntries: 2, 1033 }) 1034 if errors.Is(err, unix.EINVAL) { 1035 t.Skip(mapType, "is not supported") 1036 } 1037 if err != nil { 1038 t.Fatal("Can't create map:", err) 1039 } 1040 t.Cleanup(func() { m.Close() }) 1041 return m 1042 } 1043 1044 for _, mapType := range []MapType{ 1045 Hash, 1046 SockHash, 1047 } { 1048 t.Run(mapType.String(), func(t *testing.T) { 1049 m := makeMap(t, mapType) 1050 entries := m.Iterate() 1051 1052 var key string 1053 var value uint64 1054 if entries.Next(&key, &value) != false { 1055 t.Error("Empty hash should not be iterable") 1056 } 1057 if err := entries.Err(); err != nil { 1058 t.Error("Empty hash shouldn't return an error:", err) 1059 } 1060 }) 1061 } 1062 1063 for _, mapType := range []MapType{ 1064 Array, 1065 SockMap, 1066 } { 1067 t.Run(mapType.String(), func(t *testing.T) { 1068 m := makeMap(t, mapType) 1069 entries := m.Iterate() 1070 var key string 1071 var value uint64 1072 for entries.Next(&key, &value) { 1073 // Some empty arrays like sockmap don't return any keys. 
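				// Plain Array maps, by contrast, yield every key with a zero value, which is why this loop drains them.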
			}
			if err := entries.Err(); err != nil {
				t.Error("Empty array shouldn't return an error:", err)
			}
		})
	}
}

func TestMapIterate(t *testing.T) {
	hash, err := NewMap(&MapSpec{
		Type:       Hash,
		KeySize:    5,
		ValueSize:  4,
		MaxEntries: 2,
	})
	if err != nil {
		t.Fatal(err)
	}
	defer hash.Close()

	if err := hash.Put("hello", uint32(21)); err != nil {
		t.Fatal(err)
	}

	if err := hash.Put("world", uint32(42)); err != nil {
		t.Fatal(err)
	}

	var key string
	var value uint32
	var keys []string

	entries := hash.Iterate()
	for entries.Next(&key, &value) {
		keys = append(keys, key)
	}

	if err := entries.Err(); err != nil {
		t.Fatal(err)
	}

	sort.Strings(keys)

	if n := len(keys); n != 2 {
		t.Fatal("Expected to get 2 keys, have", n)
	}
	if keys[0] != "hello" {
		t.Error("Expected index 0 to be hello, got", keys[0])
	}
	if keys[1] != "world" {
		t.Error("Expected index 1 to be world, got", keys[1])
	}
}

func TestMapIteratorAllocations(t *testing.T) {
	arr, err := NewMap(&MapSpec{
		Type:       Array,
		KeySize:    4,
		ValueSize:  4,
		MaxEntries: 10,
	})
	if err != nil {
		t.Fatal(err)
	}
	defer arr.Close()

	var k, v uint32
	iter := arr.Iterate()

	// AllocsPerRun warms up the function for us.
	allocs := testing.AllocsPerRun(int(arr.MaxEntries()-1), func() {
		if !iter.Next(&k, &v) {
			t.Fatal("Next failed")
		}
	})

	qt.Assert(t, qt.Equals(allocs, float64(0)))
}

func TestMapBatchLookupAllocations(t *testing.T) {
	testutils.SkipIfNotSupported(t, haveBatchAPI())

	arr, err := NewMap(&MapSpec{
		Type:       Array,
		KeySize:    4,
		ValueSize:  4,
		MaxEntries: 10,
	})
	if err != nil {
		t.Fatal(err)
	}
	defer arr.Close()

	var cursor MapBatchCursor
	tmp := make([]uint32, 2)
	input := any(tmp)

	// AllocsPerRun warms up the function for us.
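	// input was converted to any once above so the interface conversion's allocation isn't attributed to BatchLookup.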
1172 allocs := testing.AllocsPerRun(1, func() { 1173 _, err := arr.BatchLookup(&cursor, input, input, nil) 1174 if err != nil { 1175 t.Fatal(err) 1176 } 1177 }) 1178 1179 qt.Assert(t, qt.Equals(allocs, 0)) 1180 } 1181 1182 func TestMapIterateHashKeyOneByteFull(t *testing.T) { 1183 hash, err := NewMap(&MapSpec{ 1184 Type: Hash, 1185 KeySize: 1, 1186 ValueSize: 1, 1187 MaxEntries: 256, 1188 }) 1189 if err != nil { 1190 t.Fatal(err) 1191 } 1192 defer hash.Close() 1193 1194 for i := 0; i < int(hash.MaxEntries()); i++ { 1195 if err := hash.Put(uint8(i), uint8(i)); err != nil { 1196 t.Fatal(err) 1197 } 1198 } 1199 var key uint8 1200 var value uint8 1201 var keys int 1202 1203 entries := hash.Iterate() 1204 for entries.Next(&key, &value) { 1205 if key != value { 1206 t.Fatalf("Expected key == value, got key %v value %v", key, value) 1207 } 1208 keys++ 1209 } 1210 1211 if err := entries.Err(); err != nil { 1212 t.Fatal(err) 1213 } 1214 1215 if keys != int(hash.MaxEntries()) { 1216 t.Fatalf("Expected to get %d keys, have %d", hash.MaxEntries(), keys) 1217 } 1218 } 1219 1220 func TestMapGuessNonExistentKey(t *testing.T) { 1221 tests := []struct { 1222 name string 1223 mapType MapType 1224 keys []uint32 1225 }{ 1226 { 1227 "empty", Hash, []uint32{}, 1228 }, 1229 { 1230 "all zero key", Hash, []uint32{0}, 1231 }, 1232 { 1233 "all ones key", Hash, []uint32{math.MaxUint32}, 1234 }, 1235 { 1236 "alternating bits key", Hash, []uint32{0x5555_5555}, 1237 }, 1238 { 1239 "all special patterns", Hash, []uint32{0, math.MaxUint32, 0x5555_5555}, 1240 }, 1241 { 1242 "empty", Array, []uint32{}, 1243 }, 1244 { 1245 "all zero key", Array, []uint32{0}, 1246 }, 1247 { 1248 "full", Array, []uint32{0, 1}, 1249 }, 1250 } 1251 1252 for _, tt := range tests { 1253 t.Run(fmt.Sprintf("%s: %s", tt.mapType, tt.name), func(t *testing.T) { 1254 maxEntries := uint32(len(tt.keys)) 1255 if maxEntries == 0 { 1256 maxEntries = 1 1257 } 1258 1259 m, err := NewMap(&MapSpec{ 1260 Type: tt.mapType, 1261 KeySize: 4, 1262 ValueSize: 4, 1263 MaxEntries: maxEntries, 1264 }) 1265 if err != nil { 1266 t.Fatal(err) 1267 } 1268 defer m.Close() 1269 1270 for _, key := range tt.keys { 1271 if err := m.Put(key, key); err != nil { 1272 t.Fatal(err) 1273 } 1274 } 1275 1276 guess, err := m.guessNonExistentKey() 1277 if err != nil { 1278 t.Fatal(err) 1279 } 1280 1281 if len(guess) != int(m.keySize) { 1282 t.Fatal("Guessed key has wrong size") 1283 } 1284 1285 var value uint32 1286 if err := m.Lookup(guess, &value); !errors.Is(err, unix.ENOENT) { 1287 t.Fatal("Doesn't return ENOENT:", err) 1288 } 1289 }) 1290 } 1291 1292 t.Run("Hash: full", func(t *testing.T) { 1293 const n = math.MaxUint8 + 1 1294 1295 hash, err := NewMap(&MapSpec{ 1296 Type: Hash, 1297 KeySize: 1, 1298 ValueSize: 1, 1299 MaxEntries: n, 1300 }) 1301 if err != nil { 1302 t.Fatal(err) 1303 } 1304 defer hash.Close() 1305 1306 for i := 0; i < n; i++ { 1307 if err := hash.Put(uint8(i), uint8(i)); err != nil { 1308 t.Fatal(err) 1309 } 1310 } 1311 1312 _, err = hash.guessNonExistentKey() 1313 if err == nil { 1314 t.Fatal("guessNonExistentKey doesn't return error on full hash table") 1315 } 1316 }) 1317 } 1318 1319 func TestNotExist(t *testing.T) { 1320 hash := newHash(t) 1321 1322 var tmp uint32 1323 err := hash.Lookup("hello", &tmp) 1324 if !errors.Is(err, ErrKeyNotExist) { 1325 t.Error("Lookup doesn't return ErrKeyNotExist") 1326 } 1327 1328 buf, err := hash.LookupBytes("hello") 1329 if err != nil { 1330 t.Error("Looking up non-existent key return an error:", err) 1331 } 1332 if buf != nil { 
1333 t.Error("LookupBytes returns non-nil buffer for non-existent key") 1334 } 1335 1336 if err := hash.Delete("hello"); !errors.Is(err, ErrKeyNotExist) { 1337 t.Error("Deleting unknown key doesn't return ErrKeyNotExist", err) 1338 } 1339 1340 var k = []byte{1, 2, 3, 4, 5} 1341 if err := hash.NextKey(&k, &tmp); !errors.Is(err, ErrKeyNotExist) { 1342 t.Error("Looking up next key in empty map doesn't return a non-existing error", err) 1343 } 1344 1345 if err := hash.NextKey(nil, &tmp); !errors.Is(err, ErrKeyNotExist) { 1346 t.Error("Looking up next key in empty map doesn't return a non-existing error", err) 1347 } 1348 } 1349 1350 func TestExist(t *testing.T) { 1351 hash := newHash(t) 1352 1353 if err := hash.Put("hello", uint32(21)); err != nil { 1354 t.Errorf("Failed to put key/value pair into hash: %v", err) 1355 } 1356 1357 if err := hash.Update("hello", uint32(42), UpdateNoExist); !errors.Is(err, ErrKeyExist) { 1358 t.Error("Updating existing key doesn't return ErrKeyExist") 1359 } 1360 } 1361 1362 func TestIterateMapInMap(t *testing.T) { 1363 const idx = uint32(1) 1364 1365 parent := createMapInMap(t, ArrayOfMaps) 1366 defer parent.Close() 1367 1368 a := createArray(t) 1369 1370 if err := parent.Put(idx, a); err != nil { 1371 t.Fatal(err) 1372 } 1373 1374 var ( 1375 key uint32 1376 m *Map 1377 entries = parent.Iterate() 1378 ) 1379 1380 if !entries.Next(&key, &m) { 1381 t.Fatal("Iterator encountered error:", entries.Err()) 1382 } 1383 m.Close() 1384 1385 if key != 1 { 1386 t.Error("Iterator didn't skip first entry") 1387 } 1388 1389 if m == nil { 1390 t.Fatal("Map is nil") 1391 } 1392 } 1393 1394 func TestPerCPUMarshaling(t *testing.T) { 1395 for _, typ := range []MapType{PerCPUHash, PerCPUArray, LRUCPUHash} { 1396 t.Run(typ.String(), func(t *testing.T) { 1397 numCPU := MustPossibleCPU() 1398 if numCPU < 2 { 1399 t.Skip("Test requires at least two CPUs") 1400 } 1401 if typ == PerCPUHash || typ == PerCPUArray { 1402 testutils.SkipOnOldKernel(t, "4.6", "per-CPU hash and array") 1403 } 1404 if typ == LRUCPUHash { 1405 testutils.SkipOnOldKernel(t, "4.10", "LRU per-CPU hash") 1406 } 1407 1408 arr, err := NewMap(&MapSpec{ 1409 Type: typ, 1410 KeySize: 4, 1411 ValueSize: 5, 1412 MaxEntries: 1, 1413 }) 1414 if err != nil { 1415 t.Fatal(err) 1416 } 1417 defer arr.Close() 1418 1419 values := []*customEncoding{ 1420 {"hello"}, 1421 {"world"}, 1422 } 1423 if err := arr.Put(uint32(0), values); err != nil { 1424 t.Fatal(err) 1425 } 1426 1427 // Make sure unmarshaling works on slices containing pointers 1428 retrievedVal := make([]*customEncoding, numCPU) 1429 if err := arr.Lookup(uint32(0), retrievedVal); err == nil { 1430 t.Fatal("Slices with nil values should generate error") 1431 } 1432 for i := range retrievedVal { 1433 retrievedVal[i] = &customEncoding{} 1434 } 1435 if err := arr.Lookup(uint32(0), retrievedVal); err != nil { 1436 t.Fatal("Can't retrieve key 0:", err) 1437 } 1438 var retrieved []*customEncoding 1439 if err := arr.Lookup(uint32(0), &retrieved); err != nil { 1440 t.Fatal("Can't retrieve key 0:", err) 1441 } 1442 1443 for i, want := range []string{"HELLO", "WORLD"} { 1444 if retrieved[i] == nil { 1445 t.Error("First item is nil") 1446 } else if have := retrieved[i].data; have != want { 1447 t.Errorf("Put doesn't use BinaryMarshaler, expected %s but got %s", want, have) 1448 } 1449 } 1450 1451 }) 1452 } 1453 } 1454 1455 type bpfCgroupStorageKey struct { 1456 CgroupInodeId uint64 1457 AttachType AttachType 1458 _ [4]byte // Padding 1459 } 1460 1461 func 
TestCgroupPerCPUStorageMarshaling(t *testing.T) { 1462 numCPU := MustPossibleCPU() 1463 if numCPU < 2 { 1464 t.Skip("Test requires at least two CPUs") 1465 } 1466 testutils.SkipOnOldKernel(t, "5.9", "per-CPU CGoup storage with write from user space support") 1467 1468 cgroup := testutils.CreateCgroup(t) 1469 1470 arr, err := NewMap(&MapSpec{ 1471 Type: PerCPUCGroupStorage, 1472 KeySize: uint32(unsafe.Sizeof(bpfCgroupStorageKey{})), 1473 ValueSize: uint32(unsafe.Sizeof(uint64(0))), 1474 }) 1475 if err != nil { 1476 t.Fatal(err) 1477 } 1478 t.Cleanup(func() { 1479 arr.Close() 1480 }) 1481 1482 prog, err := NewProgram(&ProgramSpec{ 1483 Type: CGroupSKB, 1484 AttachType: AttachCGroupInetEgress, 1485 License: "MIT", 1486 Instructions: asm.Instructions{ 1487 asm.LoadMapPtr(asm.R1, arr.FD()), 1488 asm.Mov.Imm(asm.R2, 0), 1489 asm.FnGetLocalStorage.Call(), 1490 asm.Mov.Imm(asm.R0, 0), 1491 asm.Return(), 1492 }, 1493 }) 1494 if err != nil { 1495 t.Fatal(err) 1496 } 1497 defer prog.Close() 1498 1499 progAttachAttrs := sys.ProgAttachAttr{ 1500 TargetFdOrIfindex: uint32(cgroup.Fd()), 1501 AttachBpfFd: uint32(prog.FD()), 1502 AttachType: uint32(AttachCGroupInetEgress), 1503 AttachFlags: 0, 1504 ReplaceBpfFd: 0, 1505 } 1506 err = sys.ProgAttach(&progAttachAttrs) 1507 if err != nil { 1508 t.Fatal(err) 1509 } 1510 defer func() { 1511 attr := sys.ProgDetachAttr{ 1512 TargetFdOrIfindex: uint32(cgroup.Fd()), 1513 AttachBpfFd: uint32(prog.FD()), 1514 AttachType: uint32(AttachCGroupInetEgress), 1515 } 1516 if err := sys.ProgDetach(&attr); err != nil { 1517 t.Fatal(err) 1518 } 1519 }() 1520 1521 var mapKey = &bpfCgroupStorageKey{ 1522 CgroupInodeId: testutils.GetCgroupIno(t, cgroup), 1523 AttachType: AttachCGroupInetEgress, 1524 } 1525 1526 values := []uint64{1, 2} 1527 if err := arr.Put(mapKey, values); err != nil { 1528 t.Fatalf("Can't set cgroup %s storage: %s", cgroup.Name(), err) 1529 } 1530 1531 var retrieved []uint64 1532 if err := arr.Lookup(mapKey, &retrieved); err != nil { 1533 t.Fatalf("Can't retrieve cgroup %s storage: %s", cgroup.Name(), err) 1534 } 1535 1536 for i, want := range []uint64{1, 2} { 1537 if retrieved[i] == 0 { 1538 t.Errorf("Item %d is 0", i) 1539 } else if have := retrieved[i]; have != want { 1540 t.Errorf("PerCPUCGroupStorage map is not correctly unmarshaled, expected %d but got %d", want, have) 1541 } 1542 } 1543 } 1544 1545 func TestMapMarshalUnsafe(t *testing.T) { 1546 m, err := NewMap(&MapSpec{ 1547 Type: Hash, 1548 KeySize: 4, 1549 ValueSize: 4, 1550 MaxEntries: 1, 1551 }) 1552 if err != nil { 1553 t.Fatal(err) 1554 } 1555 defer m.Close() 1556 1557 key := uint32(1) 1558 value := uint32(42) 1559 1560 if err := m.Put(unsafe.Pointer(&key), unsafe.Pointer(&value)); err != nil { 1561 t.Fatal(err) 1562 } 1563 1564 var res uint32 1565 if err := m.Lookup(unsafe.Pointer(&key), unsafe.Pointer(&res)); err != nil { 1566 t.Fatal("Can't get item:", err) 1567 } 1568 1569 var sum uint32 1570 iter := m.Iterate() 1571 for iter.Next(&key, unsafe.Pointer(&res)) { 1572 sum += res 1573 } 1574 if err := iter.Err(); err != nil { 1575 t.Fatal(err) 1576 } 1577 1578 if res != 42 { 1579 t.Fatalf("Expected 42, got %d", res) 1580 } 1581 1582 iter = m.Iterate() 1583 iter.Next(unsafe.Pointer(&key), &res) 1584 if err := iter.Err(); err != nil { 1585 t.Error(err) 1586 } 1587 if key != 1 { 1588 t.Errorf("Expected key 1, got %d", key) 1589 } 1590 1591 if err := m.Delete(unsafe.Pointer(&key)); err != nil { 1592 t.Fatal("Can't delete:", err) 1593 } 1594 } 1595 1596 func TestMapName(t *testing.T) { 1597 if err := 
haveObjName(); err != nil {
		t.Skip(err)
	}

	m, err := NewMap(&MapSpec{
		Name:       "test",
		Type:       Array,
		KeySize:    4,
		ValueSize:  4,
		MaxEntries: 1,
	})
	if err != nil {
		t.Fatal(err)
	}
	defer m.Close()

	var info sys.MapInfo
	if err := sys.ObjInfo(m.fd, &info); err != nil {
		t.Fatal(err)
	}

	if name := unix.ByteSliceToString(info.Name[:]); name != "test" {
		t.Error("Expected name to be test, got", name)
	}
}

func TestMapFromFD(t *testing.T) {
	m := createArray(t)

	if err := m.Put(uint32(0), uint32(123)); err != nil {
		t.Fatal(err)
	}

	// If you're thinking about copying this, don't. Use
	// Clone() instead.
	m2, err := NewMapFromFD(dupFD(t, m.FD()))
	testutils.SkipIfNotSupported(t, err)
	if err != nil {
		t.Fatal(err)
	}
	defer m2.Close()

	var val uint32
	if err := m2.Lookup(uint32(0), &val); err != nil {
		t.Fatal("Can't look up key:", err)
	}

	if val != 123 {
		t.Error("Wrong value")
	}
}

func TestMapContents(t *testing.T) {
	spec := &MapSpec{
		Type:       Array,
		KeySize:    4,
		ValueSize:  4,
		MaxEntries: 2,
		Contents: []MapKV{
			{uint32(0), uint32(23)},
			{uint32(1), uint32(42)},
		},
	}

	m, err := NewMap(spec)
	if err != nil {
		t.Fatal("Can't create map:", err)
	}
	defer m.Close()

	var value uint32
	if err := m.Lookup(uint32(0), &value); err != nil {
		t.Error("Can't look up key 0:", err)
	} else if value != 23 {
		t.Errorf("Incorrect value for key 0, expected 23, have %d", value)
	}

	if err := m.Lookup(uint32(1), &value); err != nil {
		t.Error("Can't look up key 1:", err)
	} else if value != 42 {
		t.Errorf("Incorrect value for key 1, expected 42, have %d", value)
	}

	spec.Contents = []MapKV{
		// Key is larger than MaxEntries
		{uint32(14), uint32(0)},
	}

	if _, err = NewMap(spec); err == nil {
		t.Error("Invalid contents should be rejected")
	}
}

func TestMapFreeze(t *testing.T) {
	arr := createArray(t)

	err := arr.Freeze()
	testutils.SkipIfNotSupported(t, err)

	if err != nil {
		t.Fatal("Can't freeze map:", err)
	}

	if err := arr.Put(uint32(0), uint32(1)); err == nil {
		t.Error("Freeze doesn't prevent modification from user space")
	}
}

func TestMapGetNextID(t *testing.T) {
	testutils.SkipOnOldKernel(t, "4.13", "bpf_map_get_next_id")
	var next MapID
	var err error

	// Ensure there is at least one map on the system.
1711 _ = newHash(t) 1712 1713 if next, err = MapGetNextID(MapID(0)); err != nil { 1714 t.Fatal("Can't get next ID:", err) 1715 } 1716 if next == MapID(0) { 1717 t.Fatal("Expected next ID other than 0") 1718 } 1719 1720 // As there can be multiple eBPF maps, we loop over all of them and 1721 // make sure, the IDs increase and the last call will return ErrNotExist 1722 for { 1723 last := next 1724 if next, err = MapGetNextID(last); err != nil { 1725 if !errors.Is(err, os.ErrNotExist) { 1726 t.Fatal("Expected ErrNotExist, got:", err) 1727 } 1728 break 1729 } 1730 if next <= last { 1731 t.Fatalf("Expected next ID (%d) to be higher than the last ID (%d)", next, last) 1732 } 1733 } 1734 } 1735 1736 func TestNewMapFromID(t *testing.T) { 1737 hash := newHash(t) 1738 1739 info, err := hash.Info() 1740 testutils.SkipIfNotSupported(t, err) 1741 if err != nil { 1742 t.Fatal("Couldn't get map info:", err) 1743 } 1744 1745 id, ok := info.ID() 1746 if !ok { 1747 t.Skip("Map ID not supported") 1748 } 1749 1750 hash2, err := NewMapFromID(id) 1751 if err != nil { 1752 t.Fatalf("Can't get map for ID %d: %v", id, err) 1753 } 1754 hash2.Close() 1755 1756 // As there can be multiple maps, we use max(uint32) as MapID to trigger an expected error. 1757 _, err = NewMapFromID(MapID(math.MaxUint32)) 1758 if !errors.Is(err, os.ErrNotExist) { 1759 t.Fatal("Expected ErrNotExist, got:", err) 1760 } 1761 } 1762 1763 func TestMapPinning(t *testing.T) { 1764 tmp := testutils.TempBPFFS(t) 1765 1766 spec := &MapSpec{ 1767 Name: "test", 1768 Type: Hash, 1769 KeySize: 4, 1770 ValueSize: 4, 1771 MaxEntries: 1, 1772 Pinning: PinByName, 1773 } 1774 1775 m1, err := NewMapWithOptions(spec, MapOptions{PinPath: tmp}) 1776 if err != nil { 1777 t.Fatal("Can't create map:", err) 1778 } 1779 defer m1.Close() 1780 pinned := m1.IsPinned() 1781 qt.Assert(t, qt.IsTrue(pinned)) 1782 1783 m1Info, err := m1.Info() 1784 qt.Assert(t, qt.IsNil(err)) 1785 1786 if err := m1.Put(uint32(0), uint32(42)); err != nil { 1787 t.Fatal("Can't write value:", err) 1788 } 1789 1790 m2, err := NewMapWithOptions(spec, MapOptions{PinPath: tmp}) 1791 testutils.SkipIfNotSupported(t, err) 1792 if err != nil { 1793 t.Fatal("Can't create map:", err) 1794 } 1795 defer m2.Close() 1796 1797 m2Info, err := m2.Info() 1798 qt.Assert(t, qt.IsNil(err)) 1799 1800 if m1ID, ok := m1Info.ID(); ok { 1801 m2ID, _ := m2Info.ID() 1802 qt.Assert(t, qt.Equals(m2ID, m1ID)) 1803 } 1804 1805 var value uint32 1806 if err := m2.Lookup(uint32(0), &value); err != nil { 1807 t.Fatal("Can't read from map:", err) 1808 } 1809 1810 if value != 42 { 1811 t.Fatal("Pinning doesn't use pinned maps") 1812 } 1813 1814 spec.KeySize = 8 1815 spec.ValueSize = 8 1816 m3, err := NewMapWithOptions(spec, MapOptions{PinPath: tmp}) 1817 if err == nil { 1818 m3.Close() 1819 t.Fatalf("Opening a pinned map with a mismatching spec did not fail") 1820 } 1821 if !errors.Is(err, ErrMapIncompatible) { 1822 t.Fatalf("Opening a pinned map with a mismatching spec failed with the wrong error") 1823 } 1824 1825 // Check if error string mentions both KeySize and ValueSize. 
1826 qt.Assert(t, qt.StringContains(err.Error(), "KeySize")) 1827 qt.Assert(t, qt.StringContains(err.Error(), "ValueSize")) 1828 } 1829 1830 func TestMapHandle(t *testing.T) { 1831 testutils.SkipOnOldKernel(t, "4.18", "btf_id in map info") 1832 1833 kv := &btf.Int{Size: 4} 1834 m, err := NewMap(&MapSpec{ 1835 Type: Hash, 1836 KeySize: kv.Size, 1837 ValueSize: kv.Size, 1838 Key: kv, 1839 Value: kv, 1840 MaxEntries: 1, 1841 }) 1842 qt.Assert(t, qt.IsNil(err)) 1843 defer m.Close() 1844 1845 h, err := m.Handle() 1846 qt.Assert(t, qt.IsNil(err)) 1847 qt.Assert(t, qt.IsNotNil(h)) 1848 defer h.Close() 1849 1850 spec, err := h.Spec(nil) 1851 qt.Assert(t, qt.IsNil(err)) 1852 1853 typ, err := spec.TypeByID(1) 1854 qt.Assert(t, qt.IsNil(err)) 1855 qt.Assert(t, qt.ContentEquals(typ, btf.Type(kv))) 1856 } 1857 1858 func TestPerfEventArrayCompatible(t *testing.T) { 1859 ms := &MapSpec{ 1860 Type: PerfEventArray, 1861 } 1862 1863 m, err := NewMap(ms) 1864 qt.Assert(t, qt.IsNil(err)) 1865 defer m.Close() 1866 1867 qt.Assert(t, qt.IsNil(ms.Compatible(m))) 1868 1869 ms.MaxEntries = m.MaxEntries() - 1 1870 qt.Assert(t, qt.IsNotNil(ms.Compatible(m))) 1871 } 1872 1873 type benchValue struct { 1874 ID uint32 1875 Val16 uint16 1876 Val16_2 uint16 1877 Name [8]byte 1878 LID uint64 1879 } 1880 1881 type customBenchValue benchValue 1882 1883 func (cbv *customBenchValue) UnmarshalBinary(buf []byte) error { 1884 cbv.ID = internal.NativeEndian.Uint32(buf) 1885 cbv.Val16 = internal.NativeEndian.Uint16(buf[4:]) 1886 cbv.Val16_2 = internal.NativeEndian.Uint16(buf[6:]) 1887 copy(cbv.Name[:], buf[8:]) 1888 cbv.LID = internal.NativeEndian.Uint64(buf[16:]) 1889 return nil 1890 } 1891 1892 func (cbv *customBenchValue) MarshalBinary() ([]byte, error) { 1893 buf := make([]byte, 24) 1894 internal.NativeEndian.PutUint32(buf, cbv.ID) 1895 internal.NativeEndian.PutUint16(buf[4:], cbv.Val16) 1896 internal.NativeEndian.PutUint16(buf[6:], cbv.Val16_2) 1897 copy(buf[8:], cbv.Name[:]) 1898 internal.NativeEndian.PutUint64(buf[16:], cbv.LID) 1899 return buf, nil 1900 } 1901 1902 type benchKey struct { 1903 id uint64 1904 } 1905 1906 func (bk *benchKey) MarshalBinary() ([]byte, error) { 1907 buf := make([]byte, 8) 1908 internal.NativeEndian.PutUint64(buf, bk.id) 1909 return buf, nil 1910 } 1911 1912 func BenchmarkMarshaling(b *testing.B) { 1913 newMap := func(valueSize uint32) *Map { 1914 m, err := NewMap(&MapSpec{ 1915 Type: Hash, 1916 KeySize: 8, 1917 ValueSize: valueSize, 1918 MaxEntries: 1, 1919 }) 1920 if err != nil { 1921 b.Fatal(err) 1922 } 1923 return m 1924 } 1925 1926 key := uint64(0) 1927 1928 m := newMap(24) 1929 if err := m.Put(key, benchValue{}); err != nil { 1930 b.Fatal(err) 1931 } 1932 b.Cleanup(func() { m.Close() }) 1933 1934 b.Run("ValueUnmarshalReflect", func(b *testing.B) { 1935 b.ReportAllocs() 1936 b.ResetTimer() 1937 1938 var value benchValue 1939 1940 for i := 0; i < b.N; i++ { 1941 err := m.Lookup(unsafe.Pointer(&key), &value) 1942 if err != nil { 1943 b.Fatal("Can't get key:", err) 1944 } 1945 } 1946 }) 1947 1948 b.Run("KeyMarshalReflect", func(b *testing.B) { 1949 b.ReportAllocs() 1950 b.ResetTimer() 1951 1952 var value benchValue 1953 1954 for i := 0; i < b.N; i++ { 1955 err := m.Lookup(&key, unsafe.Pointer(&value)) 1956 if err != nil { 1957 b.Fatal("Can't get key:", err) 1958 } 1959 } 1960 }) 1961 1962 b.Run("ValueBinaryUnmarshaler", func(b *testing.B) { 1963 b.ReportAllocs() 1964 b.ResetTimer() 1965 1966 var value customBenchValue 1967 1968 for i := 0; i < b.N; i++ { 1969 err := m.Lookup(unsafe.Pointer(&key), 
&value) 1970 if err != nil { 1971 b.Fatal("Can't get key:", err) 1972 } 1973 } 1974 }) 1975 1976 b.Run("KeyBinaryMarshaler", func(b *testing.B) { 1977 b.ReportAllocs() 1978 b.ResetTimer() 1979 1980 var key benchKey 1981 var value customBenchValue 1982 1983 for i := 0; i < b.N; i++ { 1984 err := m.Lookup(&key, unsafe.Pointer(&value)) 1985 if err != nil { 1986 b.Fatal("Can't get key:", err) 1987 } 1988 } 1989 }) 1990 1991 b.Run("KeyValueUnsafe", func(b *testing.B) { 1992 b.ReportAllocs() 1993 b.ResetTimer() 1994 1995 var value benchValue 1996 1997 for i := 0; i < b.N; i++ { 1998 err := m.Lookup(unsafe.Pointer(&key), unsafe.Pointer(&value)) 1999 if err != nil { 2000 b.Fatal("Can't get key:", err) 2001 } 2002 } 2003 }) 2004 } 2005 2006 func BenchmarkPerCPUMarshalling(b *testing.B) { 2007 key := uint64(1) 2008 val := make([]uint64, MustPossibleCPU()) 2009 for i := range val { 2010 val[i] = uint64(i) 2011 } 2012 2013 m, err := NewMap(&MapSpec{ 2014 Type: PerCPUHash, 2015 KeySize: 8, 2016 ValueSize: 8, 2017 MaxEntries: 1, 2018 }) 2019 if err != nil { 2020 b.Fatal(err) 2021 } 2022 2023 b.Cleanup(func() { m.Close() }) 2024 if err := m.Put(key, val[0:]); err != nil { 2025 b.Fatal(err) 2026 } 2027 2028 b.Run("reflection", func(b *testing.B) { 2029 b.ReportAllocs() 2030 b.ResetTimer() 2031 2032 var value []uint64 2033 2034 for i := 0; i < b.N; i++ { 2035 err := m.Lookup(unsafe.Pointer(&key), &value) 2036 if err != nil { 2037 b.Fatal("Can't get key:", err) 2038 } 2039 } 2040 }) 2041 } 2042 2043 func BenchmarkMap(b *testing.B) { 2044 m, err := NewMap(&MapSpec{ 2045 Type: Hash, 2046 KeySize: 4, 2047 ValueSize: 4, 2048 MaxEntries: 1, 2049 }) 2050 if err != nil { 2051 b.Fatal(err) 2052 } 2053 b.Cleanup(func() { m.Close() }) 2054 2055 if err := m.Put(uint32(0), uint32(42)); err != nil { 2056 b.Fatal(err) 2057 } 2058 2059 b.Run("Lookup", func(b *testing.B) { 2060 var key, value uint32 2061 2062 b.ReportAllocs() 2063 2064 for i := 0; i < b.N; i++ { 2065 err := m.Lookup(unsafe.Pointer(&key), unsafe.Pointer(&value)) 2066 if err != nil { 2067 b.Fatal(err) 2068 } 2069 } 2070 }) 2071 2072 b.Run("Update", func(b *testing.B) { 2073 var key, value uint32 2074 2075 b.ReportAllocs() 2076 2077 for i := 0; i < b.N; i++ { 2078 err := m.Update(unsafe.Pointer(&key), unsafe.Pointer(&value), UpdateAny) 2079 if err != nil { 2080 b.Fatal(err) 2081 } 2082 } 2083 }) 2084 2085 b.Run("NextKey", func(b *testing.B) { 2086 var key uint32 2087 2088 b.ReportAllocs() 2089 2090 for i := 0; i < b.N; i++ { 2091 err := m.NextKey(nil, unsafe.Pointer(&key)) 2092 if err != nil { 2093 b.Fatal(err) 2094 } 2095 } 2096 }) 2097 2098 b.Run("Delete", func(b *testing.B) { 2099 var key uint32 2100 2101 b.ReportAllocs() 2102 2103 for i := 0; i < b.N; i++ { 2104 err := m.Delete(unsafe.Pointer(&key)) 2105 if err != nil && !errors.Is(err, ErrKeyNotExist) { 2106 b.Fatal(err) 2107 } 2108 } 2109 }) 2110 } 2111 2112 func BenchmarkIterate(b *testing.B) { 2113 for _, mt := range []MapType{Hash, PerCPUHash} { 2114 m, err := NewMap(&MapSpec{ 2115 Type: mt, 2116 KeySize: 8, 2117 ValueSize: 8, 2118 MaxEntries: 1000, 2119 }) 2120 if err != nil { 2121 b.Fatal(err) 2122 } 2123 b.Cleanup(func() { 2124 m.Close() 2125 }) 2126 possibleCPU := 1 2127 if m.Type().hasPerCPUValue() { 2128 possibleCPU = MustPossibleCPU() 2129 } 2130 var ( 2131 n = m.MaxEntries() 2132 keys = make([]uint64, n) 2133 values = make([]uint64, n*uint32(possibleCPU)) 2134 ) 2135 2136 for i := 0; uint32(i) < n; i++ { 2137 keys[i] = uint64(i) 2138 for j := 0; j < possibleCPU; j++ { 2139 values[i] = 
uint64((i * possibleCPU) + j) 2140 } 2141 } 2142 2143 _, err = m.BatchUpdate(keys, values, nil) 2144 testutils.SkipIfNotSupported(b, err) 2145 qt.Assert(b, qt.IsNil(err)) 2146 2147 b.Run(m.Type().String(), func(b *testing.B) { 2148 b.Run("MapIterator", func(b *testing.B) { 2149 var k uint64 2150 v := make([]uint64, possibleCPU) 2151 2152 b.ReportAllocs() 2153 b.ResetTimer() 2154 2155 for i := 0; i < b.N; i++ { 2156 iter := m.Iterate() 2157 for iter.Next(&k, v) { 2158 continue 2159 } 2160 if err := iter.Err(); err != nil { 2161 b.Fatal(err) 2162 } 2163 } 2164 }) 2165 2166 b.Run("MapIteratorDelete", func(b *testing.B) { 2167 var k uint64 2168 v := make([]uint64, possibleCPU) 2169 2170 b.ReportAllocs() 2171 b.ResetTimer() 2172 2173 for i := 0; i < b.N; i++ { 2174 b.StopTimer() 2175 if _, err := m.BatchUpdate(keys, values, nil); err != nil { 2176 b.Fatal(err) 2177 } 2178 b.StartTimer() 2179 2180 iter := m.Iterate() 2181 for iter.Next(&k, &v) { 2182 if err := m.Delete(&k); err != nil { 2183 b.Fatal(err) 2184 } 2185 } 2186 if err := iter.Err(); err != nil { 2187 b.Fatal(err) 2188 } 2189 } 2190 }) 2191 2192 b.Run("BatchLookup", func(b *testing.B) { 2193 k := make([]uint64, m.MaxEntries()) 2194 v := make([]uint64, m.MaxEntries()*uint32(possibleCPU)) 2195 2196 b.ReportAllocs() 2197 b.ResetTimer() 2198 2199 for i := 0; i < b.N; i++ { 2200 var cursor MapBatchCursor 2201 for { 2202 _, err := m.BatchLookup(&cursor, k, v, nil) 2203 if errors.Is(err, ErrKeyNotExist) { 2204 break 2205 } 2206 if err != nil { 2207 b.Fatal(err) 2208 } 2209 } 2210 } 2211 }) 2212 2213 b.Run("BatchLookupAndDelete", func(b *testing.B) { 2214 k := make([]uint64, m.MaxEntries()) 2215 v := make([]uint64, m.MaxEntries()*uint32(possibleCPU)) 2216 2217 b.ReportAllocs() 2218 b.ResetTimer() 2219 2220 for i := 0; i < b.N; i++ { 2221 b.StopTimer() 2222 if _, err := m.BatchUpdate(keys, values, nil); err != nil { 2223 b.Fatal(err) 2224 } 2225 b.StartTimer() 2226 2227 var cursor MapBatchCursor 2228 for { 2229 _, err := m.BatchLookupAndDelete(&cursor, k, v, nil) 2230 if errors.Is(err, ErrKeyNotExist) { 2231 break 2232 } 2233 if err != nil { 2234 b.Fatal(err) 2235 } 2236 } 2237 } 2238 }) 2239 2240 b.Run("BatchDelete", func(b *testing.B) { 2241 b.ReportAllocs() 2242 b.ResetTimer() 2243 2244 for i := 0; i < b.N; i++ { 2245 b.StopTimer() 2246 if _, err := m.BatchUpdate(keys, values, nil); err != nil { 2247 b.Fatal(err) 2248 } 2249 b.StartTimer() 2250 2251 if _, err := m.BatchDelete(keys, nil); err != nil { 2252 b.Fatal(err) 2253 } 2254 } 2255 }) 2256 }) 2257 } 2258 } 2259 2260 // Per CPU maps store a distinct value for each CPU. They are useful 2261 // to collect metrics. 2262 func ExampleMap_perCPU() { 2263 arr, err := NewMap(&MapSpec{ 2264 Type: PerCPUArray, 2265 KeySize: 4, 2266 ValueSize: 4, 2267 MaxEntries: 2, 2268 }) 2269 if err != nil { 2270 panic(err) 2271 } 2272 defer arr.Close() 2273 2274 possibleCPUs := MustPossibleCPU() 2275 perCPUValues := map[uint32]uint32{ 2276 0: 4, 2277 1: 5, 2278 } 2279 2280 for k, v := range perCPUValues { 2281 // We set each perCPU slots to the same value. 2282 values := make([]uint32, possibleCPUs) 2283 for i := range values { 2284 values[i] = v 2285 } 2286 if err := arr.Put(k, values); err != nil { 2287 panic(err) 2288 } 2289 } 2290 2291 for k := 0; k < 2; k++ { 2292 var values []uint32 2293 if err := arr.Lookup(uint32(k), &values); err != nil { 2294 panic(err) 2295 } 2296 // Note we will print an unexpected message if this is not true. 
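		// Every per-CPU slot was filled with the same value above, so slot 0 is representative of all CPUs.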
2297 fmt.Printf("Value of key %v on all CPUs: %v\n", k, values[0]) 2298 } 2299 var ( 2300 key uint32 2301 entries = arr.Iterate() 2302 ) 2303 2304 var values []uint32 2305 for entries.Next(&key, &values) { 2306 expected, ok := perCPUValues[key] 2307 if !ok { 2308 fmt.Printf("Unexpected key %v\n", key) 2309 continue 2310 } 2311 2312 for i, n := range values { 2313 if n != expected { 2314 fmt.Printf("Key %v, Value for cpu %v is %v not %v\n", 2315 key, i, n, expected) 2316 } 2317 } 2318 } 2319 2320 if err := entries.Err(); err != nil { 2321 panic(err) 2322 } 2323 // Output: 2324 // Value of key 0 on all CPUs: 4 2325 // Value of key 1 on all CPUs: 5 2326 } 2327 2328 // It is possible to use unsafe.Pointer to avoid marshalling 2329 // and copy overhead. It is the responsibility of the caller to ensure 2330 // the correct size of unsafe.Pointers. 2331 // 2332 // Note that using unsafe.Pointer is only marginally faster than 2333 // implementing Marshaler on the type. 2334 func ExampleMap_zeroCopy() { 2335 hash, err := NewMap(&MapSpec{ 2336 Type: Hash, 2337 KeySize: 5, 2338 ValueSize: 4, 2339 MaxEntries: 10, 2340 }) 2341 if err != nil { 2342 panic(err) 2343 } 2344 defer hash.Close() 2345 2346 key := [5]byte{'h', 'e', 'l', 'l', 'o'} 2347 value := uint32(23) 2348 2349 if err := hash.Put(unsafe.Pointer(&key), unsafe.Pointer(&value)); err != nil { 2350 panic(err) 2351 } 2352 2353 value = 0 2354 if err := hash.Lookup(unsafe.Pointer(&key), unsafe.Pointer(&value)); err != nil { 2355 panic("can't get value:" + err.Error()) 2356 } 2357 2358 fmt.Printf("The value is: %d\n", value) 2359 // Output: The value is: 23 2360 } 2361 2362 func ExampleMap_NextKey() { 2363 hash, err := NewMap(&MapSpec{ 2364 Type: Hash, 2365 KeySize: 5, 2366 ValueSize: 4, 2367 MaxEntries: 10, 2368 Contents: []MapKV{ 2369 {"hello", uint32(21)}, 2370 {"world", uint32(42)}, 2371 }, 2372 }) 2373 if err != nil { 2374 panic(err) 2375 } 2376 defer hash.Close() 2377 2378 var cur, next string 2379 var keys []string 2380 2381 for err = hash.NextKey(nil, &next); ; err = hash.NextKey(cur, &next) { 2382 if errors.Is(err, ErrKeyNotExist) { 2383 break 2384 } 2385 if err != nil { 2386 panic(err) 2387 } 2388 keys = append(keys, next) 2389 cur = next 2390 } 2391 2392 // Order of keys is non-deterministic due to randomized map seed 2393 sort.Strings(keys) 2394 fmt.Printf("Keys are %v\n", keys) 2395 // Output: Keys are [hello world] 2396 } 2397 2398 // ExampleMap_Iterate demonstrates how to iterate over all entries 2399 // in a map. 2400 func ExampleMap_Iterate() { 2401 hash, err := NewMap(&MapSpec{ 2402 Type: Hash, 2403 KeySize: 5, 2404 ValueSize: 4, 2405 MaxEntries: 10, 2406 Contents: []MapKV{ 2407 {"hello", uint32(21)}, 2408 {"world", uint32(42)}, 2409 }, 2410 }) 2411 if err != nil { 2412 panic(err) 2413 } 2414 defer hash.Close() 2415 2416 var ( 2417 key string 2418 value uint32 2419 entries = hash.Iterate() 2420 ) 2421 2422 values := make(map[string]uint32) 2423 for entries.Next(&key, &value) { 2424 // Order of keys is non-deterministic due to randomized map seed 2425 values[key] = value 2426 } 2427 2428 if err := entries.Err(); err != nil { 2429 panic(fmt.Sprint("Iterator encountered an error:", err)) 2430 } 2431 2432 for k, v := range values { 2433 fmt.Printf("key: %s, value: %d\n", k, v) 2434 } 2435 2436 // Unordered output: 2437 // key: hello, value: 21 2438 // key: world, value: 42 2439 } 2440 2441 // It is possible to iterate nested maps and program arrays by 2442 // unmarshaling into a *Map or *Program. 
func ExampleMap_Iterate_nestedMapsAndProgramArrays() {
	inner := &MapSpec{
		Type:       Array,
		KeySize:    4,
		ValueSize:  4,
		MaxEntries: 2,
		Contents: []MapKV{
			{uint32(0), uint32(1)},
			{uint32(1), uint32(2)},
		},
	}
	im, err := NewMap(inner)
	if err != nil {
		panic(err)
	}
	defer im.Close()

	outer := &MapSpec{
		Type:       ArrayOfMaps,
		InnerMap:   inner,
		KeySize:    4,
		ValueSize:  4,
		MaxEntries: 10,
		Contents: []MapKV{
			{uint32(0), im},
		},
	}
	arrayOfMaps, err := NewMap(outer)
	if errors.Is(err, internal.ErrNotSupported) {
		// Fake the output if on very old kernel.
		fmt.Println("outerKey: 0")
		fmt.Println("\tinnerKey 0 innerValue 1")
		fmt.Println("\tinnerKey 1 innerValue 2")
		return
	}
	if err != nil {
		panic(err)
	}
	defer arrayOfMaps.Close()

	var (
		key     uint32
		m       *Map
		entries = arrayOfMaps.Iterate()
	)
	for entries.Next(&key, &m) {
		// Make sure that the iterated map is closed after
		// we are done.
		defer m.Close()

		// Order of keys is non-deterministic due to randomized map seed
		fmt.Printf("outerKey: %v\n", key)

		var innerKey, innerValue uint32
		items := m.Iterate()
		for items.Next(&innerKey, &innerValue) {
			fmt.Printf("\tinnerKey %v innerValue %v\n", innerKey, innerValue)
		}
		if err := items.Err(); err != nil {
			panic(fmt.Sprint("Inner Iterator encountered an error:", err))
		}
	}

	if err := entries.Err(); err != nil {
		panic(fmt.Sprint("Iterator encountered an error:", err))
	}
	// Output:
	// outerKey: 0
	//	innerKey 0 innerValue 1
	//	innerKey 1 innerValue 2
}
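
// ExampleMap_batchLookup is a minimal sketch of driving BatchUpdate and
// BatchLookup together: it writes several entries in a single syscall and
// drains them back through a MapBatchCursor. It assumes the running kernel
// supports the batch API and, having no Output comment, is only compiled,
// not executed.
func ExampleMap_batchLookup() {
	arr, err := NewMap(&MapSpec{
		Type:       Array,
		KeySize:    4,
		ValueSize:  4,
		MaxEntries: 4,
	})
	if err != nil {
		panic(err)
	}
	defer arr.Close()

	keys := []uint32{0, 1, 2, 3}
	values := []uint32{10, 20, 30, 40}
	if _, err := arr.BatchUpdate(keys, values, nil); err != nil {
		panic(err)
	}

	// The cursor tracks progress across successive BatchLookup calls;
	// ErrKeyNotExist signals that the map has been exhausted.
	var cursor MapBatchCursor
	lookupKeys := make([]uint32, 4)
	lookupValues := make([]uint32, 4)
	for {
		n, err := arr.BatchLookup(&cursor, lookupKeys, lookupValues, nil)
		for i := 0; i < n; i++ {
			fmt.Println(lookupKeys[i], lookupValues[i])
		}
		if errors.Is(err, ErrKeyNotExist) {
			break
		}
		if err != nil {
			panic(err)
		}
	}
}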