github.com/cilium/ebpf@v0.15.1-0.20240517100537-8079b37aa138/map_test.go (about) 1 package ebpf 2 3 import ( 4 "errors" 5 "fmt" 6 "math" 7 "os" 8 "path/filepath" 9 "sort" 10 "testing" 11 "unsafe" 12 13 "github.com/cilium/ebpf/asm" 14 "github.com/cilium/ebpf/internal" 15 "github.com/cilium/ebpf/internal/sys" 16 "github.com/cilium/ebpf/internal/testutils" 17 "github.com/cilium/ebpf/internal/unix" 18 19 "github.com/go-quicktest/qt" 20 ) 21 22 var ( 23 spec1 = &MapSpec{ 24 Name: "foo", 25 Type: Hash, 26 KeySize: 4, 27 ValueSize: 4, 28 MaxEntries: 1, 29 Pinning: PinByName, 30 } 31 ) 32 33 // newHash returns a new Map of type Hash. Cleanup is handled automatically. 34 func newHash(t *testing.T) *Map { 35 hash, err := NewMap(&MapSpec{ 36 Type: Hash, 37 KeySize: 5, 38 ValueSize: 4, 39 MaxEntries: 10, 40 }) 41 if err != nil { 42 t.Fatal(err) 43 } 44 t.Cleanup(func() { hash.Close() }) 45 return hash 46 } 47 48 func TestMap(t *testing.T) { 49 m := createArray(t) 50 51 t.Log(m) 52 53 if err := m.Put(uint32(0), uint32(42)); err != nil { 54 t.Fatal("Can't put:", err) 55 } 56 if err := m.Put(uint32(1), uint32(4242)); err != nil { 57 t.Fatal("Can't put:", err) 58 } 59 60 m2, err := m.Clone() 61 if err != nil { 62 t.Fatal("Can't clone map:", err) 63 } 64 defer m2.Close() 65 66 m.Close() 67 m = m2 68 69 var v uint32 70 if err := m.Lookup(uint32(0), &v); err != nil { 71 t.Fatal("Can't lookup 0:", err) 72 } 73 if v != 42 { 74 t.Error("Want value 42, got", v) 75 } 76 77 sliceVal := make([]uint32, 1) 78 qt.Assert(t, qt.IsNil(m.Lookup(uint32(0), sliceVal))) 79 qt.Assert(t, qt.DeepEquals(sliceVal, []uint32{42})) 80 81 var slice []byte 82 qt.Assert(t, qt.IsNil(m.Lookup(uint32(0), &slice))) 83 qt.Assert(t, qt.DeepEquals(slice, internal.NativeEndian.AppendUint32(nil, 42))) 84 85 var k uint32 86 if err := m.NextKey(uint32(0), &k); err != nil { 87 t.Fatal("Can't get:", err) 88 } 89 if k != 1 { 90 t.Error("Want key 1, got", k) 91 } 92 } 93 94 func TestMapBatch(t *testing.T) { 95 if err := haveBatchAPI(); err != nil { 96 t.Skipf("batch api not available: %v", err) 97 } 98 99 contents := map[uint32]uint32{ 100 0: 42, 1: 4242, 2: 23, 3: 2323, 101 } 102 mustNewMap := func(mapType MapType, max uint32) *Map { 103 m, err := NewMap(&MapSpec{ 104 Type: mapType, 105 KeySize: 4, 106 ValueSize: 4, 107 MaxEntries: max, 108 }) 109 if err != nil { 110 t.Fatal(err) 111 } 112 return m 113 } 114 115 var ( 116 // Make the map large enough to avoid ENOSPC. 117 hashMax uint32 = uint32(len(contents)) * 10 118 arrayMax uint32 = 4 119 ) 120 121 hash := mustNewMap(Hash, hashMax) 122 defer hash.Close() 123 124 array := mustNewMap(Array, arrayMax) 125 defer array.Close() 126 127 hashPerCpu := mustNewMap(PerCPUHash, hashMax) 128 defer hashPerCpu.Close() 129 130 arrayPerCpu := mustNewMap(PerCPUArray, arrayMax) 131 defer arrayPerCpu.Close() 132 133 for _, m := range []*Map{array, hash, arrayPerCpu, hashPerCpu} { 134 t.Run(m.Type().String(), func(t *testing.T) { 135 if m.Type() == PerCPUArray { 136 // https://lore.kernel.org/bpf/20210424214510.806627-2-pctammela@mojatatu.com/ 137 testutils.SkipOnOldKernel(t, "5.13", "batched ops support for percpu array") 138 } 139 possibleCPU := 1 140 if m.Type().hasPerCPUValue() { 141 possibleCPU = MustPossibleCPU() 142 } 143 var keys, values []uint32 144 for key, value := range contents { 145 keys = append(keys, key) 146 for i := 0; i < possibleCPU; i++ { 147 values = append(values, value*uint32((i+1))) 148 } 149 } 150 151 count, err := m.BatchUpdate(keys, values, nil) 152 qt.Assert(t, qt.IsNil(err)) 153 qt.Assert(t, qt.Equals(count, len(contents))) 154 155 n := len(contents) / 2 // cut buffer in half 156 lookupKeys := make([]uint32, n) 157 lookupValues := make([]uint32, n*possibleCPU) 158 159 var cursor MapBatchCursor 160 var total int 161 for { 162 count, err = m.BatchLookup(&cursor, lookupKeys, lookupValues, nil) 163 total += count 164 if errors.Is(err, ErrKeyNotExist) { 165 break 166 } 167 qt.Assert(t, qt.IsNil(err)) 168 169 qt.Assert(t, qt.IsTrue(count <= len(lookupKeys))) 170 for i, key := range lookupKeys[:count] { 171 for j := 0; j < possibleCPU; j++ { 172 value := lookupValues[i*possibleCPU+j] 173 expected := contents[key] * uint32(j+1) 174 qt.Assert(t, qt.Equals(value, expected), qt.Commentf("value for key %d should match", key)) 175 } 176 } 177 } 178 qt.Assert(t, qt.Equals(total, len(contents))) 179 180 if m.Type() == Array || m.Type() == PerCPUArray { 181 // Arrays don't support batch delete 182 return 183 } 184 185 cursor = MapBatchCursor{} 186 total = 0 187 for { 188 count, err = m.BatchLookupAndDelete(&cursor, lookupKeys, lookupValues, nil) 189 total += count 190 if errors.Is(err, ErrKeyNotExist) { 191 break 192 } 193 qt.Assert(t, qt.IsNil(err)) 194 195 qt.Assert(t, qt.IsTrue(count <= len(lookupKeys))) 196 for i, key := range lookupKeys[:count] { 197 for j := 0; j < possibleCPU; j++ { 198 value := lookupValues[i*possibleCPU+j] 199 expected := contents[key] * uint32(j+1) 200 qt.Assert(t, qt.Equals(value, expected), qt.Commentf("value for key %d should match", key)) 201 } 202 } 203 } 204 qt.Assert(t, qt.Equals(total, len(contents))) 205 206 if possibleCPU > 1 { 207 values := make([]uint32, possibleCPU) 208 qt.Assert(t, qt.ErrorIs(m.Lookup(uint32(0), values), ErrKeyNotExist)) 209 } else { 210 var v uint32 211 qt.Assert(t, qt.ErrorIs(m.Lookup(uint32(0), &v), ErrKeyNotExist)) 212 } 213 }) 214 } 215 } 216 217 func TestMapBatchCursorReuse(t *testing.T) { 218 spec := &MapSpec{ 219 Type: Array, 220 KeySize: 4, 221 ValueSize: 4, 222 MaxEntries: 4, 223 } 224 225 arr1, err := NewMap(spec) 226 if err != nil { 227 t.Fatal(err) 228 } 229 defer arr1.Close() 230 231 arr2, err := NewMap(spec) 232 if err != nil { 233 t.Fatal(err) 234 } 235 defer arr2.Close() 236 237 tmp := make([]uint32, 2) 238 239 var cursor MapBatchCursor 240 _, err = arr1.BatchLookup(&cursor, tmp, tmp, nil) 241 testutils.SkipIfNotSupported(t, err) 242 qt.Assert(t, qt.IsNil(err)) 243 244 _, err = arr2.BatchLookup(&cursor, tmp, tmp, nil) 245 qt.Assert(t, qt.IsNotNil(err)) 246 } 247 248 func TestMapLookupKeyTooSmall(t *testing.T) { 249 m := createArray(t) 250 defer m.Close() 251 252 var small uint16 253 qt.Assert(t, qt.IsNil(m.Put(uint32(0), uint32(1234)))) 254 qt.Assert(t, qt.IsNotNil(m.Lookup(uint32(0), &small))) 255 } 256 257 func TestBatchAPIMapDelete(t *testing.T) { 258 if err := haveBatchAPI(); err != nil { 259 t.Skipf("batch api not available: %v", err) 260 } 261 m, err := NewMap(&MapSpec{ 262 Type: Hash, 263 KeySize: 4, 264 ValueSize: 4, 265 MaxEntries: 10, 266 }) 267 if err != nil { 268 t.Fatal(err) 269 } 270 defer m.Close() 271 272 var ( 273 keys = []uint32{0, 1} 274 values = []uint32{42, 4242} 275 ) 276 277 count, err := m.BatchUpdate(keys, values, nil) 278 if err != nil { 279 t.Fatalf("BatchUpdate: %v", err) 280 } 281 if count != len(keys) { 282 t.Fatalf("BatchUpdate: expected count, %d, to be %d", count, len(keys)) 283 } 284 285 var v uint32 286 if err := m.Lookup(uint32(0), &v); err != nil { 287 t.Fatal("Can't lookup 0:", err) 288 } 289 if v != 42 { 290 t.Error("Want value 42, got", v) 291 } 292 293 count, err = m.BatchDelete(keys, nil) 294 if err != nil { 295 t.Fatalf("BatchDelete: %v", err) 296 } 297 if count != len(keys) { 298 t.Fatalf("BatchDelete: expected %d deletions got %d", len(keys), count) 299 } 300 301 if err := m.Lookup(uint32(0), &v); !errors.Is(err, ErrKeyNotExist) { 302 t.Fatalf("Lookup should have failed with error, %v, instead error is %v", ErrKeyNotExist, err) 303 } 304 } 305 306 func TestMapClose(t *testing.T) { 307 m := createArray(t) 308 309 if err := m.Close(); err != nil { 310 t.Fatal("Can't close map:", err) 311 } 312 313 if err := m.Put(uint32(0), uint32(42)); !errors.Is(err, sys.ErrClosedFd) { 314 t.Fatal("Put doesn't check for closed fd", err) 315 } 316 317 if _, err := m.LookupBytes(uint32(0)); !errors.Is(err, sys.ErrClosedFd) { 318 t.Fatal("Get doesn't check for closed fd", err) 319 } 320 } 321 322 func TestBatchMapWithLock(t *testing.T) { 323 testutils.SkipOnOldKernel(t, "5.13", "MAP BATCH BPF_F_LOCK") 324 file := testutils.NativeFile(t, "testdata/map_spin_lock-%s.elf") 325 spec, err := LoadCollectionSpec(file) 326 if err != nil { 327 t.Fatal("Can't parse ELF:", err) 328 } 329 330 coll, err := NewCollection(spec) 331 if err != nil { 332 t.Fatal("Can't parse ELF:", err) 333 } 334 defer coll.Close() 335 336 type spinLockValue struct { 337 Cnt uint32 338 Padding uint32 339 } 340 341 m, ok := coll.Maps["spin_lock_map"] 342 if !ok { 343 t.Fatal(err) 344 } 345 346 keys := []uint32{0, 1} 347 values := []spinLockValue{{Cnt: 42}, {Cnt: 4242}} 348 count, err := m.BatchUpdate(keys, values, &BatchOptions{ElemFlags: uint64(UpdateLock)}) 349 if err != nil { 350 t.Fatalf("BatchUpdate: %v", err) 351 } 352 if count != len(keys) { 353 t.Fatalf("BatchUpdate: expected count, %d, to be %d", count, len(keys)) 354 } 355 356 var cursor MapBatchCursor 357 lookupKeys := make([]uint32, 2) 358 lookupValues := make([]spinLockValue, 2) 359 count, err = m.BatchLookup(&cursor, lookupKeys, lookupValues, &BatchOptions{ElemFlags: uint64(LookupLock)}) 360 if !errors.Is(err, ErrKeyNotExist) { 361 t.Fatalf("BatchLookup: %v", err) 362 } 363 if count != 2 { 364 t.Fatalf("BatchLookup: expected two keys, got %d", count) 365 } 366 367 cursor = MapBatchCursor{} 368 deleteKeys := []uint32{0, 1} 369 deleteValues := make([]spinLockValue, 2) 370 count, err = m.BatchLookupAndDelete(&cursor, deleteKeys, deleteValues, nil) 371 if !errors.Is(err, ErrKeyNotExist) { 372 t.Fatalf("BatchLookupAndDelete: %v", err) 373 } 374 if count != 2 { 375 t.Fatalf("BatchLookupAndDelete: expected two keys, got %d", count) 376 } 377 } 378 379 func TestMapWithLock(t *testing.T) { 380 testutils.SkipOnOldKernel(t, "5.13", "MAP BPF_F_LOCK") 381 file := testutils.NativeFile(t, "testdata/map_spin_lock-%s.elf") 382 spec, err := LoadCollectionSpec(file) 383 if err != nil { 384 t.Fatal("Can't parse ELF:", err) 385 } 386 387 coll, err := NewCollection(spec) 388 if err != nil { 389 t.Fatal("Can't parse ELF:", err) 390 } 391 defer coll.Close() 392 393 type spinLockValue struct { 394 Cnt uint32 395 Padding uint32 396 } 397 398 m, ok := coll.Maps["spin_lock_map"] 399 if !ok { 400 t.Fatal(err) 401 } 402 403 key := uint32(1) 404 value := spinLockValue{Cnt: 5} 405 err = m.Update(key, value, UpdateLock) 406 if err != nil { 407 t.Fatal(err) 408 } 409 410 value.Cnt = 0 411 err = m.LookupWithFlags(&key, &value, LookupLock) 412 if err != nil { 413 t.Fatal(err) 414 } 415 if value.Cnt != 5 { 416 t.Fatalf("Want value 5, got %d", value.Cnt) 417 } 418 419 t.Run("LookupAndDelete", func(t *testing.T) { 420 testutils.SkipOnOldKernel(t, "5.14", "LOOKUP_AND_DELETE flags") 421 422 value.Cnt = 0 423 err = m.LookupAndDeleteWithFlags(&key, &value, LookupLock) 424 if err != nil { 425 t.Fatal(err) 426 } 427 if value.Cnt != 5 { 428 t.Fatalf("Want value 5, got %d", value.Cnt) 429 } 430 431 err = m.LookupWithFlags(&key, &value, LookupLock) 432 if err != nil && !errors.Is(err, ErrKeyNotExist) { 433 t.Fatal(err) 434 } 435 }) 436 } 437 438 func TestMapCloneNil(t *testing.T) { 439 m, err := (*Map)(nil).Clone() 440 if err != nil { 441 t.Fatal(err) 442 } 443 444 if m != nil { 445 t.Fatal("Cloning a nil map doesn't return nil") 446 } 447 } 448 449 func TestMapPin(t *testing.T) { 450 m := createArray(t) 451 452 if err := m.Put(uint32(0), uint32(42)); err != nil { 453 t.Fatal("Can't put:", err) 454 } 455 456 tmp := testutils.TempBPFFS(t) 457 path := filepath.Join(tmp, "map") 458 459 if err := m.Pin(path); err != nil { 460 testutils.SkipIfNotSupported(t, err) 461 t.Fatal(err) 462 } 463 464 pinned := m.IsPinned() 465 qt.Assert(t, qt.IsTrue(pinned)) 466 467 m.Close() 468 469 m, err := LoadPinnedMap(path, nil) 470 testutils.SkipIfNotSupported(t, err) 471 if err != nil { 472 t.Fatal(err) 473 } 474 defer m.Close() 475 476 var v uint32 477 if err := m.Lookup(uint32(0), &v); err != nil { 478 t.Fatal("Can't lookup 0:", err) 479 } 480 if v != 42 { 481 t.Error("Want value 42, got", v) 482 } 483 } 484 485 func TestNestedMapPin(t *testing.T) { 486 m, err := NewMap(&MapSpec{ 487 Type: ArrayOfMaps, 488 KeySize: 4, 489 ValueSize: 4, 490 MaxEntries: 2, 491 InnerMap: &MapSpec{ 492 Type: Array, 493 KeySize: 4, 494 ValueSize: 4, 495 MaxEntries: 1, 496 }, 497 }) 498 testutils.SkipIfNotSupported(t, err) 499 if err != nil { 500 t.Fatal(err) 501 } 502 defer m.Close() 503 504 tmp, err := os.MkdirTemp("/sys/fs/bpf", "ebpf-test") 505 if err != nil { 506 t.Fatal(err) 507 } 508 defer os.RemoveAll(tmp) 509 510 path := filepath.Join(tmp, "nested") 511 if err := m.Pin(path); err != nil { 512 t.Fatal(err) 513 } 514 m.Close() 515 516 m, err = LoadPinnedMap(path, nil) 517 testutils.SkipIfNotSupported(t, err) 518 if err != nil { 519 t.Fatal(err) 520 } 521 defer m.Close() 522 } 523 524 func TestNestedMapPinNested(t *testing.T) { 525 if _, err := NewMap(&MapSpec{ 526 Type: ArrayOfMaps, 527 KeySize: 4, 528 ValueSize: 4, 529 MaxEntries: 2, 530 InnerMap: &MapSpec{ 531 Name: "inner", 532 Type: Array, 533 KeySize: 4, 534 ValueSize: 4, 535 MaxEntries: 1, 536 Pinning: PinByName, 537 }, 538 }); err == nil { 539 t.Error("Inner maps should not be pinnable") 540 } 541 } 542 543 func TestMapPinMultiple(t *testing.T) { 544 testutils.SkipOnOldKernel(t, "4.9", "atomic re-pinning was introduced in 4.9 series") 545 546 tmp := testutils.TempBPFFS(t) 547 548 spec := spec1.Copy() 549 550 m1, err := NewMapWithOptions(spec, MapOptions{PinPath: tmp}) 551 if err != nil { 552 t.Fatal("Can't create map:", err) 553 } 554 defer m1.Close() 555 pinned := m1.IsPinned() 556 qt.Assert(t, qt.IsTrue(pinned)) 557 558 newPath := filepath.Join(tmp, "bar") 559 err = m1.Pin(newPath) 560 testutils.SkipIfNotSupported(t, err) 561 qt.Assert(t, qt.IsNil(err)) 562 oldPath := filepath.Join(tmp, spec.Name) 563 if _, err := os.Stat(oldPath); err == nil { 564 t.Fatal("Previous pinned map path still exists:", err) 565 } 566 m2, err := LoadPinnedMap(newPath, nil) 567 qt.Assert(t, qt.IsNil(err)) 568 pinned = m2.IsPinned() 569 qt.Assert(t, qt.IsTrue(pinned)) 570 defer m2.Close() 571 } 572 573 func TestMapPinWithEmptyPath(t *testing.T) { 574 m := createArray(t) 575 576 err := m.Pin("") 577 578 qt.Assert(t, qt.Not(qt.IsNil(err))) 579 } 580 581 func TestMapPinFailReplace(t *testing.T) { 582 tmp := testutils.TempBPFFS(t) 583 spec := spec1.Copy() 584 spec2 := spec1.Copy() 585 spec2.Name = spec1.Name + "bar" 586 587 m, err := NewMapWithOptions(spec, MapOptions{PinPath: tmp}) 588 if err != nil { 589 t.Fatal("Failed to create map:", err) 590 } 591 defer m.Close() 592 m2, err := NewMapWithOptions(spec2, MapOptions{PinPath: tmp}) 593 if err != nil { 594 t.Fatal("Failed to create map2:", err) 595 } 596 defer m2.Close() 597 qt.Assert(t, qt.IsTrue(m.IsPinned())) 598 newPath := filepath.Join(tmp, spec2.Name) 599 600 qt.Assert(t, qt.Not(qt.IsNil(m.Pin(newPath))), qt.Commentf("Pin didn't"+ 601 " fail new path from replacing an existing path")) 602 } 603 604 func TestMapUnpin(t *testing.T) { 605 tmp := testutils.TempBPFFS(t) 606 spec := spec1.Copy() 607 608 m, err := NewMapWithOptions(spec, MapOptions{PinPath: tmp}) 609 if err != nil { 610 t.Fatal("Failed to create map:", err) 611 } 612 defer m.Close() 613 614 pinned := m.IsPinned() 615 qt.Assert(t, qt.IsTrue(pinned)) 616 path := filepath.Join(tmp, spec.Name) 617 m2, err := LoadPinnedMap(path, nil) 618 testutils.SkipIfNotSupported(t, err) 619 qt.Assert(t, qt.IsNil(err)) 620 defer m2.Close() 621 622 if err = m.Unpin(); err != nil { 623 t.Fatal("Failed to unpin map:", err) 624 } 625 if _, err := os.Stat(path); err == nil { 626 t.Fatal("Pinned map path still exists after unpinning:", err) 627 } 628 } 629 630 func TestMapLoadPinned(t *testing.T) { 631 tmp := testutils.TempBPFFS(t) 632 633 spec := spec1.Copy() 634 635 m1, err := NewMapWithOptions(spec, MapOptions{PinPath: tmp}) 636 qt.Assert(t, qt.IsNil(err)) 637 defer m1.Close() 638 pinned := m1.IsPinned() 639 qt.Assert(t, qt.IsTrue(pinned)) 640 641 path := filepath.Join(tmp, spec.Name) 642 m2, err := LoadPinnedMap(path, nil) 643 testutils.SkipIfNotSupported(t, err) 644 qt.Assert(t, qt.IsNil(err)) 645 defer m2.Close() 646 pinned = m2.IsPinned() 647 qt.Assert(t, qt.IsTrue(pinned)) 648 } 649 650 func TestMapLoadReusePinned(t *testing.T) { 651 for _, typ := range []MapType{Array, Hash, DevMap, DevMapHash} { 652 t.Run(typ.String(), func(t *testing.T) { 653 if typ == DevMap { 654 testutils.SkipOnOldKernel(t, "4.14", "devmap") 655 } 656 if typ == DevMapHash { 657 testutils.SkipOnOldKernel(t, "5.4", "devmap_hash") 658 } 659 tmp := testutils.TempBPFFS(t) 660 spec := &MapSpec{ 661 Name: "pinmap", 662 Type: typ, 663 KeySize: 4, 664 ValueSize: 4, 665 MaxEntries: 1, 666 Pinning: PinByName, 667 } 668 669 m1, err := NewMapWithOptions(spec, MapOptions{PinPath: tmp}) 670 qt.Assert(t, qt.IsNil(err)) 671 defer m1.Close() 672 673 m2, err := NewMapWithOptions(spec, MapOptions{PinPath: tmp}) 674 qt.Assert(t, qt.IsNil(err)) 675 defer m2.Close() 676 }) 677 } 678 } 679 680 func TestMapLoadPinnedUnpin(t *testing.T) { 681 tmp := testutils.TempBPFFS(t) 682 683 spec := spec1.Copy() 684 685 m1, err := NewMapWithOptions(spec, MapOptions{PinPath: tmp}) 686 qt.Assert(t, qt.IsNil(err)) 687 defer m1.Close() 688 pinned := m1.IsPinned() 689 qt.Assert(t, qt.IsTrue(pinned)) 690 691 path := filepath.Join(tmp, spec.Name) 692 m2, err := LoadPinnedMap(path, nil) 693 testutils.SkipIfNotSupported(t, err) 694 qt.Assert(t, qt.IsNil(err)) 695 defer m2.Close() 696 err = m1.Unpin() 697 qt.Assert(t, qt.IsNil(err)) 698 err = m2.Unpin() 699 qt.Assert(t, qt.IsNil(err)) 700 } 701 702 func TestMapLoadPinnedWithOptions(t *testing.T) { 703 // Introduced in commit 6e71b04a8224. 704 testutils.SkipOnOldKernel(t, "4.15", "file_flags in BPF_OBJ_GET") 705 706 array := createArray(t) 707 708 tmp := testutils.TempBPFFS(t) 709 710 path := filepath.Join(tmp, "map") 711 if err := array.Pin(path); err != nil { 712 t.Fatal(err) 713 } 714 if err := array.Put(uint32(0), uint32(123)); err != nil { 715 t.Fatal(err) 716 } 717 array.Close() 718 719 t.Run("read-only", func(t *testing.T) { 720 array, err := LoadPinnedMap(path, &LoadPinOptions{ 721 ReadOnly: true, 722 }) 723 testutils.SkipIfNotSupported(t, err) 724 if err != nil { 725 t.Fatal("Can't load map:", err) 726 } 727 defer array.Close() 728 729 if err := array.Put(uint32(0), uint32(1)); !errors.Is(err, unix.EPERM) { 730 t.Fatal("Expected EPERM from Put, got", err) 731 } 732 }) 733 734 t.Run("write-only", func(t *testing.T) { 735 array, err := LoadPinnedMap(path, &LoadPinOptions{ 736 WriteOnly: true, 737 }) 738 testutils.SkipIfNotSupported(t, err) 739 if err != nil { 740 t.Fatal("Can't load map:", err) 741 } 742 defer array.Close() 743 744 var value uint32 745 if err := array.Lookup(uint32(0), &value); !errors.Is(err, unix.EPERM) { 746 t.Fatal("Expected EPERM from Lookup, got", err) 747 } 748 }) 749 } 750 751 func TestMapPinFlags(t *testing.T) { 752 tmp := testutils.TempBPFFS(t) 753 754 spec := &MapSpec{ 755 Name: "map", 756 Type: Array, 757 KeySize: 4, 758 ValueSize: 4, 759 MaxEntries: 1, 760 Pinning: PinByName, 761 } 762 763 m, err := NewMapWithOptions(spec, MapOptions{ 764 PinPath: tmp, 765 }) 766 qt.Assert(t, qt.IsNil(err)) 767 m.Close() 768 769 _, err = NewMapWithOptions(spec, MapOptions{ 770 PinPath: tmp, 771 LoadPinOptions: LoadPinOptions{ 772 Flags: math.MaxUint32, 773 }, 774 }) 775 if !errors.Is(err, unix.EINVAL) { 776 t.Fatal("Invalid flags should trigger EINVAL:", err) 777 } 778 } 779 780 func createArray(t *testing.T) *Map { 781 t.Helper() 782 783 m, err := NewMap(&MapSpec{ 784 Type: Array, 785 KeySize: 4, 786 ValueSize: 4, 787 MaxEntries: 2, 788 }) 789 if err != nil { 790 t.Fatal(err) 791 } 792 t.Cleanup(func() { m.Close() }) 793 return m 794 } 795 796 func TestMapQueue(t *testing.T) { 797 testutils.SkipOnOldKernel(t, "4.20", "map type queue") 798 799 m, err := NewMap(&MapSpec{ 800 Type: Queue, 801 ValueSize: 4, 802 MaxEntries: 2, 803 }) 804 if err != nil { 805 t.Fatal(err) 806 } 807 defer m.Close() 808 809 for _, v := range []uint32{42, 4242} { 810 if err := m.Put(nil, v); err != nil { 811 t.Fatalf("Can't put %d: %s", v, err) 812 } 813 } 814 815 var v uint32 816 if err := m.LookupAndDelete(nil, &v); err != nil { 817 t.Fatal("Can't lookup and delete element:", err) 818 } 819 if v != 42 { 820 t.Error("Want value 42, got", v) 821 } 822 823 v = 0 824 if err := m.LookupAndDelete(nil, unsafe.Pointer(&v)); err != nil { 825 t.Fatal("Can't lookup and delete element using unsafe.Pointer:", err) 826 } 827 if v != 4242 { 828 t.Error("Want value 4242, got", v) 829 } 830 831 if err := m.LookupAndDelete(nil, &v); !errors.Is(err, ErrKeyNotExist) { 832 t.Fatal("Lookup and delete on empty Queue:", err) 833 } 834 } 835 836 func TestMapInMap(t *testing.T) { 837 for _, typ := range []MapType{ArrayOfMaps, HashOfMaps} { 838 t.Run(typ.String(), func(t *testing.T) { 839 spec := &MapSpec{ 840 Type: typ, 841 KeySize: 4, 842 MaxEntries: 2, 843 InnerMap: &MapSpec{ 844 Type: Array, 845 KeySize: 4, 846 ValueSize: 4, 847 MaxEntries: 2, 848 }, 849 } 850 851 inner, err := NewMap(spec.InnerMap) 852 if err != nil { 853 t.Fatal(err) 854 } 855 if err := inner.Put(uint32(1), uint32(4242)); err != nil { 856 t.Fatal(err) 857 } 858 defer inner.Close() 859 860 outer, err := NewMap(spec) 861 testutils.SkipIfNotSupported(t, err) 862 if err != nil { 863 t.Fatal(err) 864 } 865 defer outer.Close() 866 867 if err := outer.Put(uint32(0), inner); err != nil { 868 t.Fatal("Can't put inner map:", err) 869 } 870 871 var inner2 *Map 872 if err := outer.Lookup(uint32(0), &inner2); err != nil { 873 t.Fatal("Can't lookup 0:", err) 874 } 875 defer inner2.Close() 876 877 var v uint32 878 if err := inner2.Lookup(uint32(1), &v); err != nil { 879 t.Fatal("Can't lookup 1 in inner2:", err) 880 } 881 882 if v != 4242 { 883 t.Error("Expected value 4242, got", v) 884 } 885 886 inner2.Close() 887 888 // Make sure we can still access the original map 889 if err := inner.Lookup(uint32(1), &v); err != nil { 890 t.Fatal("Can't lookup 1 in inner:", err) 891 } 892 893 if v != 4242 { 894 t.Error("Expected value 4242, got", v) 895 } 896 }) 897 } 898 } 899 900 func TestNewMapInMapFromFD(t *testing.T) { 901 nested, err := NewMap(&MapSpec{ 902 Type: ArrayOfMaps, 903 KeySize: 4, 904 MaxEntries: 2, 905 InnerMap: &MapSpec{ 906 Type: Array, 907 KeySize: 4, 908 ValueSize: 4, 909 MaxEntries: 2, 910 }, 911 }) 912 testutils.SkipIfNotSupported(t, err) 913 if err != nil { 914 t.Fatal(err) 915 } 916 defer nested.Close() 917 918 // Do not copy this, use Clone instead. 919 another, err := NewMapFromFD(dupFD(t, nested.FD())) 920 if err != nil { 921 t.Fatal("Can't create a new nested map from an FD") 922 } 923 another.Close() 924 } 925 926 func TestPerfEventArray(t *testing.T) { 927 specs := []*MapSpec{ 928 {Type: PerfEventArray}, 929 {Type: PerfEventArray, KeySize: 4}, 930 {Type: PerfEventArray, ValueSize: 4}, 931 } 932 933 for _, spec := range specs { 934 m, err := NewMap(spec) 935 if err != nil { 936 t.Errorf("Can't create perf event array from %v: %s", spec, err) 937 } else { 938 m.Close() 939 } 940 } 941 } 942 943 func createMapInMap(t *testing.T, typ MapType) *Map { 944 t.Helper() 945 946 spec := &MapSpec{ 947 Type: typ, 948 KeySize: 4, 949 MaxEntries: 2, 950 InnerMap: &MapSpec{ 951 Type: Array, 952 KeySize: 4, 953 ValueSize: 4, 954 MaxEntries: 2, 955 }, 956 } 957 958 m, err := NewMap(spec) 959 testutils.SkipIfNotSupported(t, err) 960 if err != nil { 961 t.Fatal(err) 962 } 963 return m 964 } 965 966 func TestMapInMapValueSize(t *testing.T) { 967 spec := &MapSpec{ 968 Type: ArrayOfMaps, 969 KeySize: 4, 970 ValueSize: 0, 971 MaxEntries: 2, 972 InnerMap: &MapSpec{ 973 Type: Array, 974 KeySize: 4, 975 ValueSize: 4, 976 MaxEntries: 2, 977 }, 978 } 979 980 m, err := NewMap(spec) 981 testutils.SkipIfNotSupported(t, err) 982 if err != nil { 983 t.Fatal(err) 984 } 985 m.Close() 986 987 spec.ValueSize = 4 988 m, err = NewMap(spec) 989 if err != nil { 990 t.Fatal(err) 991 } 992 m.Close() 993 994 spec.ValueSize = 1 995 if _, err := NewMap(spec); err == nil { 996 t.Fatal("Expected an error") 997 } 998 } 999 1000 func TestIterateEmptyMap(t *testing.T) { 1001 makeMap := func(t *testing.T, mapType MapType) *Map { 1002 m, err := NewMap(&MapSpec{ 1003 Type: mapType, 1004 KeySize: 4, 1005 ValueSize: 8, 1006 MaxEntries: 2, 1007 }) 1008 if errors.Is(err, unix.EINVAL) { 1009 t.Skip(mapType, "is not supported") 1010 } 1011 if err != nil { 1012 t.Fatal("Can't create map:", err) 1013 } 1014 t.Cleanup(func() { m.Close() }) 1015 return m 1016 } 1017 1018 for _, mapType := range []MapType{ 1019 Hash, 1020 SockHash, 1021 } { 1022 t.Run(mapType.String(), func(t *testing.T) { 1023 m := makeMap(t, mapType) 1024 entries := m.Iterate() 1025 1026 var key string 1027 var value uint64 1028 if entries.Next(&key, &value) != false { 1029 t.Error("Empty hash should not be iterable") 1030 } 1031 if err := entries.Err(); err != nil { 1032 t.Error("Empty hash shouldn't return an error:", err) 1033 } 1034 }) 1035 } 1036 1037 for _, mapType := range []MapType{ 1038 Array, 1039 SockMap, 1040 } { 1041 t.Run(mapType.String(), func(t *testing.T) { 1042 m := makeMap(t, mapType) 1043 entries := m.Iterate() 1044 var key string 1045 var value uint64 1046 for entries.Next(&key, &value) { 1047 // Some empty arrays like sockmap don't return any keys. 1048 } 1049 if err := entries.Err(); err != nil { 1050 t.Error("Empty array shouldn't return an error:", err) 1051 } 1052 }) 1053 } 1054 } 1055 1056 func TestMapIterate(t *testing.T) { 1057 hash, err := NewMap(&MapSpec{ 1058 Type: Hash, 1059 KeySize: 5, 1060 ValueSize: 4, 1061 MaxEntries: 2, 1062 }) 1063 if err != nil { 1064 t.Fatal(err) 1065 } 1066 defer hash.Close() 1067 1068 if err := hash.Put("hello", uint32(21)); err != nil { 1069 t.Fatal(err) 1070 } 1071 1072 if err := hash.Put("world", uint32(42)); err != nil { 1073 t.Fatal(err) 1074 } 1075 1076 var key string 1077 var value uint32 1078 var keys []string 1079 1080 entries := hash.Iterate() 1081 for entries.Next(&key, &value) { 1082 keys = append(keys, key) 1083 } 1084 1085 if err := entries.Err(); err != nil { 1086 t.Fatal(err) 1087 } 1088 1089 sort.Strings(keys) 1090 1091 if n := len(keys); n != 2 { 1092 t.Fatal("Expected to get 2 keys, have", n) 1093 } 1094 if keys[0] != "hello" { 1095 t.Error("Expected index 0 to be hello, got", keys[0]) 1096 } 1097 if keys[1] != "world" { 1098 t.Error("Expected index 1 to be hello, got", keys[1]) 1099 } 1100 } 1101 1102 func TestMapIteratorAllocations(t *testing.T) { 1103 arr, err := NewMap(&MapSpec{ 1104 Type: Array, 1105 KeySize: 4, 1106 ValueSize: 4, 1107 MaxEntries: 10, 1108 }) 1109 if err != nil { 1110 t.Fatal(err) 1111 } 1112 defer arr.Close() 1113 1114 var k, v uint32 1115 iter := arr.Iterate() 1116 1117 // AllocsPerRun warms up the function for us. 1118 allocs := testing.AllocsPerRun(1, func() { 1119 if !iter.Next(&k, &v) { 1120 t.Fatal("Next failed") 1121 } 1122 }) 1123 1124 qt.Assert(t, qt.Equals(allocs, float64(0))) 1125 } 1126 1127 func TestMapBatchLookupAllocations(t *testing.T) { 1128 testutils.SkipIfNotSupported(t, haveBatchAPI()) 1129 1130 arr, err := NewMap(&MapSpec{ 1131 Type: Array, 1132 KeySize: 4, 1133 ValueSize: 4, 1134 MaxEntries: 10, 1135 }) 1136 if err != nil { 1137 t.Fatal(err) 1138 } 1139 defer arr.Close() 1140 1141 var cursor MapBatchCursor 1142 tmp := make([]uint32, 2) 1143 input := any(tmp) 1144 1145 // AllocsPerRun warms up the function for us. 1146 allocs := testing.AllocsPerRun(1, func() { 1147 _, err := arr.BatchLookup(&cursor, input, input, nil) 1148 if err != nil { 1149 t.Fatal(err) 1150 } 1151 }) 1152 1153 qt.Assert(t, qt.Equals(allocs, 0)) 1154 } 1155 1156 func TestMapIterateHashKeyOneByteFull(t *testing.T) { 1157 hash, err := NewMap(&MapSpec{ 1158 Type: Hash, 1159 KeySize: 1, 1160 ValueSize: 1, 1161 MaxEntries: 256, 1162 }) 1163 if err != nil { 1164 t.Fatal(err) 1165 } 1166 defer hash.Close() 1167 1168 for i := 0; i < int(hash.MaxEntries()); i++ { 1169 if err := hash.Put(uint8(i), uint8(i)); err != nil { 1170 t.Fatal(err) 1171 } 1172 } 1173 var key uint8 1174 var value uint8 1175 var keys int 1176 1177 entries := hash.Iterate() 1178 for entries.Next(&key, &value) { 1179 if key != value { 1180 t.Fatalf("Expected key == value, got key %v value %v", key, value) 1181 } 1182 keys++ 1183 } 1184 1185 if err := entries.Err(); err != nil { 1186 t.Fatal(err) 1187 } 1188 1189 if keys != int(hash.MaxEntries()) { 1190 t.Fatalf("Expected to get %d keys, have %d", hash.MaxEntries(), keys) 1191 } 1192 } 1193 1194 func TestMapGuessNonExistentKey(t *testing.T) { 1195 tests := []struct { 1196 name string 1197 mapType MapType 1198 keys []uint32 1199 }{ 1200 { 1201 "empty", Hash, []uint32{}, 1202 }, 1203 { 1204 "all zero key", Hash, []uint32{0}, 1205 }, 1206 { 1207 "all ones key", Hash, []uint32{math.MaxUint32}, 1208 }, 1209 { 1210 "alternating bits key", Hash, []uint32{0x5555_5555}, 1211 }, 1212 { 1213 "all special patterns", Hash, []uint32{0, math.MaxUint32, 0x5555_5555}, 1214 }, 1215 { 1216 "empty", Array, []uint32{}, 1217 }, 1218 { 1219 "all zero key", Array, []uint32{0}, 1220 }, 1221 { 1222 "full", Array, []uint32{0, 1}, 1223 }, 1224 } 1225 1226 for _, tt := range tests { 1227 t.Run(fmt.Sprintf("%s: %s", tt.mapType, tt.name), func(t *testing.T) { 1228 maxEntries := uint32(len(tt.keys)) 1229 if maxEntries == 0 { 1230 maxEntries = 1 1231 } 1232 1233 m, err := NewMap(&MapSpec{ 1234 Type: tt.mapType, 1235 KeySize: 4, 1236 ValueSize: 4, 1237 MaxEntries: maxEntries, 1238 }) 1239 if err != nil { 1240 t.Fatal(err) 1241 } 1242 defer m.Close() 1243 1244 for _, key := range tt.keys { 1245 if err := m.Put(key, key); err != nil { 1246 t.Fatal(err) 1247 } 1248 } 1249 1250 guess, err := m.guessNonExistentKey() 1251 if err != nil { 1252 t.Fatal(err) 1253 } 1254 1255 if len(guess) != int(m.keySize) { 1256 t.Fatal("Guessed key has wrong size") 1257 } 1258 1259 var value uint32 1260 if err := m.Lookup(guess, &value); !errors.Is(err, unix.ENOENT) { 1261 t.Fatal("Doesn't return ENOENT:", err) 1262 } 1263 }) 1264 } 1265 1266 t.Run("Hash: full", func(t *testing.T) { 1267 const n = math.MaxUint8 + 1 1268 1269 hash, err := NewMap(&MapSpec{ 1270 Type: Hash, 1271 KeySize: 1, 1272 ValueSize: 1, 1273 MaxEntries: n, 1274 }) 1275 if err != nil { 1276 t.Fatal(err) 1277 } 1278 defer hash.Close() 1279 1280 for i := 0; i < n; i++ { 1281 if err := hash.Put(uint8(i), uint8(i)); err != nil { 1282 t.Fatal(err) 1283 } 1284 } 1285 1286 _, err = hash.guessNonExistentKey() 1287 if err == nil { 1288 t.Fatal("guessNonExistentKey doesn't return error on full hash table") 1289 } 1290 }) 1291 } 1292 1293 func TestNotExist(t *testing.T) { 1294 hash := newHash(t) 1295 1296 var tmp uint32 1297 err := hash.Lookup("hello", &tmp) 1298 if !errors.Is(err, ErrKeyNotExist) { 1299 t.Error("Lookup doesn't return ErrKeyNotExist") 1300 } 1301 1302 buf, err := hash.LookupBytes("hello") 1303 if err != nil { 1304 t.Error("Looking up non-existent key return an error:", err) 1305 } 1306 if buf != nil { 1307 t.Error("LookupBytes returns non-nil buffer for non-existent key") 1308 } 1309 1310 if err := hash.Delete("hello"); !errors.Is(err, ErrKeyNotExist) { 1311 t.Error("Deleting unknown key doesn't return ErrKeyNotExist", err) 1312 } 1313 1314 var k = []byte{1, 2, 3, 4, 5} 1315 if err := hash.NextKey(&k, &tmp); !errors.Is(err, ErrKeyNotExist) { 1316 t.Error("Looking up next key in empty map doesn't return a non-existing error", err) 1317 } 1318 1319 if err := hash.NextKey(nil, &tmp); !errors.Is(err, ErrKeyNotExist) { 1320 t.Error("Looking up next key in empty map doesn't return a non-existing error", err) 1321 } 1322 } 1323 1324 func TestExist(t *testing.T) { 1325 hash := newHash(t) 1326 1327 if err := hash.Put("hello", uint32(21)); err != nil { 1328 t.Errorf("Failed to put key/value pair into hash: %v", err) 1329 } 1330 1331 if err := hash.Update("hello", uint32(42), UpdateNoExist); !errors.Is(err, ErrKeyExist) { 1332 t.Error("Updating existing key doesn't return ErrKeyExist") 1333 } 1334 } 1335 1336 func TestIterateMapInMap(t *testing.T) { 1337 const idx = uint32(1) 1338 1339 parent := createMapInMap(t, ArrayOfMaps) 1340 defer parent.Close() 1341 1342 a := createArray(t) 1343 1344 if err := parent.Put(idx, a); err != nil { 1345 t.Fatal(err) 1346 } 1347 1348 var ( 1349 key uint32 1350 m *Map 1351 entries = parent.Iterate() 1352 ) 1353 1354 if !entries.Next(&key, &m) { 1355 t.Fatal("Iterator encountered error:", entries.Err()) 1356 } 1357 m.Close() 1358 1359 if key != 1 { 1360 t.Error("Iterator didn't skip first entry") 1361 } 1362 1363 if m == nil { 1364 t.Fatal("Map is nil") 1365 } 1366 } 1367 1368 func TestPerCPUMarshaling(t *testing.T) { 1369 for _, typ := range []MapType{PerCPUHash, PerCPUArray, LRUCPUHash} { 1370 t.Run(typ.String(), func(t *testing.T) { 1371 numCPU := MustPossibleCPU() 1372 if numCPU < 2 { 1373 t.Skip("Test requires at least two CPUs") 1374 } 1375 if typ == PerCPUHash || typ == PerCPUArray { 1376 testutils.SkipOnOldKernel(t, "4.6", "per-CPU hash and array") 1377 } 1378 if typ == LRUCPUHash { 1379 testutils.SkipOnOldKernel(t, "4.10", "LRU per-CPU hash") 1380 } 1381 1382 arr, err := NewMap(&MapSpec{ 1383 Type: typ, 1384 KeySize: 4, 1385 ValueSize: 5, 1386 MaxEntries: 1, 1387 }) 1388 if err != nil { 1389 t.Fatal(err) 1390 } 1391 defer arr.Close() 1392 1393 values := []*customEncoding{ 1394 {"hello"}, 1395 {"world"}, 1396 } 1397 if err := arr.Put(uint32(0), values); err != nil { 1398 t.Fatal(err) 1399 } 1400 1401 // Make sure unmarshaling works on slices containing pointers 1402 retrievedVal := make([]*customEncoding, numCPU) 1403 if err := arr.Lookup(uint32(0), retrievedVal); err == nil { 1404 t.Fatal("Slices with nil values should generate error") 1405 } 1406 for i := range retrievedVal { 1407 retrievedVal[i] = &customEncoding{} 1408 } 1409 if err := arr.Lookup(uint32(0), retrievedVal); err != nil { 1410 t.Fatal("Can't retrieve key 0:", err) 1411 } 1412 var retrieved []*customEncoding 1413 if err := arr.Lookup(uint32(0), &retrieved); err != nil { 1414 t.Fatal("Can't retrieve key 0:", err) 1415 } 1416 1417 for i, want := range []string{"HELLO", "WORLD"} { 1418 if retrieved[i] == nil { 1419 t.Error("First item is nil") 1420 } else if have := retrieved[i].data; have != want { 1421 t.Errorf("Put doesn't use BinaryMarshaler, expected %s but got %s", want, have) 1422 } 1423 } 1424 1425 }) 1426 } 1427 } 1428 1429 type bpfCgroupStorageKey struct { 1430 CgroupInodeId uint64 1431 AttachType AttachType 1432 _ [4]byte // Padding 1433 } 1434 1435 func TestCgroupPerCPUStorageMarshaling(t *testing.T) { 1436 numCPU := MustPossibleCPU() 1437 if numCPU < 2 { 1438 t.Skip("Test requires at least two CPUs") 1439 } 1440 testutils.SkipOnOldKernel(t, "5.9", "per-CPU CGoup storage with write from user space support") 1441 1442 cgroup := testutils.CreateCgroup(t) 1443 1444 arr, err := NewMap(&MapSpec{ 1445 Type: PerCPUCGroupStorage, 1446 KeySize: uint32(unsafe.Sizeof(bpfCgroupStorageKey{})), 1447 ValueSize: uint32(unsafe.Sizeof(uint64(0))), 1448 }) 1449 if err != nil { 1450 t.Fatal(err) 1451 } 1452 t.Cleanup(func() { 1453 arr.Close() 1454 }) 1455 1456 prog, err := NewProgram(&ProgramSpec{ 1457 Type: CGroupSKB, 1458 AttachType: AttachCGroupInetEgress, 1459 License: "MIT", 1460 Instructions: asm.Instructions{ 1461 asm.LoadMapPtr(asm.R1, arr.FD()), 1462 asm.Mov.Imm(asm.R2, 0), 1463 asm.FnGetLocalStorage.Call(), 1464 asm.Mov.Imm(asm.R0, 0), 1465 asm.Return(), 1466 }, 1467 }) 1468 if err != nil { 1469 t.Fatal(err) 1470 } 1471 defer prog.Close() 1472 1473 progAttachAttrs := sys.ProgAttachAttr{ 1474 TargetFdOrIfindex: uint32(cgroup.Fd()), 1475 AttachBpfFd: uint32(prog.FD()), 1476 AttachType: uint32(AttachCGroupInetEgress), 1477 AttachFlags: 0, 1478 ReplaceBpfFd: 0, 1479 } 1480 err = sys.ProgAttach(&progAttachAttrs) 1481 if err != nil { 1482 t.Fatal(err) 1483 } 1484 defer func() { 1485 attr := sys.ProgDetachAttr{ 1486 TargetFdOrIfindex: uint32(cgroup.Fd()), 1487 AttachBpfFd: uint32(prog.FD()), 1488 AttachType: uint32(AttachCGroupInetEgress), 1489 } 1490 if err := sys.ProgDetach(&attr); err != nil { 1491 t.Fatal(err) 1492 } 1493 }() 1494 1495 var mapKey = &bpfCgroupStorageKey{ 1496 CgroupInodeId: testutils.GetCgroupIno(t, cgroup), 1497 AttachType: AttachCGroupInetEgress, 1498 } 1499 1500 values := []uint64{1, 2} 1501 if err := arr.Put(mapKey, values); err != nil { 1502 t.Fatalf("Can't set cgroup %s storage: %s", cgroup.Name(), err) 1503 } 1504 1505 var retrieved []uint64 1506 if err := arr.Lookup(mapKey, &retrieved); err != nil { 1507 t.Fatalf("Can't retrieve cgroup %s storage: %s", cgroup.Name(), err) 1508 } 1509 1510 for i, want := range []uint64{1, 2} { 1511 if retrieved[i] == 0 { 1512 t.Errorf("Item %d is 0", i) 1513 } else if have := retrieved[i]; have != want { 1514 t.Errorf("PerCPUCGroupStorage map is not correctly unmarshaled, expected %d but got %d", want, have) 1515 } 1516 } 1517 } 1518 1519 func TestMapMarshalUnsafe(t *testing.T) { 1520 m, err := NewMap(&MapSpec{ 1521 Type: Hash, 1522 KeySize: 4, 1523 ValueSize: 4, 1524 MaxEntries: 1, 1525 }) 1526 if err != nil { 1527 t.Fatal(err) 1528 } 1529 defer m.Close() 1530 1531 key := uint32(1) 1532 value := uint32(42) 1533 1534 if err := m.Put(unsafe.Pointer(&key), unsafe.Pointer(&value)); err != nil { 1535 t.Fatal(err) 1536 } 1537 1538 var res uint32 1539 if err := m.Lookup(unsafe.Pointer(&key), unsafe.Pointer(&res)); err != nil { 1540 t.Fatal("Can't get item:", err) 1541 } 1542 1543 var sum uint32 1544 iter := m.Iterate() 1545 for iter.Next(&key, unsafe.Pointer(&res)) { 1546 sum += res 1547 } 1548 if err := iter.Err(); err != nil { 1549 t.Fatal(err) 1550 } 1551 1552 if res != 42 { 1553 t.Fatalf("Expected 42, got %d", res) 1554 } 1555 1556 iter = m.Iterate() 1557 iter.Next(unsafe.Pointer(&key), &res) 1558 if err := iter.Err(); err != nil { 1559 t.Error(err) 1560 } 1561 if key != 1 { 1562 t.Errorf("Expected key 1, got %d", key) 1563 } 1564 1565 if err := m.Delete(unsafe.Pointer(&key)); err != nil { 1566 t.Fatal("Can't delete:", err) 1567 } 1568 } 1569 1570 func TestMapName(t *testing.T) { 1571 if err := haveObjName(); err != nil { 1572 t.Skip(err) 1573 } 1574 1575 m, err := NewMap(&MapSpec{ 1576 Name: "test", 1577 Type: Array, 1578 KeySize: 4, 1579 ValueSize: 4, 1580 MaxEntries: 1, 1581 }) 1582 if err != nil { 1583 t.Fatal(err) 1584 } 1585 defer m.Close() 1586 1587 var info sys.MapInfo 1588 if err := sys.ObjInfo(m.fd, &info); err != nil { 1589 t.Fatal(err) 1590 } 1591 1592 if name := unix.ByteSliceToString(info.Name[:]); name != "test" { 1593 t.Error("Expected name to be test, got", name) 1594 } 1595 } 1596 1597 func TestMapFromFD(t *testing.T) { 1598 m := createArray(t) 1599 1600 if err := m.Put(uint32(0), uint32(123)); err != nil { 1601 t.Fatal(err) 1602 } 1603 1604 // If you're thinking about copying this, don't. Use 1605 // Clone() instead. 1606 m2, err := NewMapFromFD(dupFD(t, m.FD())) 1607 testutils.SkipIfNotSupported(t, err) 1608 if err != nil { 1609 t.Fatal(err) 1610 } 1611 defer m2.Close() 1612 1613 var val uint32 1614 if err := m2.Lookup(uint32(0), &val); err != nil { 1615 t.Fatal("Can't look up key:", err) 1616 } 1617 1618 if val != 123 { 1619 t.Error("Wrong value") 1620 } 1621 } 1622 1623 func TestMapContents(t *testing.T) { 1624 spec := &MapSpec{ 1625 Type: Array, 1626 KeySize: 4, 1627 ValueSize: 4, 1628 MaxEntries: 2, 1629 Contents: []MapKV{ 1630 {uint32(0), uint32(23)}, 1631 {uint32(1), uint32(42)}, 1632 }, 1633 } 1634 1635 m, err := NewMap(spec) 1636 if err != nil { 1637 t.Fatal("Can't create map:", err) 1638 } 1639 defer m.Close() 1640 1641 var value uint32 1642 if err := m.Lookup(uint32(0), &value); err != nil { 1643 t.Error("Can't look up key 0:", err) 1644 } else if value != 23 { 1645 t.Errorf("Incorrect value for key 0, expected 23, have %d", value) 1646 } 1647 1648 if err := m.Lookup(uint32(1), &value); err != nil { 1649 t.Error("Can't look up key 1:", err) 1650 } else if value != 42 { 1651 t.Errorf("Incorrect value for key 0, expected 23, have %d", value) 1652 } 1653 1654 spec.Contents = []MapKV{ 1655 // Key is larger than MaxEntries 1656 {uint32(14), uint32(0)}, 1657 } 1658 1659 if _, err = NewMap(spec); err == nil { 1660 t.Error("Invalid contents should be rejected") 1661 } 1662 } 1663 1664 func TestMapFreeze(t *testing.T) { 1665 arr := createArray(t) 1666 1667 err := arr.Freeze() 1668 testutils.SkipIfNotSupported(t, err) 1669 1670 if err != nil { 1671 t.Fatal("Can't freeze map:", err) 1672 } 1673 1674 if err := arr.Put(uint32(0), uint32(1)); err == nil { 1675 t.Error("Freeze doesn't prevent modification from user space") 1676 } 1677 } 1678 1679 func TestMapGetNextID(t *testing.T) { 1680 testutils.SkipOnOldKernel(t, "4.13", "bpf_map_get_next_id") 1681 var next MapID 1682 var err error 1683 1684 // Ensure there is at least one map on the system. 1685 _ = newHash(t) 1686 1687 if next, err = MapGetNextID(MapID(0)); err != nil { 1688 t.Fatal("Can't get next ID:", err) 1689 } 1690 if next == MapID(0) { 1691 t.Fatal("Expected next ID other than 0") 1692 } 1693 1694 // As there can be multiple eBPF maps, we loop over all of them and 1695 // make sure, the IDs increase and the last call will return ErrNotExist 1696 for { 1697 last := next 1698 if next, err = MapGetNextID(last); err != nil { 1699 if !errors.Is(err, os.ErrNotExist) { 1700 t.Fatal("Expected ErrNotExist, got:", err) 1701 } 1702 break 1703 } 1704 if next <= last { 1705 t.Fatalf("Expected next ID (%d) to be higher than the last ID (%d)", next, last) 1706 } 1707 } 1708 } 1709 1710 func TestNewMapFromID(t *testing.T) { 1711 hash := newHash(t) 1712 1713 info, err := hash.Info() 1714 testutils.SkipIfNotSupported(t, err) 1715 if err != nil { 1716 t.Fatal("Couldn't get map info:", err) 1717 } 1718 1719 id, ok := info.ID() 1720 if !ok { 1721 t.Skip("Map ID not supported") 1722 } 1723 1724 hash2, err := NewMapFromID(id) 1725 if err != nil { 1726 t.Fatalf("Can't get map for ID %d: %v", id, err) 1727 } 1728 hash2.Close() 1729 1730 // As there can be multiple maps, we use max(uint32) as MapID to trigger an expected error. 1731 _, err = NewMapFromID(MapID(math.MaxUint32)) 1732 if !errors.Is(err, os.ErrNotExist) { 1733 t.Fatal("Expected ErrNotExist, got:", err) 1734 } 1735 } 1736 1737 func TestMapPinning(t *testing.T) { 1738 tmp := testutils.TempBPFFS(t) 1739 1740 spec := &MapSpec{ 1741 Name: "test", 1742 Type: Hash, 1743 KeySize: 4, 1744 ValueSize: 4, 1745 MaxEntries: 1, 1746 Pinning: PinByName, 1747 } 1748 1749 m1, err := NewMapWithOptions(spec, MapOptions{PinPath: tmp}) 1750 if err != nil { 1751 t.Fatal("Can't create map:", err) 1752 } 1753 defer m1.Close() 1754 pinned := m1.IsPinned() 1755 qt.Assert(t, qt.IsTrue(pinned)) 1756 1757 m1Info, err := m1.Info() 1758 qt.Assert(t, qt.IsNil(err)) 1759 1760 if err := m1.Put(uint32(0), uint32(42)); err != nil { 1761 t.Fatal("Can't write value:", err) 1762 } 1763 1764 m2, err := NewMapWithOptions(spec, MapOptions{PinPath: tmp}) 1765 testutils.SkipIfNotSupported(t, err) 1766 if err != nil { 1767 t.Fatal("Can't create map:", err) 1768 } 1769 defer m2.Close() 1770 1771 m2Info, err := m2.Info() 1772 qt.Assert(t, qt.IsNil(err)) 1773 1774 if m1ID, ok := m1Info.ID(); ok { 1775 m2ID, _ := m2Info.ID() 1776 qt.Assert(t, qt.Equals(m2ID, m1ID)) 1777 } 1778 1779 var value uint32 1780 if err := m2.Lookup(uint32(0), &value); err != nil { 1781 t.Fatal("Can't read from map:", err) 1782 } 1783 1784 if value != 42 { 1785 t.Fatal("Pinning doesn't use pinned maps") 1786 } 1787 1788 spec.KeySize = 8 1789 spec.ValueSize = 8 1790 m3, err := NewMapWithOptions(spec, MapOptions{PinPath: tmp}) 1791 if err == nil { 1792 m3.Close() 1793 t.Fatalf("Opening a pinned map with a mismatching spec did not fail") 1794 } 1795 if !errors.Is(err, ErrMapIncompatible) { 1796 t.Fatalf("Opening a pinned map with a mismatching spec failed with the wrong error") 1797 } 1798 1799 // Check if error string mentions both KeySize and ValueSize. 1800 qt.Assert(t, qt.StringContains(err.Error(), "KeySize")) 1801 qt.Assert(t, qt.StringContains(err.Error(), "ValueSize")) 1802 } 1803 1804 func TestPerfEventArrayCompatible(t *testing.T) { 1805 ms := &MapSpec{ 1806 Type: PerfEventArray, 1807 } 1808 1809 m, err := NewMap(ms) 1810 qt.Assert(t, qt.IsNil(err)) 1811 defer m.Close() 1812 1813 qt.Assert(t, qt.IsNil(ms.Compatible(m))) 1814 1815 ms.MaxEntries = m.MaxEntries() - 1 1816 qt.Assert(t, qt.IsNotNil(ms.Compatible(m))) 1817 } 1818 1819 type benchValue struct { 1820 ID uint32 1821 Val16 uint16 1822 Val16_2 uint16 1823 Name [8]byte 1824 LID uint64 1825 } 1826 1827 type customBenchValue benchValue 1828 1829 func (cbv *customBenchValue) UnmarshalBinary(buf []byte) error { 1830 cbv.ID = internal.NativeEndian.Uint32(buf) 1831 cbv.Val16 = internal.NativeEndian.Uint16(buf[4:]) 1832 cbv.Val16_2 = internal.NativeEndian.Uint16(buf[6:]) 1833 copy(cbv.Name[:], buf[8:]) 1834 cbv.LID = internal.NativeEndian.Uint64(buf[16:]) 1835 return nil 1836 } 1837 1838 func (cbv *customBenchValue) MarshalBinary() ([]byte, error) { 1839 buf := make([]byte, 24) 1840 internal.NativeEndian.PutUint32(buf, cbv.ID) 1841 internal.NativeEndian.PutUint16(buf[4:], cbv.Val16) 1842 internal.NativeEndian.PutUint16(buf[6:], cbv.Val16_2) 1843 copy(buf[8:], cbv.Name[:]) 1844 internal.NativeEndian.PutUint64(buf[16:], cbv.LID) 1845 return buf, nil 1846 } 1847 1848 type benchKey struct { 1849 id uint64 1850 } 1851 1852 func (bk *benchKey) MarshalBinary() ([]byte, error) { 1853 buf := make([]byte, 8) 1854 internal.NativeEndian.PutUint64(buf, bk.id) 1855 return buf, nil 1856 } 1857 1858 func BenchmarkMarshaling(b *testing.B) { 1859 newMap := func(valueSize uint32) *Map { 1860 m, err := NewMap(&MapSpec{ 1861 Type: Hash, 1862 KeySize: 8, 1863 ValueSize: valueSize, 1864 MaxEntries: 1, 1865 }) 1866 if err != nil { 1867 b.Fatal(err) 1868 } 1869 return m 1870 } 1871 1872 key := uint64(0) 1873 1874 m := newMap(24) 1875 if err := m.Put(key, benchValue{}); err != nil { 1876 b.Fatal(err) 1877 } 1878 b.Cleanup(func() { m.Close() }) 1879 1880 b.Run("ValueUnmarshalReflect", func(b *testing.B) { 1881 b.ReportAllocs() 1882 b.ResetTimer() 1883 1884 var value benchValue 1885 1886 for i := 0; i < b.N; i++ { 1887 err := m.Lookup(unsafe.Pointer(&key), &value) 1888 if err != nil { 1889 b.Fatal("Can't get key:", err) 1890 } 1891 } 1892 }) 1893 1894 b.Run("KeyMarshalReflect", func(b *testing.B) { 1895 b.ReportAllocs() 1896 b.ResetTimer() 1897 1898 var value benchValue 1899 1900 for i := 0; i < b.N; i++ { 1901 err := m.Lookup(&key, unsafe.Pointer(&value)) 1902 if err != nil { 1903 b.Fatal("Can't get key:", err) 1904 } 1905 } 1906 }) 1907 1908 b.Run("ValueBinaryUnmarshaler", func(b *testing.B) { 1909 b.ReportAllocs() 1910 b.ResetTimer() 1911 1912 var value customBenchValue 1913 1914 for i := 0; i < b.N; i++ { 1915 err := m.Lookup(unsafe.Pointer(&key), &value) 1916 if err != nil { 1917 b.Fatal("Can't get key:", err) 1918 } 1919 } 1920 }) 1921 1922 b.Run("KeyBinaryMarshaler", func(b *testing.B) { 1923 b.ReportAllocs() 1924 b.ResetTimer() 1925 1926 var key benchKey 1927 var value customBenchValue 1928 1929 for i := 0; i < b.N; i++ { 1930 err := m.Lookup(&key, unsafe.Pointer(&value)) 1931 if err != nil { 1932 b.Fatal("Can't get key:", err) 1933 } 1934 } 1935 }) 1936 1937 b.Run("KeyValueUnsafe", func(b *testing.B) { 1938 b.ReportAllocs() 1939 b.ResetTimer() 1940 1941 var value benchValue 1942 1943 for i := 0; i < b.N; i++ { 1944 err := m.Lookup(unsafe.Pointer(&key), unsafe.Pointer(&value)) 1945 if err != nil { 1946 b.Fatal("Can't get key:", err) 1947 } 1948 } 1949 }) 1950 } 1951 1952 func BenchmarkPerCPUMarshalling(b *testing.B) { 1953 key := uint64(1) 1954 val := make([]uint64, MustPossibleCPU()) 1955 for i := range val { 1956 val[i] = uint64(i) 1957 } 1958 1959 m, err := NewMap(&MapSpec{ 1960 Type: PerCPUHash, 1961 KeySize: 8, 1962 ValueSize: 8, 1963 MaxEntries: 1, 1964 }) 1965 if err != nil { 1966 b.Fatal(err) 1967 } 1968 1969 b.Cleanup(func() { m.Close() }) 1970 if err := m.Put(key, val[0:]); err != nil { 1971 b.Fatal(err) 1972 } 1973 1974 b.Run("reflection", func(b *testing.B) { 1975 b.ReportAllocs() 1976 b.ResetTimer() 1977 1978 var value []uint64 1979 1980 for i := 0; i < b.N; i++ { 1981 err := m.Lookup(unsafe.Pointer(&key), &value) 1982 if err != nil { 1983 b.Fatal("Can't get key:", err) 1984 } 1985 } 1986 }) 1987 } 1988 1989 func BenchmarkMap(b *testing.B) { 1990 m, err := NewMap(&MapSpec{ 1991 Type: Hash, 1992 KeySize: 4, 1993 ValueSize: 4, 1994 MaxEntries: 1, 1995 }) 1996 if err != nil { 1997 b.Fatal(err) 1998 } 1999 b.Cleanup(func() { m.Close() }) 2000 2001 if err := m.Put(uint32(0), uint32(42)); err != nil { 2002 b.Fatal(err) 2003 } 2004 2005 b.Run("Lookup", func(b *testing.B) { 2006 var key, value uint32 2007 2008 b.ReportAllocs() 2009 2010 for i := 0; i < b.N; i++ { 2011 err := m.Lookup(unsafe.Pointer(&key), unsafe.Pointer(&value)) 2012 if err != nil { 2013 b.Fatal(err) 2014 } 2015 } 2016 }) 2017 2018 b.Run("Update", func(b *testing.B) { 2019 var key, value uint32 2020 2021 b.ReportAllocs() 2022 2023 for i := 0; i < b.N; i++ { 2024 err := m.Update(unsafe.Pointer(&key), unsafe.Pointer(&value), UpdateAny) 2025 if err != nil { 2026 b.Fatal(err) 2027 } 2028 } 2029 }) 2030 2031 b.Run("NextKey", func(b *testing.B) { 2032 var key uint32 2033 2034 b.ReportAllocs() 2035 2036 for i := 0; i < b.N; i++ { 2037 err := m.NextKey(nil, unsafe.Pointer(&key)) 2038 if err != nil { 2039 b.Fatal(err) 2040 } 2041 } 2042 }) 2043 2044 b.Run("Delete", func(b *testing.B) { 2045 var key uint32 2046 2047 b.ReportAllocs() 2048 2049 for i := 0; i < b.N; i++ { 2050 err := m.Delete(unsafe.Pointer(&key)) 2051 if err != nil && !errors.Is(err, ErrKeyNotExist) { 2052 b.Fatal(err) 2053 } 2054 } 2055 }) 2056 } 2057 2058 func BenchmarkIterate(b *testing.B) { 2059 for _, mt := range []MapType{Hash, PerCPUHash} { 2060 m, err := NewMap(&MapSpec{ 2061 Type: mt, 2062 KeySize: 8, 2063 ValueSize: 8, 2064 MaxEntries: 1000, 2065 }) 2066 if err != nil { 2067 b.Fatal(err) 2068 } 2069 b.Cleanup(func() { 2070 m.Close() 2071 }) 2072 possibleCPU := 1 2073 if m.Type().hasPerCPUValue() { 2074 possibleCPU = MustPossibleCPU() 2075 } 2076 var ( 2077 n = m.MaxEntries() 2078 keys = make([]uint64, n) 2079 values = make([]uint64, n*uint32(possibleCPU)) 2080 ) 2081 2082 for i := 0; uint32(i) < n; i++ { 2083 keys[i] = uint64(i) 2084 for j := 0; j < possibleCPU; j++ { 2085 values[i] = uint64((i * possibleCPU) + j) 2086 } 2087 } 2088 2089 _, err = m.BatchUpdate(keys, values, nil) 2090 testutils.SkipIfNotSupported(b, err) 2091 qt.Assert(b, qt.IsNil(err)) 2092 2093 b.Run(m.Type().String(), func(b *testing.B) { 2094 b.Run("MapIterator", func(b *testing.B) { 2095 var k uint64 2096 v := make([]uint64, possibleCPU) 2097 2098 b.ReportAllocs() 2099 b.ResetTimer() 2100 2101 for i := 0; i < b.N; i++ { 2102 iter := m.Iterate() 2103 for iter.Next(&k, v) { 2104 continue 2105 } 2106 if err := iter.Err(); err != nil { 2107 b.Fatal(err) 2108 } 2109 } 2110 }) 2111 2112 b.Run("MapIteratorDelete", func(b *testing.B) { 2113 var k uint64 2114 v := make([]uint64, possibleCPU) 2115 2116 b.ReportAllocs() 2117 b.ResetTimer() 2118 2119 for i := 0; i < b.N; i++ { 2120 b.StopTimer() 2121 if _, err := m.BatchUpdate(keys, values, nil); err != nil { 2122 b.Fatal(err) 2123 } 2124 b.StartTimer() 2125 2126 iter := m.Iterate() 2127 for iter.Next(&k, &v) { 2128 if err := m.Delete(&k); err != nil { 2129 b.Fatal(err) 2130 } 2131 } 2132 if err := iter.Err(); err != nil { 2133 b.Fatal(err) 2134 } 2135 } 2136 }) 2137 2138 b.Run("BatchLookup", func(b *testing.B) { 2139 k := make([]uint64, m.MaxEntries()) 2140 v := make([]uint64, m.MaxEntries()*uint32(possibleCPU)) 2141 2142 b.ReportAllocs() 2143 b.ResetTimer() 2144 2145 for i := 0; i < b.N; i++ { 2146 var cursor MapBatchCursor 2147 for { 2148 _, err := m.BatchLookup(&cursor, k, v, nil) 2149 if errors.Is(err, ErrKeyNotExist) { 2150 break 2151 } 2152 if err != nil { 2153 b.Fatal(err) 2154 } 2155 } 2156 } 2157 }) 2158 2159 b.Run("BatchLookupAndDelete", func(b *testing.B) { 2160 k := make([]uint64, m.MaxEntries()) 2161 v := make([]uint64, m.MaxEntries()*uint32(possibleCPU)) 2162 2163 b.ReportAllocs() 2164 b.ResetTimer() 2165 2166 for i := 0; i < b.N; i++ { 2167 b.StopTimer() 2168 if _, err := m.BatchUpdate(keys, values, nil); err != nil { 2169 b.Fatal(err) 2170 } 2171 b.StartTimer() 2172 2173 var cursor MapBatchCursor 2174 for { 2175 _, err := m.BatchLookupAndDelete(&cursor, k, v, nil) 2176 if errors.Is(err, ErrKeyNotExist) { 2177 break 2178 } 2179 if err != nil { 2180 b.Fatal(err) 2181 } 2182 } 2183 } 2184 }) 2185 2186 b.Run("BatchDelete", func(b *testing.B) { 2187 b.ReportAllocs() 2188 b.ResetTimer() 2189 2190 for i := 0; i < b.N; i++ { 2191 b.StopTimer() 2192 if _, err := m.BatchUpdate(keys, values, nil); err != nil { 2193 b.Fatal(err) 2194 } 2195 b.StartTimer() 2196 2197 if _, err := m.BatchDelete(keys, nil); err != nil { 2198 b.Fatal(err) 2199 } 2200 } 2201 }) 2202 }) 2203 } 2204 } 2205 2206 // Per CPU maps store a distinct value for each CPU. They are useful 2207 // to collect metrics. 2208 func ExampleMap_perCPU() { 2209 arr, err := NewMap(&MapSpec{ 2210 Type: PerCPUArray, 2211 KeySize: 4, 2212 ValueSize: 4, 2213 MaxEntries: 2, 2214 }) 2215 if err != nil { 2216 panic(err) 2217 } 2218 defer arr.Close() 2219 2220 possibleCPUs := MustPossibleCPU() 2221 perCPUValues := map[uint32]uint32{ 2222 0: 4, 2223 1: 5, 2224 } 2225 2226 for k, v := range perCPUValues { 2227 // We set each perCPU slots to the same value. 2228 values := make([]uint32, possibleCPUs) 2229 for i := range values { 2230 values[i] = v 2231 } 2232 if err := arr.Put(k, values); err != nil { 2233 panic(err) 2234 } 2235 } 2236 2237 for k := 0; k < 2; k++ { 2238 var values []uint32 2239 if err := arr.Lookup(uint32(k), &values); err != nil { 2240 panic(err) 2241 } 2242 // Note we will print an unexpected message if this is not true. 2243 fmt.Printf("Value of key %v on all CPUs: %v\n", k, values[0]) 2244 } 2245 var ( 2246 key uint32 2247 entries = arr.Iterate() 2248 ) 2249 2250 var values []uint32 2251 for entries.Next(&key, &values) { 2252 expected, ok := perCPUValues[key] 2253 if !ok { 2254 fmt.Printf("Unexpected key %v\n", key) 2255 continue 2256 } 2257 2258 for i, n := range values { 2259 if n != expected { 2260 fmt.Printf("Key %v, Value for cpu %v is %v not %v\n", 2261 key, i, n, expected) 2262 } 2263 } 2264 } 2265 2266 if err := entries.Err(); err != nil { 2267 panic(err) 2268 } 2269 // Output: 2270 // Value of key 0 on all CPUs: 4 2271 // Value of key 1 on all CPUs: 5 2272 } 2273 2274 // It is possible to use unsafe.Pointer to avoid marshalling 2275 // and copy overhead. It is the responsibility of the caller to ensure 2276 // the correct size of unsafe.Pointers. 2277 // 2278 // Note that using unsafe.Pointer is only marginally faster than 2279 // implementing Marshaler on the type. 2280 func ExampleMap_zeroCopy() { 2281 hash, err := NewMap(&MapSpec{ 2282 Type: Hash, 2283 KeySize: 5, 2284 ValueSize: 4, 2285 MaxEntries: 10, 2286 }) 2287 if err != nil { 2288 panic(err) 2289 } 2290 defer hash.Close() 2291 2292 key := [5]byte{'h', 'e', 'l', 'l', 'o'} 2293 value := uint32(23) 2294 2295 if err := hash.Put(unsafe.Pointer(&key), unsafe.Pointer(&value)); err != nil { 2296 panic(err) 2297 } 2298 2299 value = 0 2300 if err := hash.Lookup(unsafe.Pointer(&key), unsafe.Pointer(&value)); err != nil { 2301 panic("can't get value:" + err.Error()) 2302 } 2303 2304 fmt.Printf("The value is: %d\n", value) 2305 // Output: The value is: 23 2306 } 2307 2308 func ExampleMap_NextKey() { 2309 hash, err := NewMap(&MapSpec{ 2310 Type: Hash, 2311 KeySize: 5, 2312 ValueSize: 4, 2313 MaxEntries: 10, 2314 Contents: []MapKV{ 2315 {"hello", uint32(21)}, 2316 {"world", uint32(42)}, 2317 }, 2318 }) 2319 if err != nil { 2320 panic(err) 2321 } 2322 defer hash.Close() 2323 2324 var cur, next string 2325 var keys []string 2326 2327 for err = hash.NextKey(nil, &next); ; err = hash.NextKey(cur, &next) { 2328 if errors.Is(err, ErrKeyNotExist) { 2329 break 2330 } 2331 if err != nil { 2332 panic(err) 2333 } 2334 keys = append(keys, next) 2335 cur = next 2336 } 2337 2338 // Order of keys is non-deterministic due to randomized map seed 2339 sort.Strings(keys) 2340 fmt.Printf("Keys are %v\n", keys) 2341 // Output: Keys are [hello world] 2342 } 2343 2344 // ExampleMap_Iterate demonstrates how to iterate over all entries 2345 // in a map. 2346 func ExampleMap_Iterate() { 2347 hash, err := NewMap(&MapSpec{ 2348 Type: Hash, 2349 KeySize: 5, 2350 ValueSize: 4, 2351 MaxEntries: 10, 2352 Contents: []MapKV{ 2353 {"hello", uint32(21)}, 2354 {"world", uint32(42)}, 2355 }, 2356 }) 2357 if err != nil { 2358 panic(err) 2359 } 2360 defer hash.Close() 2361 2362 var ( 2363 key string 2364 value uint32 2365 entries = hash.Iterate() 2366 ) 2367 2368 values := make(map[string]uint32) 2369 for entries.Next(&key, &value) { 2370 // Order of keys is non-deterministic due to randomized map seed 2371 values[key] = value 2372 } 2373 2374 if err := entries.Err(); err != nil { 2375 panic(fmt.Sprint("Iterator encountered an error:", err)) 2376 } 2377 2378 for k, v := range values { 2379 fmt.Printf("key: %s, value: %d\n", k, v) 2380 } 2381 2382 // Unordered output: 2383 // key: hello, value: 21 2384 // key: world, value: 42 2385 } 2386 2387 // It is possible to iterate nested maps and program arrays by 2388 // unmarshaling into a *Map or *Program. 2389 func ExampleMap_Iterate_nestedMapsAndProgramArrays() { 2390 inner := &MapSpec{ 2391 Type: Array, 2392 KeySize: 4, 2393 ValueSize: 4, 2394 MaxEntries: 2, 2395 Contents: []MapKV{ 2396 {uint32(0), uint32(1)}, 2397 {uint32(1), uint32(2)}, 2398 }, 2399 } 2400 im, err := NewMap(inner) 2401 if err != nil { 2402 panic(err) 2403 } 2404 defer im.Close() 2405 2406 outer := &MapSpec{ 2407 Type: ArrayOfMaps, 2408 InnerMap: inner, 2409 KeySize: 4, 2410 ValueSize: 4, 2411 MaxEntries: 10, 2412 Contents: []MapKV{ 2413 {uint32(0), im}, 2414 }, 2415 } 2416 arrayOfMaps, err := NewMap(outer) 2417 if errors.Is(err, internal.ErrNotSupported) { 2418 // Fake the output if on very old kernel. 2419 fmt.Println("outerKey: 0") 2420 fmt.Println("\tinnerKey 0 innerValue 1") 2421 fmt.Println("\tinnerKey 1 innerValue 2") 2422 return 2423 } 2424 if err != nil { 2425 panic(err) 2426 } 2427 defer arrayOfMaps.Close() 2428 2429 var ( 2430 key uint32 2431 m *Map 2432 entries = arrayOfMaps.Iterate() 2433 ) 2434 for entries.Next(&key, &m) { 2435 // Make sure that the iterated map is closed after 2436 // we are done. 2437 defer m.Close() 2438 2439 // Order of keys is non-deterministic due to randomized map seed 2440 fmt.Printf("outerKey: %v\n", key) 2441 2442 var innerKey, innerValue uint32 2443 items := m.Iterate() 2444 for items.Next(&innerKey, &innerValue) { 2445 fmt.Printf("\tinnerKey %v innerValue %v\n", innerKey, innerValue) 2446 } 2447 if err := items.Err(); err != nil { 2448 panic(fmt.Sprint("Inner Iterator encountered an error:", err)) 2449 } 2450 } 2451 2452 if err := entries.Err(); err != nil { 2453 panic(fmt.Sprint("Iterator encountered an error:", err)) 2454 } 2455 // Output: 2456 // outerKey: 0 2457 // innerKey 0 innerValue 1 2458 // innerKey 1 innerValue 2 2459 }