package btf

import (
	"encoding/binary"
	"errors"
	"fmt"
	"math"
	"reflect"
	"strconv"
	"strings"

	"github.com/cilium/ebpf/asm"
)

// Code in this file is derived from libbpf, which is available under a BSD
// 2-Clause license.

// COREFixup is the result of computing a CO-RE relocation for a target.
type COREFixup struct {
	// The kind of relocation this fixup was computed for.
	kind coreKind
	// The value recorded against the local (compile-time) BTF.
	local uint32
	// The replacement value derived from the target BTF.
	target uint32
	// True if there is no valid fixup. The instruction is replaced with an
	// invalid dummy.
	poison bool
	// True if the validation of the local value should be skipped. Used by
	// some kinds of bitfield relocations.
	skipLocalValidation bool
}

// equal returns true if both fixups patch the same local value to the same
// target value. The poison and validation flags are deliberately ignored.
func (f *COREFixup) equal(other COREFixup) bool {
	return f.local == other.local && f.target == other.target
}

func (f *COREFixup) String() string {
	if f.poison {
		return fmt.Sprintf("%s=poison", f.kind)
	}
	return fmt.Sprintf("%s=%d->%d", f.kind, f.local, f.target)
}

// Apply patches ins in place according to the fixup.
//
// A poisoned fixup replaces the instruction with a call to a bogus built-in
// (badRelo) so that the program is rejected if the instruction is ever
// reached, instead of silently computing with a wrong value.
func (f *COREFixup) Apply(ins *asm.Instruction) error {
	if f.poison {
		const badRelo = 0xbad2310

		*ins = asm.BuiltinFunc(badRelo).Call()
		return nil
	}

	switch class := ins.OpCode.Class(); class {
	case asm.LdXClass, asm.StClass, asm.StXClass:
		// Loads and stores carry the relocated value in the 16 bit offset.
		if want := int16(f.local); !f.skipLocalValidation && want != ins.Offset {
			return fmt.Errorf("invalid offset %d, expected %d", ins.Offset, f.local)
		}

		if f.target > math.MaxInt16 {
			return fmt.Errorf("offset %d exceeds MaxInt16", f.target)
		}

		ins.Offset = int16(f.target)

	case asm.LdClass:
		// Only 64 bit immediate loads can be relocated.
		if !ins.IsConstantLoad(asm.DWord) {
			return fmt.Errorf("not a dword-sized immediate load")
		}

		if want := int64(f.local); !f.skipLocalValidation && want != ins.Constant {
			return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v)", ins.Constant, want, f)
		}

		ins.Constant = int64(f.target)

	case asm.ALUClass:
		if ins.OpCode.ALUOp() == asm.Swap {
			return fmt.Errorf("relocation against swap")
		}

		fallthrough

	case asm.ALU64Class:
		// ALU(64) instructions carry the relocated value as an immediate.
		if src := ins.OpCode.Source(); src != asm.ImmSource {
			return fmt.Errorf("invalid source %s", src)
		}

		if want := int64(f.local); !f.skipLocalValidation && want != ins.Constant {
			return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v, kind: %v, ins: %v)", ins.Constant, want, f, f.kind, ins)
		}

		if f.target > math.MaxInt32 {
			return fmt.Errorf("immediate %d exceeds MaxInt32", f.target)
		}

		ins.Constant = int64(f.target)

	default:
		return fmt.Errorf("invalid class %s", class)
	}

	return nil
}

// isNonExistant returns true if the fixup is for an existence check whose
// subject does not exist in the target BTF (target == 0).
func (f COREFixup) isNonExistant() bool {
	return f.kind.checksForExistence() && f.target == 0
}

// coreKind is the type of CO-RE relocation as specified in BPF source code.
type coreKind uint32

const (
	reloFieldByteOffset coreKind = iota /* field byte offset */
	reloFieldByteSize                   /* field size in bytes */
	reloFieldExists                     /* field existence in target kernel */
	reloFieldSigned                     /* field signedness (0 - unsigned, 1 - signed) */
	reloFieldLShiftU64                  /* bitfield-specific left bitshift */
	reloFieldRShiftU64                  /* bitfield-specific right bitshift */
	reloTypeIDLocal                     /* type ID in local BPF object */
	reloTypeIDTarget                    /* type ID in target kernel */
	reloTypeExists                      /* type existence in target kernel */
	reloTypeSize                        /* type size in bytes */
	reloEnumvalExists                   /* enum value existence in target kernel */
	reloEnumvalValue                    /* enum value integer value */
)

// checksForExistence returns true for relocation kinds whose result encodes
// "does this exist in the target" rather than a value to substitute.
func (k coreKind) checksForExistence() bool {
	return k == reloEnumvalExists || k == reloTypeExists || k == reloFieldExists
}

func (k coreKind) String() string {
	switch k {
	case reloFieldByteOffset:
		return "byte_off"
	case reloFieldByteSize:
		return "byte_sz"
	case reloFieldExists:
		return "field_exists"
	case reloFieldSigned:
		return "signed"
	case reloFieldLShiftU64:
		return "lshift_u64"
	case reloFieldRShiftU64:
		return "rshift_u64"
	case reloTypeIDLocal:
		return "local_type_id"
	case reloTypeIDTarget:
		return "target_type_id"
	case reloTypeExists:
		return "type_exists"
	case reloTypeSize:
		return "type_size"
	case reloEnumvalExists:
		return "enumval_exists"
	case reloEnumvalValue:
		return "enumval_value"
	default:
		return "unknown"
	}
}

// CORERelocate calculates changes needed to adjust eBPF instructions for differences
// in types.
//
// Returns a list of fixups which can be applied to instructions to make them
// match the target type(s).
//
// Fixups are returned in the order of relos, e.g. fixup[i] is the solution
// for relos[i].
func CORERelocate(relos []*CORERelocation, target *Spec, bo binary.ByteOrder) ([]COREFixup, error) {
	if bo != target.byteOrder {
		return nil, fmt.Errorf("can't relocate %s against %s", bo, target.byteOrder)
	}

	type reloGroup struct {
		relos []*CORERelocation
		// Position of each relocation in relos.
		indices []int
	}

	// Split relocations into per Type lists.
	relosByType := make(map[Type]*reloGroup)
	result := make([]COREFixup, len(relos))
	for i, relo := range relos {
		if relo.kind == reloTypeIDLocal {
			// Filtering out reloTypeIDLocal here makes our lives a lot easier
			// down the line, since it doesn't have a target at all.
			if len(relo.accessor) > 1 || relo.accessor[0] != 0 {
				return nil, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor)
			}

			result[i] = COREFixup{
				kind:  relo.kind,
				local: uint32(relo.id),
				// NB: Using relo.id as the target here is incorrect, since
				// it doesn't match the BTF we generate on the fly. This isn't
				// too bad for now since there are no uses of the local type ID
				// in the kernel, yet.
				target: uint32(relo.id),
			}
			continue
		}

		group, ok := relosByType[relo.typ]
		if !ok {
			group = &reloGroup{}
			relosByType[relo.typ] = group
		}
		group.relos = append(group.relos, relo)
		group.indices = append(group.indices, i)
	}

	for localType, group := range relosByType {
		localTypeName := localType.TypeName()
		if localTypeName == "" {
			return nil, fmt.Errorf("relocate unnamed or anonymous type %s: %w", localType, ErrNotSupported)
		}

		// All named target types sharing the local type's essential name are
		// candidates; coreCalculateFixups picks the best match among them.
		targets := target.namedTypes[newEssentialName(localTypeName)]
		fixups, err := coreCalculateFixups(group.relos, target, targets, bo)
		if err != nil {
			return nil, fmt.Errorf("relocate %s: %w", localType, err)
		}

		// Scatter the per-group fixups back into their original positions.
		for j, index := range group.indices {
			result[index] = fixups[j]
		}
	}

	return result, nil
}

var errAmbiguousRelocation = errors.New("ambiguous relocation")
var errImpossibleRelocation = errors.New("impossible relocation")

// coreCalculateFixups finds the target type that best matches all relocations.
//
// All relos must target the same type.
//
// The best target is determined by scoring: the less poisoning we have to do
// the better the target is.
func coreCalculateFixups(relos []*CORERelocation, targetSpec *Spec, targets []Type, bo binary.ByteOrder) ([]COREFixup, error) {
	// Start out with the worst possible score (everything poisoned), so a
	// candidate scoring exactly len(relos) still ties and is kept via the
	// equality path below.
	bestScore := len(relos)
	var bestFixups []COREFixup
	for i := range targets {
		targetID, err := targetSpec.TypeID(targets[i])
		if err != nil {
			return nil, fmt.Errorf("target type ID: %w", err)
		}
		// Strip qualifiers and typedefs so the comparison sees the real type.
		target := Copy(targets[i], UnderlyingType)

		score := 0 // lower is better
		fixups := make([]COREFixup, 0, len(relos))
		for _, relo := range relos {
			fixup, err := coreCalculateFixup(relo, target, targetID, bo)
			if err != nil {
				return nil, fmt.Errorf("target %s: %s: %w", target, relo.kind, err)
			}
			if fixup.poison || fixup.isNonExistant() {
				score++
			}
			fixups = append(fixups, fixup)
		}

		if score > bestScore {
			// We have a better target already, ignore this one.
			continue
		}

		if score < bestScore {
			// This is the best target yet, use it.
			bestScore = score
			bestFixups = fixups
			continue
		}

		// Some other target has the same score as the current one. Make sure
		// the fixups agree with each other.
		// NB: on the first tie bestFixups may still be nil, in which case this
		// loop does nothing and the nil result is handled below.
		for i, fixup := range bestFixups {
			if !fixup.equal(fixups[i]) {
				return nil, fmt.Errorf("%s: multiple types match: %w", fixup.kind, errAmbiguousRelocation)
			}
		}
	}

	if bestFixups == nil {
		// Nothing at all matched, probably because there are no suitable
		// targets at all.
		//
		// Poison everything except checksForExistence.
		bestFixups = make([]COREFixup, len(relos))
		for i, relo := range relos {
			if relo.kind.checksForExistence() {
				bestFixups[i] = COREFixup{kind: relo.kind, local: 1, target: 0}
			} else {
				bestFixups[i] = COREFixup{kind: relo.kind, poison: true}
			}
		}
	}

	return bestFixups, nil
}

// coreCalculateFixup calculates the fixup for a single local type, target type
// and relocation.
303 func coreCalculateFixup(relo *CORERelocation, target Type, targetID TypeID, bo binary.ByteOrder) (COREFixup, error) { 304 fixup := func(local, target uint32) (COREFixup, error) { 305 return COREFixup{kind: relo.kind, local: local, target: target}, nil 306 } 307 fixupWithoutValidation := func(local, target uint32) (COREFixup, error) { 308 return COREFixup{kind: relo.kind, local: local, target: target, skipLocalValidation: true}, nil 309 } 310 poison := func() (COREFixup, error) { 311 if relo.kind.checksForExistence() { 312 return fixup(1, 0) 313 } 314 return COREFixup{kind: relo.kind, poison: true}, nil 315 } 316 zero := COREFixup{} 317 318 local := Copy(relo.typ, UnderlyingType) 319 320 switch relo.kind { 321 case reloTypeIDTarget, reloTypeSize, reloTypeExists: 322 if len(relo.accessor) > 1 || relo.accessor[0] != 0 { 323 return zero, fmt.Errorf("unexpected accessor %v", relo.accessor) 324 } 325 326 err := coreAreTypesCompatible(local, target) 327 if errors.Is(err, errImpossibleRelocation) { 328 return poison() 329 } 330 if err != nil { 331 return zero, err 332 } 333 334 switch relo.kind { 335 case reloTypeExists: 336 return fixup(1, 1) 337 338 case reloTypeIDTarget: 339 return fixup(uint32(relo.id), uint32(targetID)) 340 341 case reloTypeSize: 342 localSize, err := Sizeof(local) 343 if err != nil { 344 return zero, err 345 } 346 347 targetSize, err := Sizeof(target) 348 if err != nil { 349 return zero, err 350 } 351 352 return fixup(uint32(localSize), uint32(targetSize)) 353 } 354 355 case reloEnumvalValue, reloEnumvalExists: 356 localValue, targetValue, err := coreFindEnumValue(local, relo.accessor, target) 357 if errors.Is(err, errImpossibleRelocation) { 358 return poison() 359 } 360 if err != nil { 361 return zero, err 362 } 363 364 switch relo.kind { 365 case reloEnumvalExists: 366 return fixup(1, 1) 367 368 case reloEnumvalValue: 369 return fixup(uint32(localValue.Value), uint32(targetValue.Value)) 370 } 371 372 case reloFieldSigned: 373 switch 
local.(type) { 374 case *Enum: 375 return fixup(1, 1) 376 case *Int: 377 return fixup( 378 uint32(local.(*Int).Encoding&Signed), 379 uint32(target.(*Int).Encoding&Signed), 380 ) 381 default: 382 return fixupWithoutValidation(0, 0) 383 } 384 385 case reloFieldByteOffset, reloFieldByteSize, reloFieldExists, reloFieldLShiftU64, reloFieldRShiftU64: 386 if _, ok := target.(*Fwd); ok { 387 // We can't relocate fields using a forward declaration, so 388 // skip it. If a non-forward declaration is present in the BTF 389 // we'll find it in one of the other iterations. 390 return poison() 391 } 392 393 localField, targetField, err := coreFindField(local, relo.accessor, target) 394 if errors.Is(err, errImpossibleRelocation) { 395 return poison() 396 } 397 if err != nil { 398 return zero, err 399 } 400 401 maybeSkipValidation := func(f COREFixup, err error) (COREFixup, error) { 402 f.skipLocalValidation = localField.bitfieldSize > 0 403 return f, err 404 } 405 406 switch relo.kind { 407 case reloFieldExists: 408 return fixup(1, 1) 409 410 case reloFieldByteOffset: 411 return maybeSkipValidation(fixup(localField.offset, targetField.offset)) 412 413 case reloFieldByteSize: 414 localSize, err := Sizeof(localField.Type) 415 if err != nil { 416 return zero, err 417 } 418 419 targetSize, err := Sizeof(targetField.Type) 420 if err != nil { 421 return zero, err 422 } 423 return maybeSkipValidation(fixup(uint32(localSize), uint32(targetSize))) 424 425 case reloFieldLShiftU64: 426 var target uint32 427 if bo == binary.LittleEndian { 428 targetSize, err := targetField.sizeBits() 429 if err != nil { 430 return zero, err 431 } 432 433 target = uint32(64 - targetField.bitfieldOffset - targetSize) 434 } else { 435 loadWidth, err := Sizeof(targetField.Type) 436 if err != nil { 437 return zero, err 438 } 439 440 target = uint32(64 - Bits(loadWidth*8) + targetField.bitfieldOffset) 441 } 442 return fixupWithoutValidation(0, target) 443 444 case reloFieldRShiftU64: 445 targetSize, err := 
targetField.sizeBits() 446 if err != nil { 447 return zero, err 448 } 449 450 return fixupWithoutValidation(0, uint32(64-targetSize)) 451 } 452 } 453 454 return zero, ErrNotSupported 455 } 456 457 /* coreAccessor contains a path through a struct. It contains at least one index. 458 * 459 * The interpretation depends on the kind of the relocation. The following is 460 * taken from struct bpf_core_relo in libbpf_internal.h: 461 * 462 * - for field-based relocations, string encodes an accessed field using 463 * a sequence of field and array indices, separated by colon (:). It's 464 * conceptually very close to LLVM's getelementptr ([0]) instruction's 465 * arguments for identifying offset to a field. 466 * - for type-based relocations, strings is expected to be just "0"; 467 * - for enum value-based relocations, string contains an index of enum 468 * value within its enum type; 469 * 470 * Example to provide a better feel. 471 * 472 * struct sample { 473 * int a; 474 * struct { 475 * int b[10]; 476 * }; 477 * }; 478 * 479 * struct sample s = ...; 480 * int x = &s->a; // encoded as "0:0" (a is field #0) 481 * int y = &s->b[5]; // encoded as "0:1:0:5" (anon struct is field #1, 482 * // b is field #0 inside anon struct, accessing elem #5) 483 * int z = &s[10]->b; // encoded as "10:1" (ptr is used as an array) 484 */ 485 type coreAccessor []int 486 487 func parseCOREAccessor(accessor string) (coreAccessor, error) { 488 if accessor == "" { 489 return nil, fmt.Errorf("empty accessor") 490 } 491 492 parts := strings.Split(accessor, ":") 493 result := make(coreAccessor, 0, len(parts)) 494 for _, part := range parts { 495 // 31 bits to avoid overflowing int on 32 bit platforms. 
496 index, err := strconv.ParseUint(part, 10, 31) 497 if err != nil { 498 return nil, fmt.Errorf("accessor index %q: %s", part, err) 499 } 500 501 result = append(result, int(index)) 502 } 503 504 return result, nil 505 } 506 507 func (ca coreAccessor) String() string { 508 strs := make([]string, 0, len(ca)) 509 for _, i := range ca { 510 strs = append(strs, strconv.Itoa(i)) 511 } 512 return strings.Join(strs, ":") 513 } 514 515 func (ca coreAccessor) enumValue(t Type) (*EnumValue, error) { 516 e, ok := t.(*Enum) 517 if !ok { 518 return nil, fmt.Errorf("not an enum: %s", t) 519 } 520 521 if len(ca) > 1 { 522 return nil, fmt.Errorf("invalid accessor %s for enum", ca) 523 } 524 525 i := ca[0] 526 if i >= len(e.Values) { 527 return nil, fmt.Errorf("invalid index %d for %s", i, e) 528 } 529 530 return &e.Values[i], nil 531 } 532 533 // coreField represents the position of a "child" of a composite type from the 534 // start of that type. 535 // 536 // /- start of composite 537 // | offset * 8 | bitfieldOffset | bitfieldSize | ... | 538 // \- start of field end of field -/ 539 type coreField struct { 540 Type Type 541 542 // The position of the field from the start of the composite type in bytes. 543 offset uint32 544 545 // The offset of the bitfield in bits from the start of the field. 546 bitfieldOffset Bits 547 548 // The size of the bitfield in bits. 549 // 550 // Zero if the field is not a bitfield. 551 bitfieldSize Bits 552 } 553 554 func (cf *coreField) adjustOffsetToNthElement(n int) error { 555 if n == 0 { 556 return nil 557 } 558 559 size, err := Sizeof(cf.Type) 560 if err != nil { 561 return err 562 } 563 564 cf.offset += uint32(n) * uint32(size) 565 return nil 566 } 567 568 func (cf *coreField) adjustOffsetBits(offset Bits) error { 569 align, err := alignof(cf.Type) 570 if err != nil { 571 return err 572 } 573 574 // We can compute the load offset by: 575 // 1) converting the bit offset to bytes with a flooring division. 
576 // 2) dividing and multiplying that offset by the alignment, yielding the 577 // load size aligned offset. 578 offsetBytes := uint32(offset/8) / uint32(align) * uint32(align) 579 580 // The number of bits remaining is the bit offset less the number of bits 581 // we can "skip" with the aligned offset. 582 cf.bitfieldOffset = offset - Bits(offsetBytes*8) 583 584 // We know that cf.offset is aligned at to at least align since we get it 585 // from the compiler via BTF. Adding an aligned offsetBytes preserves the 586 // alignment. 587 cf.offset += offsetBytes 588 return nil 589 } 590 591 func (cf *coreField) sizeBits() (Bits, error) { 592 if cf.bitfieldSize > 0 { 593 return cf.bitfieldSize, nil 594 } 595 596 // Someone is trying to access a non-bitfield via a bit shift relocation. 597 // This happens when a field changes from a bitfield to a regular field 598 // between kernel versions. Synthesise the size to make the shifts work. 599 size, err := Sizeof(cf.Type) 600 if err != nil { 601 return 0, nil 602 } 603 return Bits(size * 8), nil 604 } 605 606 // coreFindField descends into the local type using the accessor and tries to 607 // find an equivalent field in target at each step. 608 // 609 // Returns the field and the offset of the field from the start of 610 // target in bits. 611 func coreFindField(localT Type, localAcc coreAccessor, targetT Type) (coreField, coreField, error) { 612 local := coreField{Type: localT} 613 target := coreField{Type: targetT} 614 615 if err := coreAreMembersCompatible(local.Type, target.Type); err != nil { 616 return coreField{}, coreField{}, fmt.Errorf("fields: %w", err) 617 } 618 619 // The first index is used to offset a pointer of the base type like 620 // when accessing an array. 
621 if err := local.adjustOffsetToNthElement(localAcc[0]); err != nil { 622 return coreField{}, coreField{}, err 623 } 624 625 if err := target.adjustOffsetToNthElement(localAcc[0]); err != nil { 626 return coreField{}, coreField{}, err 627 } 628 629 var localMaybeFlex, targetMaybeFlex bool 630 for i, acc := range localAcc[1:] { 631 switch localType := local.Type.(type) { 632 case composite: 633 // For composite types acc is used to find the field in the local type, 634 // and then we try to find a field in target with the same name. 635 localMembers := localType.members() 636 if acc >= len(localMembers) { 637 return coreField{}, coreField{}, fmt.Errorf("invalid accessor %d for %s", acc, localType) 638 } 639 640 localMember := localMembers[acc] 641 if localMember.Name == "" { 642 _, ok := localMember.Type.(composite) 643 if !ok { 644 return coreField{}, coreField{}, fmt.Errorf("unnamed field with type %s: %s", localMember.Type, ErrNotSupported) 645 } 646 647 // This is an anonymous struct or union, ignore it. 
648 local = coreField{ 649 Type: localMember.Type, 650 offset: local.offset + localMember.Offset.Bytes(), 651 } 652 localMaybeFlex = false 653 continue 654 } 655 656 targetType, ok := target.Type.(composite) 657 if !ok { 658 return coreField{}, coreField{}, fmt.Errorf("target not composite: %w", errImpossibleRelocation) 659 } 660 661 targetMember, last, err := coreFindMember(targetType, localMember.Name) 662 if err != nil { 663 return coreField{}, coreField{}, err 664 } 665 666 local = coreField{ 667 Type: localMember.Type, 668 offset: local.offset, 669 bitfieldSize: localMember.BitfieldSize, 670 } 671 localMaybeFlex = acc == len(localMembers)-1 672 673 target = coreField{ 674 Type: targetMember.Type, 675 offset: target.offset, 676 bitfieldSize: targetMember.BitfieldSize, 677 } 678 targetMaybeFlex = last 679 680 if local.bitfieldSize == 0 && target.bitfieldSize == 0 { 681 local.offset += localMember.Offset.Bytes() 682 target.offset += targetMember.Offset.Bytes() 683 break 684 } 685 686 // Either of the members is a bitfield. Make sure we're at the 687 // end of the accessor. 688 if next := i + 1; next < len(localAcc[1:]) { 689 return coreField{}, coreField{}, fmt.Errorf("can't descend into bitfield") 690 } 691 692 if err := local.adjustOffsetBits(localMember.Offset); err != nil { 693 return coreField{}, coreField{}, err 694 } 695 696 if err := target.adjustOffsetBits(targetMember.Offset); err != nil { 697 return coreField{}, coreField{}, err 698 } 699 700 case *Array: 701 // For arrays, acc is the index in the target. 
702 targetType, ok := target.Type.(*Array) 703 if !ok { 704 return coreField{}, coreField{}, fmt.Errorf("target not array: %w", errImpossibleRelocation) 705 } 706 707 if localType.Nelems == 0 && !localMaybeFlex { 708 return coreField{}, coreField{}, fmt.Errorf("local type has invalid flexible array") 709 } 710 if targetType.Nelems == 0 && !targetMaybeFlex { 711 return coreField{}, coreField{}, fmt.Errorf("target type has invalid flexible array") 712 } 713 714 if localType.Nelems > 0 && acc >= int(localType.Nelems) { 715 return coreField{}, coreField{}, fmt.Errorf("invalid access of %s at index %d", localType, acc) 716 } 717 if targetType.Nelems > 0 && acc >= int(targetType.Nelems) { 718 return coreField{}, coreField{}, fmt.Errorf("out of bounds access of target: %w", errImpossibleRelocation) 719 } 720 721 local = coreField{ 722 Type: localType.Type, 723 offset: local.offset, 724 } 725 localMaybeFlex = false 726 727 if err := local.adjustOffsetToNthElement(acc); err != nil { 728 return coreField{}, coreField{}, err 729 } 730 731 target = coreField{ 732 Type: targetType.Type, 733 offset: target.offset, 734 } 735 targetMaybeFlex = false 736 737 if err := target.adjustOffsetToNthElement(acc); err != nil { 738 return coreField{}, coreField{}, err 739 } 740 741 default: 742 return coreField{}, coreField{}, fmt.Errorf("relocate field of %T: %w", localType, ErrNotSupported) 743 } 744 745 if err := coreAreMembersCompatible(local.Type, target.Type); err != nil { 746 return coreField{}, coreField{}, err 747 } 748 } 749 750 return local, target, nil 751 } 752 753 // coreFindMember finds a member in a composite type while handling anonymous 754 // structs and unions. 
func coreFindMember(typ composite, name string) (Member, bool, error) {
	if name == "" {
		return Member{}, false, errors.New("can't search for anonymous member")
	}

	// A composite to search, plus the accumulated byte/bit offset of that
	// composite from the root type.
	type offsetTarget struct {
		composite
		offset Bits
	}

	// Breadth-first worklist over the root type and any anonymous
	// struct/union members encountered along the way.
	targets := []offsetTarget{{typ, 0}}
	visited := make(map[composite]bool)

	for i := 0; i < len(targets); i++ {
		target := targets[i]

		// Only visit targets once to prevent infinite recursion.
		if visited[target] {
			continue
		}
		if len(visited) >= maxTypeDepth {
			// This check is different than libbpf, which restricts the entire
			// path to BPF_CORE_SPEC_MAX_LEN items.
			return Member{}, false, fmt.Errorf("type is nested too deep")
		}
		visited[target] = true

		members := target.members()
		for j, member := range members {
			if member.Name == name {
				// NB: This is safe because member is a copy.
				member.Offset += target.offset
				// The second result reports whether the member is the last in
				// its enclosing composite; callers use this for flexible
				// array detection.
				return member, j == len(members)-1, nil
			}

			// The names don't match, but this member could be an anonymous struct
			// or union.
			if member.Name != "" {
				continue
			}

			comp, ok := member.Type.(composite)
			if !ok {
				return Member{}, false, fmt.Errorf("anonymous non-composite type %T not allowed", member.Type)
			}

			// Queue the anonymous composite with its offset folded in.
			targets = append(targets, offsetTarget{comp, target.offset + member.Offset})
		}
	}

	return Member{}, false, fmt.Errorf("no matching member: %w", errImpossibleRelocation)
}

// coreFindEnumValue follows localAcc to find the equivalent enum value in target.
func coreFindEnumValue(local Type, localAcc coreAccessor, target Type) (localValue, targetValue *EnumValue, _ error) {
	localValue, err := localAcc.enumValue(local)
	if err != nil {
		return nil, nil, err
	}

	targetEnum, ok := target.(*Enum)
	if !ok {
		return nil, nil, errImpossibleRelocation
	}

	// Match by essential name, i.e. ignoring any "___flavour" suffix.
	localName := newEssentialName(localValue.Name)
	for i, targetValue := range targetEnum.Values {
		if newEssentialName(targetValue.Name) != localName {
			continue
		}

		// NB: the loop variable shadows the named return; the pointer is
		// taken from the slice, not from the loop copy.
		return localValue, &targetEnum.Values[i], nil
	}

	return nil, nil, errImpossibleRelocation
}

/* The comment below is from bpf_core_types_are_compat in libbpf.c:
 *
 * Check local and target types for compatibility. This check is used for
 * type-based CO-RE relocations and follow slightly different rules than
 * field-based relocations. This function assumes that root types were already
 * checked for name match. Beyond that initial root-level name check, names
 * are completely ignored. Compatibility rules are as follows:
 * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
 *   kind should match for local and target types (i.e., STRUCT is not
 *   compatible with UNION);
 * - for ENUMs, the size is ignored;
 * - for INT, size and signedness are ignored;
 * - for ARRAY, dimensionality is ignored, element types are checked for
 *   compatibility recursively;
 * - CONST/VOLATILE/RESTRICT modifiers are ignored;
 * - TYPEDEFs/PTRs are compatible if types they pointing to are compatible;
 * - FUNC_PROTOs are compatible if they have compatible signature: same
 *   number of input args and compatible return and argument types.
 * These rules are not set in stone and probably will be adjusted as we get
 * more experience with using BPF CO-RE relocations.
 *
 * Returns errImpossibleRelocation if types are not compatible.
 */
func coreAreTypesCompatible(localType Type, targetType Type) error {
	// Walk both type graphs in lockstep using two queues instead of
	// recursion; depth is only increased when descending into children.
	var (
		localTs, targetTs typeDeque
		l, t              = &localType, &targetType
		depth             = 0
	)

	for ; l != nil && t != nil; l, t = localTs.Shift(), targetTs.Shift() {
		if depth >= maxTypeDepth {
			return errors.New("types are nested too deep")
		}

		localType = *l
		targetType = *t

		// Both sides must be the same kind of type.
		if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
			return fmt.Errorf("type mismatch: %w", errImpossibleRelocation)
		}

		switch lv := (localType).(type) {
		case *Void, *Struct, *Union, *Enum, *Fwd, *Int:
			// Nothing to do here

		case *Pointer, *Array:
			// Queue the pointee / element types for comparison.
			depth++
			walkType(localType, localTs.Push)
			walkType(targetType, targetTs.Push)

		case *FuncProto:
			tv := targetType.(*FuncProto)
			if len(lv.Params) != len(tv.Params) {
				return fmt.Errorf("function param mismatch: %w", errImpossibleRelocation)
			}

			// Queue return and parameter types for comparison.
			depth++
			walkType(localType, localTs.Push)
			walkType(targetType, targetTs.Push)

		default:
			return fmt.Errorf("unsupported type %T", localType)
		}
	}

	// Both queues must drain at the same time; a leftover entry on either
	// side indicates an internal inconsistency.
	if l != nil {
		return fmt.Errorf("dangling local type %T", *l)
	}

	if t != nil {
		return fmt.Errorf("dangling target type %T", *t)
	}

	return nil
}

/* coreAreMembersCompatible checks two types for field-based relocation compatibility.
 *
 * The comment below is from bpf_core_fields_are_compat in libbpf.c:
 *
 * Check two types for compatibility for the purpose of field access
 * relocation. const/volatile/restrict and typedefs are skipped to ensure we
 * are relocating semantically compatible entities:
 * - any two STRUCTs/UNIONs are compatible and can be mixed;
 * - any two FWDs are compatible, if their names match (modulo flavor suffix);
 * - any two PTRs are always compatible;
 * - for ENUMs, names should be the same (ignoring flavor suffix) or at
 *   least one of enums should be anonymous;
 * - for ENUMs, check sizes, names are ignored;
 * - for INT, size and signedness are ignored;
 * - any two FLOATs are always compatible;
 * - for ARRAY, dimensionality is ignored, element types are checked for
 *   compatibility recursively;
 *   [ NB: coreAreMembersCompatible doesn't recurse, this check is done
 *     by coreFindField. ]
 * - everything else shouldn't be ever a target of relocation.
 * These rules are not set in stone and probably will be adjusted as we get
 * more experience with using BPF CO-RE relocations.
 *
 * Returns errImpossibleRelocation if the members are not compatible.
 */
func coreAreMembersCompatible(localType Type, targetType Type) error {
	doNamesMatch := func(a, b string) error {
		if a == "" || b == "" {
			// allow anonymous and named type to match
			return nil
		}

		if newEssentialName(a) == newEssentialName(b) {
			return nil
		}

		return fmt.Errorf("names don't match: %w", errImpossibleRelocation)
	}

	// Any two composites (struct/union) are compatible, regardless of kind.
	_, lok := localType.(composite)
	_, tok := targetType.(composite)
	if lok && tok {
		return nil
	}

	// Otherwise both sides must be the same kind of type.
	if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
		return fmt.Errorf("type mismatch: %w", errImpossibleRelocation)
	}

	switch lv := localType.(type) {
	case *Array, *Pointer, *Float, *Int:
		return nil

	case *Enum:
		tv := targetType.(*Enum)
		return doNamesMatch(lv.Name, tv.Name)

	case *Fwd:
		tv := targetType.(*Fwd)
		return doNamesMatch(lv.Name, tv.Name)

	default:
		return fmt.Errorf("type %s: %w", localType, ErrNotSupported)
	}
}