// github.com/cilium/ebpf@v0.16.0/btf/core.go

package btf

import (
	"encoding/binary"
	"errors"
	"fmt"
	"math"
	"reflect"
	"slices"
	"strconv"
	"strings"

	"github.com/cilium/ebpf/asm"
)

// Code in this file is derived from libbpf, which is available under a BSD
// 2-Clause license.

// A constant used when CO-RE relocation has to remove instructions.
//
// Taken from libbpf.
const COREBadRelocationSentinel = 0xbad2310

// COREFixup is the result of computing a CO-RE relocation for a target.
type COREFixup struct {
	kind   coreKind
	local  uint64
	target uint64
	// True if there is no valid fixup. The instruction is replaced with an
	// invalid dummy.
	poison bool
	// True if the validation of the local value should be skipped. Used by
	// some kinds of bitfield relocations.
	skipLocalValidation bool
}

func (f *COREFixup) equal(other COREFixup) bool {
	return f.local == other.local && f.target == other.target
}

func (f *COREFixup) String() string {
	if f.poison {
		return fmt.Sprintf("%s=poison", f.kind)
	}
	return fmt.Sprintf("%s=%d->%d", f.kind, f.local, f.target)
}

func (f *COREFixup) Apply(ins *asm.Instruction) error {
	if f.poison {
		// Relocation is poisoned, replace the instruction with an invalid one.
		if ins.OpCode.IsDWordLoad() {
			// Replace a dword load with an invalid dword load to preserve instruction size.
			*ins = asm.LoadImm(asm.R10, COREBadRelocationSentinel, asm.DWord)
		} else {
			// Replace any single-size instruction with an invalid call instruction.
			*ins = asm.BuiltinFunc(COREBadRelocationSentinel).Call()
		}

		// Add context to the kernel verifier output.
		if source := ins.Source(); source != nil {
			*ins = ins.WithSource(asm.Comment(fmt.Sprintf("instruction poisoned by CO-RE: %s", source)))
		} else {
			*ins = ins.WithSource(asm.Comment("instruction poisoned by CO-RE"))
		}

		return nil
	}

	switch class := ins.OpCode.Class(); class {
	case asm.LdXClass, asm.StClass, asm.StXClass:
		if want := int16(f.local); !f.skipLocalValidation && want != ins.Offset {
			return fmt.Errorf("invalid offset %d, expected %d", ins.Offset, f.local)
		}

		if f.target > math.MaxInt16 {
			return fmt.Errorf("offset %d exceeds MaxInt16", f.target)
		}

		ins.Offset = int16(f.target)

	case asm.LdClass:
		if !ins.IsConstantLoad(asm.DWord) {
			return fmt.Errorf("not a dword-sized immediate load")
		}

		if want := int64(f.local); !f.skipLocalValidation && want != ins.Constant {
			return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v)", ins.Constant, want, f)
		}

		ins.Constant = int64(f.target)

	case asm.ALUClass:
		if ins.OpCode.ALUOp() == asm.Swap {
			return fmt.Errorf("relocation against swap")
		}

		fallthrough

	case asm.ALU64Class:
		if src := ins.OpCode.Source(); src != asm.ImmSource {
			return fmt.Errorf("invalid source %s", src)
		}

		if want := int64(f.local); !f.skipLocalValidation && want != ins.Constant {
			return fmt.Errorf("invalid immediate %d, expected %d (fixup: %v, kind: %v, ins: %v)", ins.Constant, want, f, f.kind, ins)
		}

		if f.target > math.MaxInt32 {
			return fmt.Errorf("immediate %d exceeds MaxInt32", f.target)
		}

		ins.Constant = int64(f.target)

	default:
		return fmt.Errorf("invalid class %s", class)
	}

	return nil
}
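// Example (illustrative sketch, not part of the original file): applying a
// byte-offset fixup to a memory load. The instruction and offsets are made up
// and exampleApplyFixup is a hypothetical name.
func exampleApplyFixup() {
	// Load a word from [R1+8], i.e. a field at local offset 8.
	ins := asm.LoadMem(asm.R0, asm.R1, 8, asm.Word)

	// The same field lives at offset 16 in the target kernel.
	f := COREFixup{kind: reloFieldByteOffset, local: 8, target: 16}
	if err := f.Apply(&ins); err != nil {
		panic(err)
	}
	// ins.Offset is now 16.
}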
func (f COREFixup) isNonExistent() bool {
	return f.kind.checksForExistence() && f.target == 0
}

// coreKind is the type of CO-RE relocation as specified in BPF source code.
type coreKind uint32

const (
	reloFieldByteOffset coreKind = iota /* field byte offset */
	reloFieldByteSize                   /* field size in bytes */
	reloFieldExists                     /* field existence in target kernel */
	reloFieldSigned                     /* field signedness (0 - unsigned, 1 - signed) */
	reloFieldLShiftU64                  /* bitfield-specific left bitshift */
	reloFieldRShiftU64                  /* bitfield-specific right bitshift */
	reloTypeIDLocal                     /* type ID in local BPF object */
	reloTypeIDTarget                    /* type ID in target kernel */
	reloTypeExists                      /* type existence in target kernel */
	reloTypeSize                        /* type size in bytes */
	reloEnumvalExists                   /* enum value existence in target kernel */
	reloEnumvalValue                    /* enum value integer value */
	reloTypeMatches                     /* type matches kernel type */
)

func (k coreKind) checksForExistence() bool {
	return k == reloEnumvalExists || k == reloTypeExists || k == reloFieldExists || k == reloTypeMatches
}

func (k coreKind) String() string {
	switch k {
	case reloFieldByteOffset:
		return "byte_off"
	case reloFieldByteSize:
		return "byte_sz"
	case reloFieldExists:
		return "field_exists"
	case reloFieldSigned:
		return "signed"
	case reloFieldLShiftU64:
		return "lshift_u64"
	case reloFieldRShiftU64:
		return "rshift_u64"
	case reloTypeIDLocal:
		return "local_type_id"
	case reloTypeIDTarget:
		return "target_type_id"
	case reloTypeExists:
		return "type_exists"
	case reloTypeSize:
		return "type_size"
	case reloEnumvalExists:
		return "enumval_exists"
	case reloEnumvalValue:
		return "enumval_value"
	case reloTypeMatches:
		return "type_matches"
	default:
		return fmt.Sprintf("unknown (%d)", k)
	}
}

// CORERelocate calculates changes needed to adjust eBPF instructions for differences
// in types.
//
// targets forms the set of types to relocate against. The first element has to be
// BTF for vmlinux, the following must be types for kernel modules.
//
// resolveLocalTypeID is called for each local type which requires a stable TypeID.
// Calling the function with the same type multiple times must produce the same
// result. It is the caller's responsibility to ensure that the relocated instructions
// are loaded with matching BTF.
//
// Returns a list of fixups which can be applied to instructions to make them
// match the target type(s).
//
// Fixups are returned in the order of relos, i.e. fixup[i] is the solution
// for relos[i].
func CORERelocate(relos []*CORERelocation, targets []*Spec, bo binary.ByteOrder, resolveLocalTypeID func(Type) (TypeID, error)) ([]COREFixup, error) {
	if len(targets) == 0 {
		// Explicitly check for nil here since the argument used to be optional.
		return nil, fmt.Errorf("targets must be provided")
	}

	// We can't encode type IDs that aren't for vmlinux into instructions at the
	// moment.
	resolveTargetTypeID := targets[0].TypeID

	for _, target := range targets {
		if bo != target.imm.byteOrder {
			return nil, fmt.Errorf("can't relocate %s against %s", bo, target.imm.byteOrder)
		}
	}

	type reloGroup struct {
		relos []*CORERelocation
		// Position of each relocation in relos.
		indices []int
	}

	// Split relocations into per-Type lists.
	relosByType := make(map[Type]*reloGroup)
	result := make([]COREFixup, len(relos))
	for i, relo := range relos {
		if relo.kind == reloTypeIDLocal {
			// Filtering out reloTypeIDLocal here makes our lives a lot easier
			// down the line, since it doesn't have a target at all.
			if len(relo.accessor) > 1 || relo.accessor[0] != 0 {
				return nil, fmt.Errorf("%s: unexpected accessor %v", relo.kind, relo.accessor)
			}

			id, err := resolveLocalTypeID(relo.typ)
			if err != nil {
				return nil, fmt.Errorf("%s: get type id: %w", relo.kind, err)
			}

			result[i] = COREFixup{
				kind:   relo.kind,
				local:  uint64(relo.id),
				target: uint64(id),
			}
			continue
		}

		group, ok := relosByType[relo.typ]
		if !ok {
			group = &reloGroup{}
			relosByType[relo.typ] = group
		}
		group.relos = append(group.relos, relo)
		group.indices = append(group.indices, i)
	}

	for localType, group := range relosByType {
		localTypeName := localType.TypeName()
		if localTypeName == "" {
			return nil, fmt.Errorf("relocate unnamed or anonymous type %s: %w", localType, ErrNotSupported)
		}

		essentialName := newEssentialName(localTypeName)

		var targetTypes []Type
		for _, target := range targets {
			namedTypeIDs := target.imm.namedTypes[essentialName]
			targetTypes = slices.Grow(targetTypes, len(namedTypeIDs))
			for _, id := range namedTypeIDs {
				typ, err := target.TypeByID(id)
				if err != nil {
					return nil, err
				}

				targetTypes = append(targetTypes, typ)
			}
		}

		fixups, err := coreCalculateFixups(group.relos, targetTypes, bo, resolveTargetTypeID)
		if err != nil {
			return nil, fmt.Errorf("relocate %s: %w", localType, err)
		}

		for j, index := range group.indices {
			result[index] = fixups[j]
		}
	}

	return result, nil
}
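// Usage sketch (illustrative, not part of the original file): relocating a
// program's CO-RE relocations against kernel BTF. localSpec and kernelSpec are
// assumed to come from an ELF loader and LoadKernelSpec respectively, and
// exampleRelocate is a hypothetical name. A real caller must resolve local
// type IDs against the BTF blob it will actually load into the kernel; using
// localSpec.TypeID here is only an approximation of that.
func exampleRelocate(relos []*CORERelocation, localSpec, kernelSpec *Spec) ([]COREFixup, error) {
	// The byte order must match the target BTF, usually the native one.
	return CORERelocate(relos, []*Spec{kernelSpec}, binary.LittleEndian, localSpec.TypeID)
}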
var errAmbiguousRelocation = errors.New("ambiguous relocation")
var errImpossibleRelocation = errors.New("impossible relocation")
var errIncompatibleTypes = errors.New("incompatible types")

// coreCalculateFixups finds the target type that best matches all relocations.
//
// All relos must target the same type.
//
// The best target is determined by scoring: the less poisoning we have to do
// the better the target is.
func coreCalculateFixups(relos []*CORERelocation, targets []Type, bo binary.ByteOrder, resolveTargetTypeID func(Type) (TypeID, error)) ([]COREFixup, error) {
	bestScore := len(relos)
	var bestFixups []COREFixup
	for _, target := range targets {
		score := 0 // lower is better
		fixups := make([]COREFixup, 0, len(relos))
		for _, relo := range relos {
			fixup, err := coreCalculateFixup(relo, target, bo, resolveTargetTypeID)
			if err != nil {
				return nil, fmt.Errorf("target %s: %s: %w", target, relo.kind, err)
			}
			if fixup.poison || fixup.isNonExistent() {
				score++
			}
			fixups = append(fixups, fixup)
		}

		if score > bestScore {
			// We have a better target already, ignore this one.
			continue
		}

		if score < bestScore {
			// This is the best target yet, use it.
			bestScore = score
			bestFixups = fixups
			continue
		}

		// Some other target has the same score as the current one. Make sure
		// the fixups agree with each other.
		for i, fixup := range bestFixups {
			if !fixup.equal(fixups[i]) {
				return nil, fmt.Errorf("%s: multiple types match: %w", fixup.kind, errAmbiguousRelocation)
			}
		}
	}

	if bestFixups == nil {
		// Nothing at all matched, probably because there are no suitable
		// targets at all.
		//
		// Poison everything except checksForExistence.
		bestFixups = make([]COREFixup, len(relos))
		for i, relo := range relos {
			if relo.kind.checksForExistence() {
				bestFixups[i] = COREFixup{kind: relo.kind, local: 1, target: 0}
			} else {
				bestFixups[i] = COREFixup{kind: relo.kind, poison: true}
			}
		}
	}

	return bestFixups, nil
}
var errNoSignedness = errors.New("no signedness")

// coreCalculateFixup calculates the fixup given a relocation and a target type.
func coreCalculateFixup(relo *CORERelocation, target Type, bo binary.ByteOrder, resolveTargetTypeID func(Type) (TypeID, error)) (COREFixup, error) {
	fixup := func(local, target uint64) (COREFixup, error) {
		return COREFixup{kind: relo.kind, local: local, target: target}, nil
	}
	fixupWithoutValidation := func(local, target uint64) (COREFixup, error) {
		return COREFixup{kind: relo.kind, local: local, target: target, skipLocalValidation: true}, nil
	}
	poison := func() (COREFixup, error) {
		if relo.kind.checksForExistence() {
			return fixup(1, 0)
		}
		return COREFixup{kind: relo.kind, poison: true}, nil
	}
	zero := COREFixup{}

	local := relo.typ

	switch relo.kind {
	case reloTypeMatches:
		if len(relo.accessor) > 1 || relo.accessor[0] != 0 {
			return zero, fmt.Errorf("unexpected accessor %v", relo.accessor)
		}

		err := coreTypesMatch(local, target, nil)
		if errors.Is(err, errIncompatibleTypes) {
			return poison()
		}
		if err != nil {
			return zero, err
		}

		return fixup(1, 1)

	case reloTypeIDTarget, reloTypeSize, reloTypeExists:
		if len(relo.accessor) > 1 || relo.accessor[0] != 0 {
			return zero, fmt.Errorf("unexpected accessor %v", relo.accessor)
		}

		err := CheckTypeCompatibility(local, target)
		if errors.Is(err, errIncompatibleTypes) {
			return poison()
		}
		if err != nil {
			return zero, err
		}

		switch relo.kind {
		case reloTypeExists:
			return fixup(1, 1)

		case reloTypeIDTarget:
			targetID, err := resolveTargetTypeID(target)
			if errors.Is(err, ErrNotFound) {
				// Probably a relocation trying to get the ID
				// of a type from a kmod.
				return poison()
			}
			if err != nil {
				return zero, err
			}
			return fixup(uint64(relo.id), uint64(targetID))

		case reloTypeSize:
			localSize, err := Sizeof(local)
			if err != nil {
				return zero, err
			}

			targetSize, err := Sizeof(target)
			if err != nil {
				return zero, err
			}

			return fixup(uint64(localSize), uint64(targetSize))
		}

	case reloEnumvalValue, reloEnumvalExists:
		localValue, targetValue, err := coreFindEnumValue(local, relo.accessor, target)
		if errors.Is(err, errImpossibleRelocation) {
			return poison()
		}
		if err != nil {
			return zero, err
		}

		switch relo.kind {
		case reloEnumvalExists:
			return fixup(1, 1)

		case reloEnumvalValue:
			return fixup(localValue.Value, targetValue.Value)
		}

	case reloFieldByteOffset, reloFieldByteSize, reloFieldExists, reloFieldLShiftU64, reloFieldRShiftU64, reloFieldSigned:
		if _, ok := As[*Fwd](target); ok {
			// We can't relocate fields using a forward declaration, so
			// skip it. If a non-forward declaration is present in the BTF
			// we'll find it in one of the other iterations.
			return poison()
		}

		localField, targetField, err := coreFindField(local, relo.accessor, target)
		if errors.Is(err, errImpossibleRelocation) {
			return poison()
		}
		if err != nil {
			return zero, err
		}

		maybeSkipValidation := func(f COREFixup, err error) (COREFixup, error) {
			f.skipLocalValidation = localField.bitfieldSize > 0
			return f, err
		}

		switch relo.kind {
		case reloFieldExists:
			return fixup(1, 1)

		case reloFieldByteOffset:
			return maybeSkipValidation(fixup(uint64(localField.offset), uint64(targetField.offset)))

		case reloFieldByteSize:
			localSize, err := Sizeof(localField.Type)
			if err != nil {
				return zero, err
			}

			targetSize, err := Sizeof(targetField.Type)
			if err != nil {
				return zero, err
			}
			return maybeSkipValidation(fixup(uint64(localSize), uint64(targetSize)))

		case reloFieldLShiftU64:
			var target uint64
			if bo == binary.LittleEndian {
				targetSize, err := targetField.sizeBits()
				if err != nil {
					return zero, err
				}

				target = uint64(64 - targetField.bitfieldOffset - targetSize)
			} else {
				loadWidth, err := Sizeof(targetField.Type)
				if err != nil {
					return zero, err
				}

				target = uint64(64 - Bits(loadWidth*8) + targetField.bitfieldOffset)
			}
			return fixupWithoutValidation(0, target)

		case reloFieldRShiftU64:
			targetSize, err := targetField.sizeBits()
			if err != nil {
				return zero, err
			}

			return fixupWithoutValidation(0, uint64(64-targetSize))

		case reloFieldSigned:
			switch local := UnderlyingType(localField.Type).(type) {
			case *Enum:
				target, ok := As[*Enum](targetField.Type)
				if !ok {
					return zero, fmt.Errorf("target isn't *Enum but %T", targetField.Type)
				}

				return fixup(boolToUint64(local.Signed), boolToUint64(target.Signed))
			case *Int:
				target, ok := As[*Int](targetField.Type)
				if !ok {
					return zero, fmt.Errorf("target isn't *Int but %T", targetField.Type)
				}

				return fixup(
					uint64(local.Encoding&Signed),
					uint64(target.Encoding&Signed),
				)
			default:
				return zero, fmt.Errorf("type %T: %w", local, errNoSignedness)
			}
		}
	}

	return zero, ErrNotSupported
}

func boolToUint64(val bool) uint64 {
	if val {
		return 1
	}
	return 0
}
/* coreAccessor contains a path through a struct. It contains at least one index.
 *
 * The interpretation depends on the kind of the relocation. The following is
 * taken from struct bpf_core_relo in libbpf_internal.h:
 *
 * - for field-based relocations, the string encodes an accessed field using
 *   a sequence of field and array indices, separated by colon (:). It's
 *   conceptually very close to LLVM's getelementptr ([0]) instruction's
 *   arguments for identifying offset to a field.
 * - for type-based relocations, the string is expected to be just "0";
 * - for enum value-based relocations, the string contains an index of the enum
 *   value within its enum type;
 *
 * Example to provide a better feel.
 *
 *   struct sample {
 *       int a;
 *       struct {
 *           int b[10];
 *       };
 *   };
 *
 *   struct sample s = ...;
 *   int x = &s->a;     // encoded as "0:0" (a is field #0)
 *   int y = &s->b[5];  // encoded as "0:1:0:5" (anon struct is field #1,
 *                      // b is field #0 inside anon struct, accessing elem #5)
 *   int z = &s[10]->b; // encoded as "10:1" (ptr is used as an array)
 */
type coreAccessor []int

func parseCOREAccessor(accessor string) (coreAccessor, error) {
	if accessor == "" {
		return nil, fmt.Errorf("empty accessor")
	}

	parts := strings.Split(accessor, ":")
	result := make(coreAccessor, 0, len(parts))
	for _, part := range parts {
		// 31 bits to avoid overflowing int on 32 bit platforms.
		index, err := strconv.ParseUint(part, 10, 31)
		if err != nil {
			return nil, fmt.Errorf("accessor index %q: %s", part, err)
		}

		result = append(result, int(index))
	}

	return result, nil
}

func (ca coreAccessor) String() string {
	strs := make([]string, 0, len(ca))
	for _, i := range ca {
		strs = append(strs, strconv.Itoa(i))
	}
	return strings.Join(strs, ":")
}
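// Example (illustrative, not part of the original file): parsing the encoded
// accessor for &s->b[5] from the struct sample comment above. The function
// name exampleParseAccessor is hypothetical.
func exampleParseAccessor() {
	acc, err := parseCOREAccessor("0:1:0:5")
	if err != nil {
		panic(err)
	}
	fmt.Println(acc) // prints "0:1:0:5"
}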
func (ca coreAccessor) enumValue(t Type) (*EnumValue, error) {
	e, ok := As[*Enum](t)
	if !ok {
		return nil, fmt.Errorf("not an enum: %s", t)
	}

	if len(ca) > 1 {
		return nil, fmt.Errorf("invalid accessor %s for enum", ca)
	}

	i := ca[0]
	if i >= len(e.Values) {
		return nil, fmt.Errorf("invalid index %d for %s", i, e)
	}

	return &e.Values[i], nil
}
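// Example (illustrative, not part of the original file): resolving an enum
// value through a single-index accessor. The enum and the name
// exampleEnumValue are hypothetical.
func exampleEnumValue() {
	e := &Enum{Name: "state", Values: []EnumValue{
		{Name: "RUNNING", Value: 0},
		{Name: "STOPPED", Value: 1},
	}}
	v, err := coreAccessor{1}.enumValue(e)
	if err != nil {
		panic(err)
	}
	fmt.Println(v.Name, v.Value) // STOPPED 1
}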
// coreField represents the position of a "child" of a composite type from the
// start of that type.
//
//	/- start of composite
//	| offset * 8 | bitfieldOffset | bitfieldSize | ... |
//	             \- start of field       end of field -/
type coreField struct {
	Type Type

	// The position of the field from the start of the composite type in bytes.
	offset uint32

	// The offset of the bitfield in bits from the start of the field.
	bitfieldOffset Bits

	// The size of the bitfield in bits.
	//
	// Zero if the field is not a bitfield.
	bitfieldSize Bits
}

func (cf *coreField) adjustOffsetToNthElement(n int) error {
	if n == 0 {
		return nil
	}

	size, err := Sizeof(cf.Type)
	if err != nil {
		return err
	}

	cf.offset += uint32(n) * uint32(size)
	return nil
}

func (cf *coreField) adjustOffsetBits(offset Bits) error {
	align, err := alignof(cf.Type)
	if err != nil {
		return err
	}

	// We can compute the load offset by:
	// 1) converting the bit offset to bytes with a flooring division.
	// 2) dividing and multiplying that offset by the alignment, yielding the
	//    load size aligned offset.
	offsetBytes := uint32(offset/8) / uint32(align) * uint32(align)

	// The number of bits remaining is the bit offset less the number of bits
	// we can "skip" with the aligned offset.
	cf.bitfieldOffset = offset - Bits(offsetBytes*8)

	// We know that cf.offset is aligned to at least align since we get it
	// from the compiler via BTF. Adding an aligned offsetBytes preserves the
	// alignment.
	cf.offset += offsetBytes
	return nil
}

func (cf *coreField) sizeBits() (Bits, error) {
	if cf.bitfieldSize > 0 {
		return cf.bitfieldSize, nil
	}

	// Someone is trying to access a non-bitfield via a bit shift relocation.
	// This happens when a field changes from a bitfield to a regular field
	// between kernel versions. Synthesise the size to make the shifts work.
	size, err := Sizeof(cf.Type)
	if err != nil {
		return 0, err
	}
	return Bits(size * 8), nil
}
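// Worked example (illustrative, not part of the original file): a bitfield at
// bit offset 44 inside a field whose type is a 4-byte aligned u32, assuming
// alignof returns the Int's size. The flooring divisions in adjustOffsetBits
// yield:
//
//	offsetBytes    = (44 / 8) / 4 * 4 = 4   // aligned byte offset of the load
//	bitfieldOffset = 44 - 4*8         = 12  // remaining bits within the load
//
// The name exampleAdjustOffsetBits is hypothetical.
func exampleAdjustOffsetBits() {
	cf := coreField{Type: &Int{Size: 4}}
	if err := cf.adjustOffsetBits(44); err != nil {
		panic(err)
	}
	fmt.Println(cf.offset, cf.bitfieldOffset) // 4 12
}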
// coreFindField descends into the local type using the accessor and tries to
// find an equivalent field in target at each step.
//
// Returns the local and target fields, including their offsets from the start
// of their respective root types.
func coreFindField(localT Type, localAcc coreAccessor, targetT Type) (coreField, coreField, error) {
	local := coreField{Type: localT}
	target := coreField{Type: targetT}

	if err := coreAreMembersCompatible(local.Type, target.Type); err != nil {
		return coreField{}, coreField{}, fmt.Errorf("fields: %w", err)
	}

	// The first index is used to offset a pointer of the base type like
	// when accessing an array.
	if err := local.adjustOffsetToNthElement(localAcc[0]); err != nil {
		return coreField{}, coreField{}, err
	}

	if err := target.adjustOffsetToNthElement(localAcc[0]); err != nil {
		return coreField{}, coreField{}, err
	}

	var localMaybeFlex, targetMaybeFlex bool
	for i, acc := range localAcc[1:] {
		switch localType := UnderlyingType(local.Type).(type) {
		case composite:
			// For composite types acc is used to find the field in the local type,
			// and then we try to find a field in target with the same name.
			localMembers := localType.members()
			if acc >= len(localMembers) {
				return coreField{}, coreField{}, fmt.Errorf("invalid accessor %d for %s", acc, localType)
			}

			localMember := localMembers[acc]
			if localMember.Name == "" {
				localMemberType, ok := As[composite](localMember.Type)
				if !ok {
					return coreField{}, coreField{}, fmt.Errorf("unnamed field with type %s: %s", localMember.Type, ErrNotSupported)
				}

				// This is an anonymous struct or union, ignore it.
				local = coreField{
					Type:   localMemberType,
					offset: local.offset + localMember.Offset.Bytes(),
				}
				localMaybeFlex = false
				continue
			}

			targetType, ok := As[composite](target.Type)
			if !ok {
				return coreField{}, coreField{}, fmt.Errorf("target not composite: %w", errImpossibleRelocation)
			}

			targetMember, last, err := coreFindMember(targetType, localMember.Name)
			if err != nil {
				return coreField{}, coreField{}, err
			}

			local = coreField{
				Type:         localMember.Type,
				offset:       local.offset,
				bitfieldSize: localMember.BitfieldSize,
			}
			localMaybeFlex = acc == len(localMembers)-1

			target = coreField{
				Type:         targetMember.Type,
				offset:       target.offset,
				bitfieldSize: targetMember.BitfieldSize,
			}
			targetMaybeFlex = last

			if local.bitfieldSize == 0 && target.bitfieldSize == 0 {
				local.offset += localMember.Offset.Bytes()
				target.offset += targetMember.Offset.Bytes()
				break
			}

			// Either of the members is a bitfield. Make sure we're at the
			// end of the accessor.
			if next := i + 1; next < len(localAcc[1:]) {
				return coreField{}, coreField{}, fmt.Errorf("can't descend into bitfield")
			}

			if err := local.adjustOffsetBits(localMember.Offset); err != nil {
				return coreField{}, coreField{}, err
			}

			if err := target.adjustOffsetBits(targetMember.Offset); err != nil {
				return coreField{}, coreField{}, err
			}

		case *Array:
			// For arrays, acc is the index in the target.
			targetType, ok := As[*Array](target.Type)
			if !ok {
				return coreField{}, coreField{}, fmt.Errorf("target not array: %w", errImpossibleRelocation)
			}

			if localType.Nelems == 0 && !localMaybeFlex {
				return coreField{}, coreField{}, fmt.Errorf("local type has invalid flexible array")
			}
			if targetType.Nelems == 0 && !targetMaybeFlex {
				return coreField{}, coreField{}, fmt.Errorf("target type has invalid flexible array")
			}

			if localType.Nelems > 0 && acc >= int(localType.Nelems) {
				return coreField{}, coreField{}, fmt.Errorf("invalid access of %s at index %d", localType, acc)
			}
			if targetType.Nelems > 0 && acc >= int(targetType.Nelems) {
				return coreField{}, coreField{}, fmt.Errorf("out of bounds access of target: %w", errImpossibleRelocation)
			}

			local = coreField{
				Type:   localType.Type,
				offset: local.offset,
			}
			localMaybeFlex = false

			if err := local.adjustOffsetToNthElement(acc); err != nil {
				return coreField{}, coreField{}, err
			}

			target = coreField{
				Type:   targetType.Type,
				offset: target.offset,
			}
			targetMaybeFlex = false

			if err := target.adjustOffsetToNthElement(acc); err != nil {
				return coreField{}, coreField{}, err
			}

		default:
			return coreField{}, coreField{}, fmt.Errorf("relocate field of %T: %w", localType, ErrNotSupported)
		}

		if err := coreAreMembersCompatible(local.Type, target.Type); err != nil {
			return coreField{}, coreField{}, err
		}
	}

	return local, target, nil
}
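// Example (illustrative, not part of the original file): finding a field that
// moved between local and target layouts. The structs and the name
// exampleFindField are hypothetical; Member offsets are in bits.
func exampleFindField() {
	local := &Struct{Name: "s", Size: 8, Members: []Member{
		{Name: "a", Type: &Int{Size: 4}, Offset: 0},
	}}
	target := &Struct{Name: "s", Size: 8, Members: []Member{
		{Name: "pad", Type: &Int{Size: 4}, Offset: 0},
		{Name: "a", Type: &Int{Size: 4}, Offset: 32},
	}}

	// Accessor "0:0": first element of the base type, then field #0.
	lf, tf, err := coreFindField(local, coreAccessor{0, 0}, target)
	if err != nil {
		panic(err)
	}
	fmt.Println(lf.offset, tf.offset) // 0 4 (byte offsets)
}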
// coreFindMember finds a member in a composite type while handling anonymous
// structs and unions.
func coreFindMember(typ composite, name string) (Member, bool, error) {
	if name == "" {
		return Member{}, false, errors.New("can't search for anonymous member")
	}

	type offsetTarget struct {
		composite
		offset Bits
	}

	targets := []offsetTarget{{typ, 0}}
	visited := make(map[composite]bool)

	for i := 0; i < len(targets); i++ {
		target := targets[i]

		// Only visit targets once to prevent infinite recursion.
		if visited[target] {
			continue
		}
		if len(visited) >= maxResolveDepth {
			// This check is different than libbpf, which restricts the entire
			// path to BPF_CORE_SPEC_MAX_LEN items.
			return Member{}, false, fmt.Errorf("type is nested too deep")
		}
		visited[target] = true

		members := target.members()
		for j, member := range members {
			if member.Name == name {
				// NB: This is safe because member is a copy.
				member.Offset += target.offset
				return member, j == len(members)-1, nil
			}

			// The names don't match, but this member could be an anonymous struct
			// or union.
			if member.Name != "" {
				continue
			}

			comp, ok := As[composite](member.Type)
			if !ok {
				return Member{}, false, fmt.Errorf("anonymous non-composite type %T not allowed", member.Type)
			}

			targets = append(targets, offsetTarget{comp, target.offset + member.Offset})
		}
	}

	return Member{}, false, fmt.Errorf("no matching member: %w", errImpossibleRelocation)
}

// coreFindEnumValue follows localAcc to find the equivalent enum value in target.
func coreFindEnumValue(local Type, localAcc coreAccessor, target Type) (localValue, targetValue *EnumValue, _ error) {
	localValue, err := localAcc.enumValue(local)
	if err != nil {
		return nil, nil, err
	}

	targetEnum, ok := As[*Enum](target)
	if !ok {
		return nil, nil, errImpossibleRelocation
	}

	localName := newEssentialName(localValue.Name)
	for i, targetValue := range targetEnum.Values {
		if newEssentialName(targetValue.Name) != localName {
			continue
		}

		return localValue, &targetEnum.Values[i], nil
	}

	return nil, nil, errImpossibleRelocation
}

// CheckTypeCompatibility checks local and target types for compatibility according to CO-RE rules.
//
// Only layout compatibility is checked, ignoring names of the root type.
func CheckTypeCompatibility(localType Type, targetType Type) error {
	return coreAreTypesCompatible(localType, targetType, nil)
}
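// Example (illustrative, not part of the original file): for type-based
// compatibility, INT size and signedness are ignored, so a u32 is compatible
// with a signed 64-bit int. The name exampleTypeCompatibility is hypothetical.
func exampleTypeCompatibility() {
	err := CheckTypeCompatibility(&Int{Size: 4}, &Int{Size: 8, Encoding: Signed})
	fmt.Println(err) // <nil>
}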
type pair struct {
	A, B Type
}

/* The comment below is from bpf_core_types_are_compat in libbpf.c:
 *
 * Check local and target types for compatibility. This check is used for
 * type-based CO-RE relocations and follows slightly different rules than
 * field-based relocations. This function assumes that root types were already
 * checked for name match. Beyond that initial root-level name check, names
 * are completely ignored. Compatibility rules are as follows:
 * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
 *   kind should match for local and target types (i.e., STRUCT is not
 *   compatible with UNION);
 * - for ENUMs, the size is ignored;
 * - for INT, size and signedness are ignored;
 * - for ARRAY, dimensionality is ignored, element types are checked for
 *   compatibility recursively;
 * - CONST/VOLATILE/RESTRICT modifiers are ignored;
 * - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
 * - FUNC_PROTOs are compatible if they have a compatible signature: same
 *   number of input args and compatible return and argument types.
 * These rules are not set in stone and probably will be adjusted as we get
 * more experience with using BPF CO-RE relocations.
 *
 * Returns errIncompatibleTypes if types are not compatible.
 */
func coreAreTypesCompatible(localType Type, targetType Type, visited map[pair]struct{}) error {
	localType = UnderlyingType(localType)
	targetType = UnderlyingType(targetType)

	if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
		return fmt.Errorf("type mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes)
	}

	if _, ok := visited[pair{localType, targetType}]; ok {
		return nil
	}
	if visited == nil {
		visited = make(map[pair]struct{})
	}
	visited[pair{localType, targetType}] = struct{}{}

	switch lv := localType.(type) {
	case *Void, *Struct, *Union, *Enum, *Fwd, *Int:
		return nil

	case *Pointer:
		tv := targetType.(*Pointer)
		return coreAreTypesCompatible(lv.Target, tv.Target, visited)

	case *Array:
		tv := targetType.(*Array)
		if err := coreAreTypesCompatible(lv.Index, tv.Index, visited); err != nil {
			return err
		}

		return coreAreTypesCompatible(lv.Type, tv.Type, visited)

	case *FuncProto:
		tv := targetType.(*FuncProto)
		if err := coreAreTypesCompatible(lv.Return, tv.Return, visited); err != nil {
			return err
		}

		if len(lv.Params) != len(tv.Params) {
			return fmt.Errorf("function param mismatch: %w", errIncompatibleTypes)
		}

		for i, localParam := range lv.Params {
			targetParam := tv.Params[i]
			if err := coreAreTypesCompatible(localParam.Type, targetParam.Type, visited); err != nil {
				return err
			}
		}

		return nil

	default:
		return fmt.Errorf("unsupported type %T", localType)
	}
}
/* coreAreMembersCompatible checks two types for field-based relocation compatibility.
 *
 * The comment below is from bpf_core_fields_are_compat in libbpf.c:
 *
 * Check two types for compatibility for the purpose of field access
 * relocation. const/volatile/restrict and typedefs are skipped to ensure we
 * are relocating semantically compatible entities:
 * - any two STRUCTs/UNIONs are compatible and can be mixed;
 * - any two FWDs are compatible, if their names match (modulo flavor suffix);
 * - any two PTRs are always compatible;
 * - for ENUMs, names should be the same (ignoring flavor suffix) or at
 *   least one of enums should be anonymous;
 * - for ENUMs, check sizes, names are ignored;
 * - for INT, size and signedness are ignored;
 * - any two FLOATs are always compatible;
 * - for ARRAY, dimensionality is ignored, element types are checked for
 *   compatibility recursively;
 *   [ NB: coreAreMembersCompatible doesn't recurse, this check is done
 *     by coreFindField. ]
 * - everything else shouldn't ever be a target of relocation.
 * These rules are not set in stone and probably will be adjusted as we get
 * more experience with using BPF CO-RE relocations.
 *
 * Returns errImpossibleRelocation if the members are not compatible.
 */
func coreAreMembersCompatible(localType Type, targetType Type) error {
	localType = UnderlyingType(localType)
	targetType = UnderlyingType(targetType)

	_, lok := localType.(composite)
	_, tok := targetType.(composite)
	if lok && tok {
		return nil
	}

	if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
		return fmt.Errorf("type mismatch: %w", errImpossibleRelocation)
	}

	switch lv := localType.(type) {
	case *Array, *Pointer, *Float, *Int:
		return nil

	case *Enum:
		tv := targetType.(*Enum)
		if !coreEssentialNamesMatch(lv.Name, tv.Name) {
			return fmt.Errorf("names %q and %q don't match: %w", lv.Name, tv.Name, errImpossibleRelocation)
		}

		return nil

	case *Fwd:
		tv := targetType.(*Fwd)
		if !coreEssentialNamesMatch(lv.Name, tv.Name) {
			return fmt.Errorf("names %q and %q don't match: %w", lv.Name, tv.Name, errImpossibleRelocation)
		}

		return nil

	default:
		return fmt.Errorf("type %s: %w", localType, ErrNotSupported)
	}
}

// coreEssentialNamesMatch compares two names while ignoring their flavour suffix.
//
// This should only be used on names which are in the global scope, like struct
// names, typedefs or enum values.
func coreEssentialNamesMatch(a, b string) bool {
	if a == "" || b == "" {
		// allow anonymous and named type to match
		return true
	}

	return newEssentialName(a) == newEssentialName(b)
}
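// Example (illustrative, not part of the original file): flavour suffixes
// ("___" followed by anything) are ignored when comparing names, and an
// anonymous name matches anything. The name exampleEssentialNames is
// hypothetical.
func exampleEssentialNames() {
	fmt.Println(coreEssentialNamesMatch("task_struct___v2", "task_struct")) // true
	fmt.Println(coreEssentialNamesMatch("task_struct", ""))                 // true
}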
/* The comment below is from __bpf_core_types_match in relo_core.c:
 *
 * Check that two types "match". This function assumes that root types were
 * already checked for name match.
 *
 * The matching relation is defined as follows:
 * - modifiers and typedefs are stripped (and, hence, effectively ignored)
 * - generally speaking types need to be of the same kind (struct vs. struct,
 *   union vs. union, etc.)
 * - exceptions are struct/union behind a pointer which could also match a
 *   forward declaration of a struct or union, respectively, and enum vs.
 *   enum64 (see below)
 * Then, depending on type:
 * - integers:
 *   - match if size and signedness match
 * - arrays & pointers:
 *   - target types are recursively matched
 * - structs & unions:
 *   - local members need to exist in target with the same name
 *   - for each member we recursively check match unless it is already behind a
 *     pointer, in which case we only check matching names and compatible kind
 * - enums:
 *   - local variants have to have a match in target by symbolic name (but not
 *     numeric value)
 *   - size has to match (but enum may match enum64 and vice versa)
 * - function pointers:
 *   - number and position of arguments in local type has to match target
 *   - for each argument and the return value we recursively check match
 */
func coreTypesMatch(localType Type, targetType Type, visited map[pair]struct{}) error {
	localType = UnderlyingType(localType)
	targetType = UnderlyingType(targetType)

	if !coreEssentialNamesMatch(localType.TypeName(), targetType.TypeName()) {
		return fmt.Errorf("type name %q doesn't match %q: %w", localType.TypeName(), targetType.TypeName(), errIncompatibleTypes)
	}

	if reflect.TypeOf(localType) != reflect.TypeOf(targetType) {
		return fmt.Errorf("type mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes)
	}

	if _, ok := visited[pair{localType, targetType}]; ok {
		return nil
	}
	if visited == nil {
		visited = make(map[pair]struct{})
	}
	visited[pair{localType, targetType}] = struct{}{}

	switch lv := (localType).(type) {
	case *Void:

	case *Fwd:
		if targetType.(*Fwd).Kind != lv.Kind {
			return fmt.Errorf("fwd kind mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes)
		}

	case *Enum:
		return coreEnumsMatch(lv, targetType.(*Enum))

	case composite:
		tv := targetType.(composite)

		if len(lv.members()) > len(tv.members()) {
			return errIncompatibleTypes
		}

		localMembers := lv.members()
		targetMembers := map[string]Member{}
		for _, member := range tv.members() {
			targetMembers[member.Name] = member
		}

		for _, localMember := range localMembers {
			targetMember, found := targetMembers[localMember.Name]
			if !found {
				return fmt.Errorf("no field %q in %v: %w", localMember.Name, targetType, errIncompatibleTypes)
			}

			err := coreTypesMatch(localMember.Type, targetMember.Type, visited)
			if err != nil {
				return err
			}
		}

	case *Int:
		if !coreEncodingMatches(lv, targetType.(*Int)) {
			return fmt.Errorf("int mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes)
		}

	case *Pointer:
		tv := targetType.(*Pointer)

		// Allow a pointer to a forward declaration to match a struct
		// or union.
		if fwd, ok := As[*Fwd](lv.Target); ok && fwd.matches(tv.Target) {
			return nil
		}

		if fwd, ok := As[*Fwd](tv.Target); ok && fwd.matches(lv.Target) {
			return nil
		}

		return coreTypesMatch(lv.Target, tv.Target, visited)

	case *Array:
		tv := targetType.(*Array)

		if lv.Nelems != tv.Nelems {
			return fmt.Errorf("array mismatch between %v and %v: %w", localType, targetType, errIncompatibleTypes)
		}

		return coreTypesMatch(lv.Type, tv.Type, visited)

	case *FuncProto:
		tv := targetType.(*FuncProto)

		if len(lv.Params) != len(tv.Params) {
			return fmt.Errorf("function param mismatch: %w", errIncompatibleTypes)
		}

		for i, lparam := range lv.Params {
			if err := coreTypesMatch(lparam.Type, tv.Params[i].Type, visited); err != nil {
				return err
			}
		}

		return coreTypesMatch(lv.Return, tv.Return, visited)

	default:
		return fmt.Errorf("unsupported type %T", localType)
	}

	return nil
}

// coreEncodingMatches returns true if both ints have the same size and signedness.
// All encodings other than `Signed` are considered unsigned.
func coreEncodingMatches(local, target *Int) bool {
	return local.Size == target.Size && (local.Encoding == Signed) == (target.Encoding == Signed)
}

// coreEnumsMatch checks that two enums match, which is considered to be the case if the following is true:
// - size has to match (but enum may match enum64 and vice versa)
// - local variants have to have a match in target by symbolic name (but not numeric value)
func coreEnumsMatch(local *Enum, target *Enum) error {
	if local.Size != target.Size {
		return fmt.Errorf("size mismatch between %v and %v: %w", local, target, errIncompatibleTypes)
	}

	// If there are more values in the local than the target, there must be at least one value in the local
	// that isn't in the target, and therefore the types are incompatible.
	if len(local.Values) > len(target.Values) {
		return fmt.Errorf("local has more values than target: %w", errIncompatibleTypes)
	}

outer:
	for _, lv := range local.Values {
		for _, rv := range target.Values {
			if coreEssentialNamesMatch(lv.Name, rv.Name) {
				continue outer
			}
		}

		return fmt.Errorf("no match for %v in %v: %w", lv, target, errIncompatibleTypes)
	}

	return nil
}
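// Example (illustrative, not part of the original file): enum values match by
// essential name, not by numeric value, and the local enum may be a subset of
// the target. The enums and the name exampleEnumsMatch are hypothetical.
func exampleEnumsMatch() {
	local := &Enum{Size: 4, Values: []EnumValue{{Name: "A", Value: 1}}}
	target := &Enum{Size: 4, Values: []EnumValue{
		{Name: "A___v2", Value: 42}, // matches "A" despite the different value
		{Name: "B", Value: 2},
	}}
	fmt.Println(coreEnumsMatch(local, target)) // <nil>
}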