gvisor.dev/gvisor@v0.0.0-20240520182842-f9d4d51c7e0f/tools/go_marshal/test/marshal_test.go

// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package marshal_test contains manual tests for the marshal interface. These
// are intended to test behaviour not covered by the automatically generated
// tests.
package marshal_test

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"reflect"
	"runtime"
	"testing"
	"unsafe"

	"github.com/google/go-cmp/cmp"
	"gvisor.dev/gvisor/pkg/errors/linuxerr"
	"gvisor.dev/gvisor/pkg/hostarch"
	"gvisor.dev/gvisor/pkg/marshal"
	"gvisor.dev/gvisor/pkg/marshal/primitive"
	"gvisor.dev/gvisor/pkg/usermem"
	"gvisor.dev/gvisor/tools/go_marshal/analysis"
	"gvisor.dev/gvisor/tools/go_marshal/test"
)

var simulatedErr error = linuxerr.EFAULT

// mockCopyContext implements marshal.CopyContext.
type mockCopyContext struct {
	taskMem usermem.BytesIO
}

// populate fills the task memory with the contents of val.
func (t *mockCopyContext) populate(val any) {
	var buf bytes.Buffer
	// Use binary.Write so we aren't testing go-marshal against its own
	// potentially buggy implementation.
	if err := binary.Write(&buf, hostarch.ByteOrder, val); err != nil {
		panic(err)
	}
	t.taskMem.Bytes = buf.Bytes()
}

func (t *mockCopyContext) setLimit(n int) {
	if len(t.taskMem.Bytes) < n {
		grown := make([]byte, n)
		copy(grown, t.taskMem.Bytes)
		t.taskMem.Bytes = grown
		return
	}
	t.taskMem.Bytes = t.taskMem.Bytes[:n]
}

// CopyScratchBuffer implements marshal.CopyContext.CopyScratchBuffer.
func (t *mockCopyContext) CopyScratchBuffer(size int) []byte {
	return make([]byte, size)
}

// CopyOutBytes implements marshal.CopyContext.CopyOutBytes. The implementation
// completely ignores the target address and stores a copy of b in its
// internal buffer, overwriting any previous contents.
func (t *mockCopyContext) CopyOutBytes(_ hostarch.Addr, b []byte) (int, error) {
	return t.taskMem.CopyOut(nil, 0, b, usermem.IOOpts{})
}

// CopyInBytes implements marshal.CopyContext.CopyInBytes. The implementation
// completely ignores the source address and always fills b from the beginning
// of its internal buffer.
func (t *mockCopyContext) CopyInBytes(_ hostarch.Addr, b []byte) (int, error) {
	return t.taskMem.CopyIn(nil, 0, b, usermem.IOOpts{})
}
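
// exampleLimitedCopyIn is an illustrative sketch, not invoked by any test,
// showing the mockCopyContext workflow used throughout this file: populate
// seeds task memory from a value, setLimit simulates a fault partway through,
// and a subsequent CopyIn reports a short copy. It assumes primitive.Int64
// behaves like any other packed Marshallable type.
func exampleLimitedCopyIn() (int, error) {
	var src primitive.Int64 = 42
	var cc mockCopyContext
	cc.populate(&src) // Task memory now holds the 8-byte encoding of src.
	cc.setLimit(4)    // Only the first 4 bytes are accessible.

	var dst primitive.Int64
	// Expect n == 4 and err == simulatedErr (linuxerr.EFAULT).
	return dst.CopyIn(&cc, hostarch.Addr(0))
}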

// unsafeMemory returns the underlying memory for m. The returned slice is only
// valid for the lifetime of m. The garbage collector isn't aware that the
// returned slice is related to m, so the caller must ensure m lives long
// enough.
func unsafeMemory(m marshal.Marshallable) []byte {
	if !m.Packed() {
		// We can't return a slice pointing to the underlying memory
		// since the layout isn't packed. Allocate a temporary buffer
		// and marshal instead.
		var buf bytes.Buffer
		if err := binary.Write(&buf, hostarch.ByteOrder, m); err != nil {
			panic(err)
		}
		return buf.Bytes()
	}

	// reflect.ValueOf(m)
	// .Elem()    // Unwrap interface to inner concrete object
	// .Addr()    // Pointer value to object
	// .Pointer() // Actual address from the pointer value
	ptr := reflect.ValueOf(m).Elem().Addr().Pointer()

	size := m.SizeBytes()

	var mem []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&mem))
	hdr.Data = ptr
	hdr.Len = size
	hdr.Cap = size

	return mem
}

// unsafeMemorySlice returns the underlying memory for m. The returned slice is
// only valid for the lifetime of m. The garbage collector isn't aware that the
// returned slice is related to m, so the caller must ensure m lives long
// enough.
//
// Precondition: m must be a slice.
func unsafeMemorySlice(m any, elt marshal.Marshallable) []byte {
	kind := reflect.TypeOf(m).Kind()
	if kind != reflect.Slice {
		panic("unsafeMemorySlice called on non-slice")
	}

	if !elt.Packed() {
		// We can't return a slice pointing to the underlying memory
		// since the layout isn't packed. Allocate a temporary buffer
		// and marshal instead.
		var buf bytes.Buffer
		if err := binary.Write(&buf, hostarch.ByteOrder, m); err != nil {
			panic(err)
		}
		return buf.Bytes()
	}

	v := reflect.ValueOf(m)
	length := v.Len() * elt.SizeBytes()

	var mem []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&mem))
	hdr.Data = v.Pointer() // This is a pointer to the first elem for slices.
	hdr.Len = length
	hdr.Cap = length

	return mem
}
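
// unsafeMemoryViaUnsafeSlice is an illustrative sketch, not used by the tests
// above, of the same aliasing trick as the packed branch of unsafeMemory, but
// written with unsafe.Slice and reflect.Value.UnsafePointer (Go 1.18+) instead
// of the deprecated reflect.SliceHeader. It assumes m is packed; the lifetime
// caveat is unchanged, so callers must keep m alive (e.g. with
// runtime.KeepAlive) while using the returned slice.
func unsafeMemoryViaUnsafeSlice(m marshal.Marshallable) []byte {
	// reflect.ValueOf(m) holds the concrete *T stored in the interface;
	// UnsafePointer returns that pointer, and unsafe.Slice aliases the
	// object's bytes directly.
	ptr := (*byte)(reflect.ValueOf(m).UnsafePointer())
	return unsafe.Slice(ptr, m.SizeBytes())
}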

func isZeroes(buf []byte) bool {
	for _, b := range buf {
		if b != 0 {
			return false
		}
	}
	return true
}

// compareMemory compares the first n bytes of two chunks of memory represented
// by expected and actual.
func compareMemory(t *testing.T, expected, actual []byte, n int) {
	t.Logf("Expected (%d): %v (%d) + (%d) %v\n", len(expected), expected[:n], n, len(expected)-n, expected[n:])
	t.Logf("Actual (%d): %v (%d) + (%d) %v\n", len(actual), actual[:n], n, len(actual)-n, actual[n:])

	if diff := cmp.Diff(expected[:n], actual[:n]); diff != "" {
		t.Errorf("Memory buffers don't match:\n--- expected only\n+++ actual only\n%v", diff)
	}
}

// limitedCopyIn populates task memory with src, then unmarshals task memory to
// dst. The task signals an error at limit bytes during copy-in, which should
// result in a truncated unmarshalling.
func limitedCopyIn(t *testing.T, src, dst marshal.Marshallable, limit int) {
	var cc mockCopyContext
	cc.populate(src)
	cc.setLimit(limit)

	n, err := dst.CopyIn(&cc, hostarch.Addr(0))
	if n != limit {
		t.Errorf("CopyIn copied unexpected number of bytes, expected %d, got %d", limit, n)
	}
	if err != simulatedErr {
		t.Errorf("CopyIn returned unexpected error, expected %v, got %v", simulatedErr, err)
	}

	expectedMem := unsafeMemory(src)
	defer runtime.KeepAlive(src)
	actualMem := unsafeMemory(dst)
	defer runtime.KeepAlive(dst)

	compareMemory(t, expectedMem, actualMem, n)

	// The bytes after the first n should still be zero in actual, since actual
	// was zero-initialized and CopyIn shouldn't have touched them. However, we
	// can only guarantee that nothing beyond the first n bytes was touched if
	// the layout is packed.
	if dst.Packed() && !isZeroes(actualMem[n:]) {
		t.Errorf("Expected the last %d bytes of copied in object to be zeroes, got %v\n", dst.SizeBytes()-n, actualMem)
	}
}

// limitedCopyOut marshals src to task memory. The task signals an error at
// limit bytes during copy-out, which should result in a truncated marshalling.
func limitedCopyOut(t *testing.T, src marshal.Marshallable, limit int) {
	var cc mockCopyContext
	cc.setLimit(limit)

	n, err := src.CopyOut(&cc, hostarch.Addr(0))
	if n != limit {
		t.Errorf("CopyOut copied unexpected number of bytes, expected %d, got %d", limit, n)
	}
	if err != simulatedErr {
		t.Errorf("CopyOut returned unexpected error, expected %v, got %v", simulatedErr, err)
	}

	expectedMem := unsafeMemory(src)
	defer runtime.KeepAlive(src)
	actualMem := cc.taskMem.Bytes

	compareMemory(t, expectedMem, actualMem, n)
}

// copyInN populates task memory with src, then unmarshals task memory to dst,
// requesting the unmarshalling to be limited to limit bytes.
func copyInN(t *testing.T, src, dst marshal.Marshallable, limit int) {
	var cc mockCopyContext
	cc.populate(src)
	cc.setLimit(limit)

	n, err := dst.CopyInN(&cc, hostarch.Addr(0), limit)
	if err != nil {
		t.Errorf("CopyInN returned unexpected error: %v", err)
	}
	if n != limit {
		t.Errorf("CopyInN copied unexpected number of bytes, expected %d, got %d", limit, n)
	}

	expectedMem := unsafeMemory(src)
	defer runtime.KeepAlive(src)
	actualMem := unsafeMemory(dst)
	defer runtime.KeepAlive(dst)

	t.Logf("Expected: %v + %v\n", expectedMem[:n], expectedMem[n:])
	t.Logf("Actual  : %v + %v\n", actualMem[:n], actualMem[n:])

	compareMemory(t, expectedMem, actualMem, n)
}

// copyOutN marshals src to task memory, requesting the marshalling to be
// limited to limit bytes.
func copyOutN(t *testing.T, src marshal.Marshallable, limit int) {
	var cc mockCopyContext
	cc.setLimit(limit)

	n, err := src.CopyOutN(&cc, hostarch.Addr(0), limit)
	if err != nil {
		t.Errorf("CopyOutN returned unexpected error: %v", err)
	}
	if n != limit {
		t.Errorf("CopyOutN copied unexpected number of bytes, expected %d, got %d", limit, n)
	}

	expectedMem := unsafeMemory(src)
	defer runtime.KeepAlive(src)
	actualMem := cc.taskMem.Bytes

	t.Logf("Expected: %v + %v\n", expectedMem[:n], expectedMem[n:])
	t.Logf("Actual  : %v + %v\n", actualMem[:n], actualMem[n:])

	compareMemory(t, expectedMem, actualMem, n)
}
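
// exampleTruncationModes is an illustrative sketch, not invoked by any test,
// contrasting the two truncation modes exercised by the helpers above: CopyOut
// running into the mock's limit surfaces the simulated EFAULT, while CopyOutN
// with an explicit limit treats the short copy as success. It assumes
// primitive.Int64 is packed and reports errors the same way as the generated
// test types.
func exampleTruncationModes() {
	var v primitive.Int64 = 0x1122334455667788
	limit := v.SizeBytes() / 2

	var cc mockCopyContext
	cc.setLimit(limit)

	// Implicit truncation: the mock runs out of memory mid-copy, so we expect
	// n == limit and err == simulatedErr.
	if n, err := v.CopyOut(&cc, hostarch.Addr(0)); n != limit || err != simulatedErr {
		panic(fmt.Sprintf("CopyOut: got (%d, %v)", n, err))
	}

	// Explicit truncation: the caller asks for only limit bytes up front, so
	// the short copy is expected and no error is returned.
	if n, err := v.CopyOutN(&cc, hostarch.Addr(0), limit); n != limit || err != nil {
		panic(fmt.Sprintf("CopyOutN: got (%d, %v)", n, err))
	}
}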

// TestLimitedMarshalling verifies marshalling/unmarshalling succeeds when the
// underlying copy in/out operations partially succeed.
func TestLimitedMarshalling(t *testing.T) {
	types := []reflect.Type{
		// Packed types.
		reflect.TypeOf((*test.Type2)(nil)),
		reflect.TypeOf((*test.Type3)(nil)),
		reflect.TypeOf((*test.Timespec)(nil)),
		reflect.TypeOf((*test.Stat)(nil)),
		reflect.TypeOf((*test.InetAddr)(nil)),
		reflect.TypeOf((*test.SignalSet)(nil)),
		reflect.TypeOf((*test.SignalSetAlias)(nil)),
		// Non-packed types.
		reflect.TypeOf((*test.Type1)(nil)),
		reflect.TypeOf((*test.Type4)(nil)),
		reflect.TypeOf((*test.Type5)(nil)),
		reflect.TypeOf((*test.Type6)(nil)),
		reflect.TypeOf((*test.Type7)(nil)),
		reflect.TypeOf((*test.Type8)(nil)),
	}

	for _, tyPtr := range types {
		// Remove one level of pointer-indirection from the type. We get this
		// back when we pass the type to reflect.New.
		ty := tyPtr.Elem()

		// Partial copy-in.
		t.Run(fmt.Sprintf("PartialCopyIn_%v", ty), func(t *testing.T) {
			expected := reflect.New(ty).Interface().(marshal.Marshallable)
			actual := reflect.New(ty).Interface().(marshal.Marshallable)
			analysis.RandomizeValue(expected)

			limitedCopyIn(t, expected, actual, expected.SizeBytes()/2)
		})

		// Partial copy-out.
		t.Run(fmt.Sprintf("PartialCopyOut_%v", ty), func(t *testing.T) {
			expected := reflect.New(ty).Interface().(marshal.Marshallable)
			analysis.RandomizeValue(expected)

			limitedCopyOut(t, expected, expected.SizeBytes()/2)
		})

		// Explicitly request partial copy-out.
		t.Run(fmt.Sprintf("PartialCopyOutN_%v", ty), func(t *testing.T) {
			expected := reflect.New(ty).Interface().(marshal.Marshallable)
			analysis.RandomizeValue(expected)

			copyOutN(t, expected, expected.SizeBytes()/2)
		})

		// Explicitly request partial copy-in.
		t.Run(fmt.Sprintf("PartialCopyInN_%v", ty), func(t *testing.T) {
			expected := reflect.New(ty).Interface().(marshal.Marshallable)
			analysis.RandomizeValue(expected)

			copyInN(t, expected, expected, expected.SizeBytes()/2)
		})
	}
}
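
// exampleDirectSliceCopy is an illustrative sketch, not invoked by any test,
// of the generated slice helpers that TestLimitedSliceMarshalling drives
// through reflection below: CopyStatSliceOut marshals a []test.Stat to task
// memory and CopyStatSliceIn unmarshals it back. It assumes the round trip
// succeeds when the mock's limit covers the whole slice.
func exampleDirectSliceCopy() error {
	src := make([]test.Stat, 3)
	for i := range src {
		analysis.RandomizeValue(&src[i])
	}

	var cc mockCopyContext
	cc.setLimit(len(src) * src[0].SizeBytes())

	if _, err := test.CopyStatSliceOut(&cc, hostarch.Addr(0), src); err != nil {
		return err
	}
	dst := make([]test.Stat, len(src))
	_, err := test.CopyStatSliceIn(&cc, hostarch.Addr(0), dst)
	return err
}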

// TestLimitedSliceMarshalling verifies marshalling/unmarshalling of slices of
// marshallable types succeeds when the underlying copy in/out operations
// partially succeed.
func TestLimitedSliceMarshalling(t *testing.T) {
	types := []struct {
		arrayPtrType reflect.Type
		copySliceIn  func(cc marshal.CopyContext, addr hostarch.Addr, dstSlice any) (int, error)
		copySliceOut func(cc marshal.CopyContext, addr hostarch.Addr, srcSlice any) (int, error)
		unsafeMemory func(arrPtr any) []byte
	}{
		// Packed types.
		{
			reflect.TypeOf((*[20]test.Stat)(nil)),
			func(cc marshal.CopyContext, addr hostarch.Addr, dst any) (int, error) {
				slice := dst.(*[20]test.Stat)[:]
				return test.CopyStatSliceIn(cc, addr, slice)
			},
			func(cc marshal.CopyContext, addr hostarch.Addr, src any) (int, error) {
				slice := src.(*[20]test.Stat)[:]
				return test.CopyStatSliceOut(cc, addr, slice)
			},
			func(a any) []byte {
				slice := a.(*[20]test.Stat)[:]
				return unsafeMemorySlice(slice, &slice[0])
			},
		},
		{
			reflect.TypeOf((*[1]test.Stat)(nil)),
			func(cc marshal.CopyContext, addr hostarch.Addr, dst any) (int, error) {
				slice := dst.(*[1]test.Stat)[:]
				return test.CopyStatSliceIn(cc, addr, slice)
			},
			func(cc marshal.CopyContext, addr hostarch.Addr, src any) (int, error) {
				slice := src.(*[1]test.Stat)[:]
				return test.CopyStatSliceOut(cc, addr, slice)
			},
			func(a any) []byte {
				slice := a.(*[1]test.Stat)[:]
				return unsafeMemorySlice(slice, &slice[0])
			},
		},
		{
			reflect.TypeOf((*[5]test.SignalSetAlias)(nil)),
			func(cc marshal.CopyContext, addr hostarch.Addr, dst any) (int, error) {
				slice := dst.(*[5]test.SignalSetAlias)[:]
				return test.CopySignalSetAliasSliceIn(cc, addr, slice)
			},
			func(cc marshal.CopyContext, addr hostarch.Addr, src any) (int, error) {
				slice := src.(*[5]test.SignalSetAlias)[:]
				return test.CopySignalSetAliasSliceOut(cc, addr, slice)
			},
			func(a any) []byte {
				slice := a.(*[5]test.SignalSetAlias)[:]
				return unsafeMemorySlice(slice, &slice[0])
			},
		},
		// Non-packed types.
		{
			reflect.TypeOf((*[20]test.Type1)(nil)),
			func(cc marshal.CopyContext, addr hostarch.Addr, dst any) (int, error) {
				slice := dst.(*[20]test.Type1)[:]
				return test.CopyType1SliceIn(cc, addr, slice)
			},
			func(cc marshal.CopyContext, addr hostarch.Addr, src any) (int, error) {
				slice := src.(*[20]test.Type1)[:]
				return test.CopyType1SliceOut(cc, addr, slice)
			},
			func(a any) []byte {
				slice := a.(*[20]test.Type1)[:]
				return unsafeMemorySlice(slice, &slice[0])
			},
		},
		{
			reflect.TypeOf((*[1]test.Type1)(nil)),
			func(cc marshal.CopyContext, addr hostarch.Addr, dst any) (int, error) {
				slice := dst.(*[1]test.Type1)[:]
				return test.CopyType1SliceIn(cc, addr, slice)
			},
			func(cc marshal.CopyContext, addr hostarch.Addr, src any) (int, error) {
				slice := src.(*[1]test.Type1)[:]
				return test.CopyType1SliceOut(cc, addr, slice)
			},
			func(a any) []byte {
				slice := a.(*[1]test.Type1)[:]
				return unsafeMemorySlice(slice, &slice[0])
			},
		},
		{
			reflect.TypeOf((*[7]test.Type8)(nil)),
			func(cc marshal.CopyContext, addr hostarch.Addr, dst any) (int, error) {
				slice := dst.(*[7]test.Type8)[:]
				return test.CopyType8SliceIn(cc, addr, slice)
			},
			func(cc marshal.CopyContext, addr hostarch.Addr, src any) (int, error) {
				slice := src.(*[7]test.Type8)[:]
				return test.CopyType8SliceOut(cc, addr, slice)
			},
			func(a any) []byte {
				slice := a.(*[7]test.Type8)[:]
				return unsafeMemorySlice(slice, &slice[0])
			},
		},
	}

	for _, tt := range types {
		// The body of this loop is generic over the type tt.arrayPtrType, with
		// the help of reflection. To aid in readability, comments below show
		// the equivalent go code assuming
		// tt.arrayPtrType = typeof(*[20]test.Stat).

		// Equivalent:
		// var x *[20]test.Stat
		// arrayTy := reflect.TypeOf(*x)
		arrayTy := tt.arrayPtrType.Elem()

		// Partial copy-in of slices.
		t.Run(fmt.Sprintf("PartialCopySliceIn_%v", arrayTy), func(t *testing.T) {
			// Equivalent:
			// var x [20]test.Stat
			// length := len(x)
			length := arrayTy.Len()
			if length < 1 {
				panic("Test type can't be zero-length array")
			}
			// Equivalent:
			// elem := new(test.Stat).(marshal.Marshallable)
			elem := reflect.New(arrayTy.Elem()).Interface().(marshal.Marshallable)

			// Equivalent:
			// var expected, actual any
			// expected = new([20]test.Stat)
			// actual = new([20]test.Stat)
			expected := reflect.New(arrayTy).Interface()
			actual := reflect.New(arrayTy).Interface()

			analysis.RandomizeValue(expected)

			limit := (length * elem.SizeBytes()) / 2
			// Also make sure the limit is partially inside one of the elements.
			limit += elem.SizeBytes() / 2
			analysis.RandomizeValue(expected)

			var cc mockCopyContext
			cc.populate(expected)
			cc.setLimit(limit)

			n, err := tt.copySliceIn(&cc, hostarch.Addr(0), actual)
			if n != limit {
				t.Errorf("CopyIn copied unexpected number of bytes, expected %d, got %d", limit, n)
			}
			if n < length*elem.SizeBytes() && err != simulatedErr {
				t.Errorf("CopyIn returned unexpected error, expected %v, got %v", simulatedErr, err)
			}

			expectedMem := tt.unsafeMemory(expected)
			defer runtime.KeepAlive(expected)
			actualMem := tt.unsafeMemory(actual)
			defer runtime.KeepAlive(actual)

			compareMemory(t, expectedMem, actualMem, n)

			// The bytes after the first n should still be zero in actual, since
			// actual was zero-initialized and CopyIn shouldn't have touched them.
			// However, we can only guarantee that nothing beyond the first n
			// bytes was touched if the layout is packed.
			if elem.Packed() && !isZeroes(actualMem[n:]) {
				t.Errorf("Expected the last %d bytes of copied in object to be zeroes, got %v\n", (elem.SizeBytes()*length)-n, actualMem)
			}
		})

		// Partial copy-out of slices.
		t.Run(fmt.Sprintf("PartialCopySliceOut_%v", arrayTy), func(t *testing.T) {
			// Equivalent:
			// var x [20]test.Stat
			// length := len(x)
			length := arrayTy.Len()
			if length < 1 {
				panic("Test type can't be zero-length array")
			}
			// Equivalent:
			// elem := new(test.Stat).(marshal.Marshallable)
			elem := reflect.New(arrayTy.Elem()).Interface().(marshal.Marshallable)

			// Equivalent:
			// var expected any
			// expected = new([20]test.Stat)
			expected := reflect.New(arrayTy).Interface()

			analysis.RandomizeValue(expected)

			limit := (length * elem.SizeBytes()) / 2
			// Also make sure the limit is partially inside one of the elements.
			limit += elem.SizeBytes() / 2
			analysis.RandomizeValue(expected)

			var cc mockCopyContext
			cc.populate(expected)
			cc.setLimit(limit)

			n, err := tt.copySliceOut(&cc, hostarch.Addr(0), expected)
			if n != limit {
				t.Errorf("CopyOut copied unexpected number of bytes, expected %d, got %d", limit, n)
			}
			if n < length*elem.SizeBytes() && err != simulatedErr {
				t.Errorf("CopyOut returned unexpected error, expected %v, got %v", simulatedErr, err)
			}

			expectedMem := tt.unsafeMemory(expected)
			defer runtime.KeepAlive(expected)
			actualMem := cc.taskMem.Bytes

			compareMemory(t, expectedMem, actualMem, n)
		})
	}
}

func TestDynamicTypeStruct(t *testing.T) {
	t12 := test.Type12Dynamic{
		X: 32,
		Y: []primitive.Int64{5, 6, 7},
	}
	var cc mockCopyContext
	cc.setLimit(t12.SizeBytes())

	if _, err := t12.CopyOut(&cc, hostarch.Addr(0)); err != nil {
		t.Fatalf("t12.CopyOut failed: %v", err)
	}

	res := test.Type12Dynamic{
		Y: make([]primitive.Int64, len(t12.Y)),
	}
	if _, err := res.CopyIn(&cc, hostarch.Addr(0)); err != nil {
		t.Fatalf("res.CopyIn failed: %v", err)
	}
	if !reflect.DeepEqual(t12, res) {
		t.Errorf("dynamic type is not the same after marshalling and unmarshalling: before = %+v, after = %+v", t12, res)
	}
}

func TestDynamicTypeIdentifier(t *testing.T) {
	s := test.Type13Dynamic("go_marshal")
	var cc mockCopyContext
	cc.setLimit(s.SizeBytes())

	if _, err := s.CopyOut(&cc, hostarch.Addr(0)); err != nil {
		t.Fatalf("s.CopyOut failed: %v", err)
	}

	res := test.Type13Dynamic(make([]byte, len(s)))
	if _, err := res.CopyIn(&cc, hostarch.Addr(0)); err != nil {
		t.Fatalf("res.CopyIn failed: %v", err)
	}
	if res != s {
		t.Errorf("dynamic type is not the same after marshalling and unmarshalling: before = %s, after = %s", s, res)
	}
}