github.com/SagerNet/gvisor@v0.0.0-20210707092255-7731c139d75c/tools/go_marshal/test/marshal_test.go

// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package marshal_test contains manual tests for the marshal interface. These
// are intended to test behaviour not covered by the automatically generated
// tests.
package marshal_test

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"reflect"
	"runtime"
	"testing"
	"unsafe"

	"github.com/google/go-cmp/cmp"
	"github.com/SagerNet/gvisor/pkg/hostarch"
	"github.com/SagerNet/gvisor/pkg/marshal"
	"github.com/SagerNet/gvisor/pkg/marshal/primitive"
	"github.com/SagerNet/gvisor/pkg/syserror"
	"github.com/SagerNet/gvisor/pkg/usermem"
	"github.com/SagerNet/gvisor/tools/go_marshal/analysis"
	"github.com/SagerNet/gvisor/tools/go_marshal/test"
)

var simulatedErr error = syserror.EFAULT

// mockCopyContext implements marshal.CopyContext.
type mockCopyContext struct {
	taskMem usermem.BytesIO
}

// populate fills the task memory with the contents of val.
func (t *mockCopyContext) populate(val interface{}) {
	var buf bytes.Buffer
	// Use binary.Write so we aren't testing go-marshal against its own
	// potentially buggy implementation.
	if err := binary.Write(&buf, hostarch.ByteOrder, val); err != nil {
		panic(err)
	}
	t.taskMem.Bytes = buf.Bytes()
}

func (t *mockCopyContext) setLimit(n int) {
	if len(t.taskMem.Bytes) < n {
		grown := make([]byte, n)
		copy(grown, t.taskMem.Bytes)
		t.taskMem.Bytes = grown
		return
	}
	t.taskMem.Bytes = t.taskMem.Bytes[:n]
}

// CopyScratchBuffer implements marshal.CopyContext.CopyScratchBuffer.
func (t *mockCopyContext) CopyScratchBuffer(size int) []byte {
	return make([]byte, size)
}

// CopyOutBytes implements marshal.CopyContext.CopyOutBytes. The implementation
// completely ignores the target address and stores a copy of b in its internal
// buffer, overwriting any previous contents.
func (t *mockCopyContext) CopyOutBytes(_ hostarch.Addr, b []byte) (int, error) {
	return t.taskMem.CopyOut(nil, 0, b, usermem.IOOpts{})
}

// CopyInBytes implements marshal.CopyContext.CopyInBytes. The implementation
// completely ignores the source address and always fills b from the beginning
// of its internal buffer.
func (t *mockCopyContext) CopyInBytes(_ hostarch.Addr, b []byte) (int, error) {
	return t.taskMem.CopyIn(nil, 0, b, usermem.IOOpts{})
}

// unsafeMemory returns the underlying memory for m. The returned slice is only
// valid for the lifetime of m. The garbage collector isn't aware that the
// returned slice is related to m, so the caller must ensure m lives long
// enough.
func unsafeMemory(m marshal.Marshallable) []byte {
	if !m.Packed() {
		// We can't return a slice pointing to the underlying memory
		// since the layout isn't packed.
		// Allocate a temporary buffer and marshal instead.
		var buf bytes.Buffer
		if err := binary.Write(&buf, hostarch.ByteOrder, m); err != nil {
			panic(err)
		}
		return buf.Bytes()
	}

	// reflect.ValueOf(m)
	// .Elem() // Unwrap interface to inner concrete object
	// .Addr() // Pointer value to object
	// .Pointer() // Actual address from the pointer value
	ptr := reflect.ValueOf(m).Elem().Addr().Pointer()

	size := m.SizeBytes()

	// (On newer Go toolchains, unsafe.Slice is a simpler way to build this
	// view; see the illustrative sketch at the end of this file.)
	var mem []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&mem))
	hdr.Data = ptr
	hdr.Len = size
	hdr.Cap = size

	return mem
}

// unsafeMemorySlice returns the underlying memory for m. The returned slice is
// only valid for the lifetime of m. The garbage collector isn't aware that the
// returned slice is related to m, so the caller must ensure m lives long
// enough.
//
// Precondition: m must be a slice.
func unsafeMemorySlice(m interface{}, elt marshal.Marshallable) []byte {
	kind := reflect.TypeOf(m).Kind()
	if kind != reflect.Slice {
		panic("unsafeMemorySlice called on non-slice")
	}

	if !elt.Packed() {
		// We can't return a slice pointing to the underlying memory
		// since the layout isn't packed. Allocate a temporary buffer
		// and marshal instead.
		var buf bytes.Buffer
		if err := binary.Write(&buf, hostarch.ByteOrder, m); err != nil {
			panic(err)
		}
		return buf.Bytes()
	}

	v := reflect.ValueOf(m)
	length := v.Len() * elt.SizeBytes()

	var mem []byte
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&mem))
	hdr.Data = v.Pointer() // This is a pointer to the first elem for slices.
	hdr.Len = length
	hdr.Cap = length

	return mem
}

func isZeroes(buf []byte) bool {
	for _, b := range buf {
		if b != 0 {
			return false
		}
	}
	return true
}

// compareMemory compares the first n bytes of two chunks of memory represented
// by expected and actual.
func compareMemory(t *testing.T, expected, actual []byte, n int) {
	t.Logf("Expected (%d): %v (%d) + (%d) %v\n", len(expected), expected[:n], n, len(expected)-n, expected[n:])
	t.Logf("Actual   (%d): %v (%d) + (%d) %v\n", len(actual), actual[:n], n, len(actual)-n, actual[n:])

	if diff := cmp.Diff(expected[:n], actual[:n]); diff != "" {
		t.Errorf("Memory buffers don't match:\n--- expected only\n+++ actual only\n%v", diff)
	}
}

// limitedCopyIn populates task memory with src, then unmarshals task memory to
// dst. The task signals an error at limit bytes during copy-in, which should
// result in a truncated unmarshalling.
func limitedCopyIn(t *testing.T, src, dst marshal.Marshallable, limit int) {
	var cc mockCopyContext
	cc.populate(src)
	cc.setLimit(limit)

	n, err := dst.CopyIn(&cc, hostarch.Addr(0))
	if n != limit {
		t.Errorf("CopyIn copied unexpected number of bytes, expected %d, got %d", limit, n)
	}
	if err != simulatedErr {
		t.Errorf("CopyIn returned unexpected error, expected %v, got %v", simulatedErr, err)
	}

	expectedMem := unsafeMemory(src)
	defer runtime.KeepAlive(src)
	actualMem := unsafeMemory(dst)
	defer runtime.KeepAlive(dst)

	compareMemory(t, expectedMem, actualMem, n)

	// The bytes past the first n should still be zero in actual, since actual
	// was zero-initialized and CopyIn shouldn't have touched those bytes.
	// However, we can only guarantee CopyIn didn't touch anything past the
	// first n bytes if the layout is packed.
	if dst.Packed() && !isZeroes(actualMem[n:]) {
		t.Errorf("Expected the last %d bytes of copied in object to be zeroes, got %v\n", dst.SizeBytes()-n, actualMem)
	}
}

// limitedCopyOut marshals src to task memory. The task signals an error at
// limit bytes during copy-out, which should result in a truncated marshalling.
func limitedCopyOut(t *testing.T, src marshal.Marshallable, limit int) {
	var cc mockCopyContext
	cc.setLimit(limit)

	n, err := src.CopyOut(&cc, hostarch.Addr(0))
	if n != limit {
		t.Errorf("CopyOut copied unexpected number of bytes, expected %d, got %d", limit, n)
	}
	if err != simulatedErr {
		t.Errorf("CopyOut returned unexpected error, expected %v, got %v", simulatedErr, err)
	}

	expectedMem := unsafeMemory(src)
	defer runtime.KeepAlive(src)
	actualMem := cc.taskMem.Bytes

	compareMemory(t, expectedMem, actualMem, n)
}

// copyOutN marshals src to task memory, requesting the marshalling to be
// limited to limit bytes.
func copyOutN(t *testing.T, src marshal.Marshallable, limit int) {
	var cc mockCopyContext
	cc.setLimit(limit)

	n, err := src.CopyOutN(&cc, hostarch.Addr(0), limit)
	if err != nil {
		t.Errorf("CopyOutN returned unexpected error: %v", err)
	}
	if n != limit {
		t.Errorf("CopyOutN copied unexpected number of bytes, expected %d, got %d", limit, n)
	}

	expectedMem := unsafeMemory(src)
	defer runtime.KeepAlive(src)
	actualMem := cc.taskMem.Bytes

	t.Logf("Expected: %v + %v\n", expectedMem[:n], expectedMem[n:])
	t.Logf("Actual  : %v + %v\n", actualMem[:n], actualMem[n:])

	compareMemory(t, expectedMem, actualMem, n)
}

// TestLimitedMarshalling verifies marshalling/unmarshalling succeeds when the
// underlying copy in/out operations partially succeed.
func TestLimitedMarshalling(t *testing.T) {
	types := []reflect.Type{
		// Packed types.
		reflect.TypeOf((*test.Type2)(nil)),
		reflect.TypeOf((*test.Type3)(nil)),
		reflect.TypeOf((*test.Timespec)(nil)),
		reflect.TypeOf((*test.Stat)(nil)),
		reflect.TypeOf((*test.InetAddr)(nil)),
		reflect.TypeOf((*test.SignalSet)(nil)),
		reflect.TypeOf((*test.SignalSetAlias)(nil)),
		// Non-packed types.
		reflect.TypeOf((*test.Type1)(nil)),
		reflect.TypeOf((*test.Type4)(nil)),
		reflect.TypeOf((*test.Type5)(nil)),
		reflect.TypeOf((*test.Type6)(nil)),
		reflect.TypeOf((*test.Type7)(nil)),
		reflect.TypeOf((*test.Type8)(nil)),
	}

	for _, tyPtr := range types {
		// Remove one level of pointer-indirection from the type. We get this
		// back when we pass the type to reflect.New.
		ty := tyPtr.Elem()

		// Partial copy-in.
		t.Run(fmt.Sprintf("PartialCopyIn_%v", ty), func(t *testing.T) {
			expected := reflect.New(ty).Interface().(marshal.Marshallable)
			actual := reflect.New(ty).Interface().(marshal.Marshallable)
			analysis.RandomizeValue(expected)

			limitedCopyIn(t, expected, actual, expected.SizeBytes()/2)
		})

		// Partial copy-out.
		t.Run(fmt.Sprintf("PartialCopyOut_%v", ty), func(t *testing.T) {
			expected := reflect.New(ty).Interface().(marshal.Marshallable)
			analysis.RandomizeValue(expected)

			limitedCopyOut(t, expected, expected.SizeBytes()/2)
		})

		// Explicitly request partial copy-out.
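		// Unlike PartialCopyOut above, where the limit is hit as a simulated
		// fault partway through the copy, CopyOutN is asked up front to
		// marshal only the first limit bytes, so copyOutN expects a nil error.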
		t.Run(fmt.Sprintf("PartialCopyOutN_%v", ty), func(t *testing.T) {
			expected := reflect.New(ty).Interface().(marshal.Marshallable)
			analysis.RandomizeValue(expected)

			copyOutN(t, expected, expected.SizeBytes()/2)
		})
	}
}

// TestLimitedSliceMarshalling verifies marshalling/unmarshalling of slices of
// marshallable types succeeds when the underlying copy in/out operations
// partially succeed.
func TestLimitedSliceMarshalling(t *testing.T) {
	types := []struct {
		arrayPtrType reflect.Type
		copySliceIn  func(cc marshal.CopyContext, addr hostarch.Addr, dstSlice interface{}) (int, error)
		copySliceOut func(cc marshal.CopyContext, addr hostarch.Addr, srcSlice interface{}) (int, error)
		unsafeMemory func(arrPtr interface{}) []byte
	}{
		// Packed types.
		{
			reflect.TypeOf((*[20]test.Stat)(nil)),
			func(cc marshal.CopyContext, addr hostarch.Addr, dst interface{}) (int, error) {
				slice := dst.(*[20]test.Stat)[:]
				return test.CopyStatSliceIn(cc, addr, slice)
			},
			func(cc marshal.CopyContext, addr hostarch.Addr, src interface{}) (int, error) {
				slice := src.(*[20]test.Stat)[:]
				return test.CopyStatSliceOut(cc, addr, slice)
			},
			func(a interface{}) []byte {
				slice := a.(*[20]test.Stat)[:]
				return unsafeMemorySlice(slice, &slice[0])
			},
		},
		{
			reflect.TypeOf((*[1]test.Stat)(nil)),
			func(cc marshal.CopyContext, addr hostarch.Addr, dst interface{}) (int, error) {
				slice := dst.(*[1]test.Stat)[:]
				return test.CopyStatSliceIn(cc, addr, slice)
			},
			func(cc marshal.CopyContext, addr hostarch.Addr, src interface{}) (int, error) {
				slice := src.(*[1]test.Stat)[:]
				return test.CopyStatSliceOut(cc, addr, slice)
			},
			func(a interface{}) []byte {
				slice := a.(*[1]test.Stat)[:]
				return unsafeMemorySlice(slice, &slice[0])
			},
		},
		{
			reflect.TypeOf((*[5]test.SignalSetAlias)(nil)),
			func(cc marshal.CopyContext, addr hostarch.Addr, dst interface{}) (int, error) {
				slice := dst.(*[5]test.SignalSetAlias)[:]
				return test.CopySignalSetAliasSliceIn(cc, addr, slice)
			},
			func(cc marshal.CopyContext, addr hostarch.Addr, src interface{}) (int, error) {
				slice := src.(*[5]test.SignalSetAlias)[:]
				return test.CopySignalSetAliasSliceOut(cc, addr, slice)
			},
			func(a interface{}) []byte {
				slice := a.(*[5]test.SignalSetAlias)[:]
				return unsafeMemorySlice(slice, &slice[0])
			},
		},
		// Non-packed types.
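		// For these, elem.Packed() is false, so the zero-fill check in the
		// partial copy-in subtest below is skipped.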
		{
			reflect.TypeOf((*[20]test.Type1)(nil)),
			func(cc marshal.CopyContext, addr hostarch.Addr, dst interface{}) (int, error) {
				slice := dst.(*[20]test.Type1)[:]
				return test.CopyType1SliceIn(cc, addr, slice)
			},
			func(cc marshal.CopyContext, addr hostarch.Addr, src interface{}) (int, error) {
				slice := src.(*[20]test.Type1)[:]
				return test.CopyType1SliceOut(cc, addr, slice)
			},
			func(a interface{}) []byte {
				slice := a.(*[20]test.Type1)[:]
				return unsafeMemorySlice(slice, &slice[0])
			},
		},
		{
			reflect.TypeOf((*[1]test.Type1)(nil)),
			func(cc marshal.CopyContext, addr hostarch.Addr, dst interface{}) (int, error) {
				slice := dst.(*[1]test.Type1)[:]
				return test.CopyType1SliceIn(cc, addr, slice)
			},
			func(cc marshal.CopyContext, addr hostarch.Addr, src interface{}) (int, error) {
				slice := src.(*[1]test.Type1)[:]
				return test.CopyType1SliceOut(cc, addr, slice)
			},
			func(a interface{}) []byte {
				slice := a.(*[1]test.Type1)[:]
				return unsafeMemorySlice(slice, &slice[0])
			},
		},
		{
			reflect.TypeOf((*[7]test.Type8)(nil)),
			func(cc marshal.CopyContext, addr hostarch.Addr, dst interface{}) (int, error) {
				slice := dst.(*[7]test.Type8)[:]
				return test.CopyType8SliceIn(cc, addr, slice)
			},
			func(cc marshal.CopyContext, addr hostarch.Addr, src interface{}) (int, error) {
				slice := src.(*[7]test.Type8)[:]
				return test.CopyType8SliceOut(cc, addr, slice)
			},
			func(a interface{}) []byte {
				slice := a.(*[7]test.Type8)[:]
				return unsafeMemorySlice(slice, &slice[0])
			},
		},
	}

	for _, tt := range types {
		// The body of this loop is generic over the type tt.arrayPtrType, with
		// the help of reflection. To aid in readability, comments below show
		// the equivalent go code assuming
		// tt.arrayPtrType = typeof(*[20]test.Stat).

		// Equivalent:
		// var x *[20]test.Stat
		// arrayTy := reflect.TypeOf(*x)
		arrayTy := tt.arrayPtrType.Elem()

		// Partial copy-in of slices.
		t.Run(fmt.Sprintf("PartialCopySliceIn_%v", arrayTy), func(t *testing.T) {
			// Equivalent:
			// var x [20]test.Stat
			// length := len(x)
			length := arrayTy.Len()
			if length < 1 {
				panic("Test type can't be zero-length array")
			}
			// Equivalent:
			// elem := new(test.Stat).(marshal.Marshallable)
			elem := reflect.New(arrayTy.Elem()).Interface().(marshal.Marshallable)

			// Equivalent:
			// var expected, actual interface{}
			// expected = new([20]test.Stat)
			// actual = new([20]test.Stat)
			expected := reflect.New(arrayTy).Interface()
			actual := reflect.New(arrayTy).Interface()

			analysis.RandomizeValue(expected)

			limit := (length * elem.SizeBytes()) / 2
			// Also make sure the limit is partially inside one of the elements.
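			// For example, for a hypothetical array of 20 elements of 8 bytes
			// each, limit becomes 80 + 4 = 84 bytes, ending halfway through
			// the 11th element.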
			limit += elem.SizeBytes() / 2
			analysis.RandomizeValue(expected)

			var cc mockCopyContext
			cc.populate(expected)
			cc.setLimit(limit)

			n, err := tt.copySliceIn(&cc, hostarch.Addr(0), actual)
			if n != limit {
				t.Errorf("CopyIn copied unexpected number of bytes, expected %d, got %d", limit, n)
			}
			if n < length*elem.SizeBytes() && err != simulatedErr {
				t.Errorf("CopyIn returned unexpected error, expected %v, got %v", simulatedErr, err)
			}

			expectedMem := tt.unsafeMemory(expected)
			defer runtime.KeepAlive(expected)
			actualMem := tt.unsafeMemory(actual)
			defer runtime.KeepAlive(actual)

			compareMemory(t, expectedMem, actualMem, n)

			// The bytes past the first n should still be zero in actual, since
			// actual was zero-initialized and CopyIn shouldn't have touched
			// those bytes. However, we can only guarantee CopyIn didn't touch
			// anything past the first n bytes if the layout is packed.
			if elem.Packed() && !isZeroes(actualMem[n:]) {
				t.Errorf("Expected the last %d bytes of copied in object to be zeroes, got %v\n", (elem.SizeBytes()*length)-n, actualMem)
			}
		})

		// Partial copy-out of slices.
		t.Run(fmt.Sprintf("PartialCopySliceOut_%v", arrayTy), func(t *testing.T) {
			// Equivalent:
			// var x [20]test.Stat
			// length := len(x)
			length := arrayTy.Len()
			if length < 1 {
				panic("Test type can't be zero-length array")
			}
			// Equivalent:
			// elem := new(test.Stat).(marshal.Marshallable)
			elem := reflect.New(arrayTy.Elem()).Interface().(marshal.Marshallable)

			// Equivalent:
			// var expected interface{}
			// expected = new([20]test.Stat)
			expected := reflect.New(arrayTy).Interface()

			analysis.RandomizeValue(expected)

			limit := (length * elem.SizeBytes()) / 2
			// Also make sure the limit is partially inside one of the elements.
			limit += elem.SizeBytes() / 2
			analysis.RandomizeValue(expected)

			var cc mockCopyContext
			cc.populate(expected)
			cc.setLimit(limit)

			n, err := tt.copySliceOut(&cc, hostarch.Addr(0), expected)
			if n != limit {
				t.Errorf("CopyOut copied unexpected number of bytes, expected %d, got %d", limit, n)
			}
			if n < length*elem.SizeBytes() && err != simulatedErr {
				t.Errorf("CopyOut returned unexpected error, expected %v, got %v", simulatedErr, err)
			}

			expectedMem := tt.unsafeMemory(expected)
			defer runtime.KeepAlive(expected)
			actualMem := cc.taskMem.Bytes

			compareMemory(t, expectedMem, actualMem, n)
		})
	}
}

func TestDynamicTypeStruct(t *testing.T) {
	t12 := test.Type12Dynamic{
		X: 32,
		Y: []primitive.Int64{5, 6, 7},
	}
	var cc mockCopyContext
	cc.setLimit(t12.SizeBytes())

	if _, err := t12.CopyOut(&cc, hostarch.Addr(0)); err != nil {
		t.Fatalf("t12.CopyOut failed: %v", err)
	}

	res := test.Type12Dynamic{
		Y: make([]primitive.Int64, len(t12.Y)),
	}
	res.CopyIn(&cc, hostarch.Addr(0))
	if !reflect.DeepEqual(t12, res) {
		t.Errorf("dynamic type is not the same after marshalling and unmarshalling: before = %+v, after = %+v", t12, res)
	}
}

func TestDynamicTypeIdentifier(t *testing.T) {
	s := test.Type13Dynamic("go_marshal")
	var cc mockCopyContext
	cc.setLimit(s.SizeBytes())

	if _, err := s.CopyOut(&cc, hostarch.Addr(0)); err != nil {
		t.Fatalf("s.CopyOut failed: %v", err)
	}

	res := test.Type13Dynamic(make([]byte, len(s)))
	res.CopyIn(&cc, hostarch.Addr(0))
	if res != s {
		t.Errorf("dynamic type is not the same after marshalling and unmarshalling: before = %s, after = %s", s, res)
	}
}
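
// unsafeMemoryAlt is an illustrative sketch and is not used by the tests
// above: it shows how unsafeMemory could be written with unsafe.Slice instead
// of a hand-built reflect.SliceHeader. It assumes a Go 1.18+ toolchain (for
// reflect.Value.UnsafePointer and unsafe.Slice), which is newer than what the
// rest of this file requires; treat it as a sketch rather than a drop-in
// replacement.
func unsafeMemoryAlt(m marshal.Marshallable) []byte {
	if !m.Packed() {
		// Same fallback as unsafeMemory: the in-memory layout isn't packed,
		// so marshal through a temporary buffer instead.
		var buf bytes.Buffer
		if err := binary.Write(&buf, hostarch.ByteOrder, m); err != nil {
			panic(err)
		}
		return buf.Bytes()
	}
	// View the concrete object behind the interface as a byte slice of its
	// marshalled size. As with unsafeMemory, the caller must keep m alive
	// while using the returned slice.
	ptr := reflect.ValueOf(m).Elem().Addr().UnsafePointer()
	return unsafe.Slice((*byte)(ptr), m.SizeBytes())
}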