github.com/megatontech/mynoteforgo@v0.0.0-20200507084910-5d0c6ea6e890/源码/runtime/runtime_test.go

// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"flag"
	"io"
	. "runtime"
	"runtime/debug"
	"strings"
	"testing"
	"unsafe"
)

var flagQuick = flag.Bool("quick", false, "skip slow tests, for second run in all.bash")

func init() {
	// We're testing the runtime, so make tracebacks show things
	// in the runtime. This only raises the level, so it won't
	// override GOTRACEBACK=crash from the user.
	SetTracebackEnv("system")
}

var errf error

func errfn() error {
	return errf
}

func errfn1() error {
	return io.EOF
}

func BenchmarkIfaceCmp100(b *testing.B) {
	for i := 0; i < b.N; i++ {
		for j := 0; j < 100; j++ {
			if errfn() == io.EOF {
				b.Fatal("bad comparison")
			}
		}
	}
}

func BenchmarkIfaceCmpNil100(b *testing.B) {
	for i := 0; i < b.N; i++ {
		for j := 0; j < 100; j++ {
			if errfn1() == nil {
				b.Fatal("bad comparison")
			}
		}
	}
}

var efaceCmp1 interface{}
var efaceCmp2 interface{}

func BenchmarkEfaceCmpDiff(b *testing.B) {
	x := 5
	efaceCmp1 = &x
	y := 6
	efaceCmp2 = &y
	for i := 0; i < b.N; i++ {
		for j := 0; j < 100; j++ {
			if efaceCmp1 == efaceCmp2 {
				b.Fatal("bad comparison")
			}
		}
	}
}

func BenchmarkDefer(b *testing.B) {
	for i := 0; i < b.N; i++ {
		defer1()
	}
}

func defer1() {
	defer func(x, y, z int) {
		if recover() != nil || x != 1 || y != 2 || z != 3 {
			panic("bad recover")
		}
	}(1, 2, 3)
}

func BenchmarkDefer10(b *testing.B) {
	for i := 0; i < b.N/10; i++ {
		defer2()
	}
}

func defer2() {
	for i := 0; i < 10; i++ {
		defer func(x, y, z int) {
			if recover() != nil || x != 1 || y != 2 || z != 3 {
				panic("bad recover")
			}
		}(1, 2, 3)
	}
}

func BenchmarkDeferMany(b *testing.B) {
	for i := 0; i < b.N; i++ {
		defer func(x, y, z int) {
			if recover() != nil || x != 1 || y != 2 || z != 3 {
				panic("bad recover")
			}
		}(1, 2, 3)
	}
}

// golang.org/issue/7063
func TestStopCPUProfilingWithProfilerOff(t *testing.T) {
	SetCPUProfileRate(0)
}

// Addresses to test for faulting behavior.
// This is less a test of SetPanicOnFault and more a check that
// the operating system and the runtime can process these faults
// correctly. That is, we're indirectly testing that without SetPanicOnFault
// these would manage to turn into ordinary crashes.
// Note that these are truncated on 32-bit systems, so the bottom 32 bits
// of the larger addresses must themselves be invalid addresses.
// We might get unlucky and the OS might have mapped one of these
// addresses, but probably not: they're all in the first page, very high
// addresses that normally an OS would reserve for itself, or malformed
// addresses. Even so, we might have to remove one or two on different
// systems. We will see.

var faultAddrs = []uint64{
	// low addresses
	0,
	1,
	0xfff,
	// high (kernel) addresses
	// or else malformed.
	0xffffffffffffffff,
	0xfffffffffffff001,
	0xffffffffffff0001,
	0xfffffffffff00001,
	0xffffffffff000001,
	0xfffffffff0000001,
	0xffffffff00000001,
	0xfffffff000000001,
	0xffffff0000000001,
	0xfffff00000000001,
	0xffff000000000001,
	0xfff0000000000001,
	0xff00000000000001,
	0xf000000000000001,
	0x8000000000000001,
}

func TestSetPanicOnFault(t *testing.T) {
	old := debug.SetPanicOnFault(true)
	defer debug.SetPanicOnFault(old)

	nfault := 0
	for _, addr := range faultAddrs {
		testSetPanicOnFault(t, uintptr(addr), &nfault)
	}
	if nfault == 0 {
		t.Fatalf("none of the addresses faulted")
	}
}

func testSetPanicOnFault(t *testing.T, addr uintptr, nfault *int) {
	if GOOS == "nacl" {
		t.Skip("nacl doesn't seem to fault on high addresses")
	}
	if GOOS == "js" {
		t.Skip("js does not support catching faults")
	}

	defer func() {
		if err := recover(); err != nil {
			*nfault++
		}
	}()

	// The read should fault, except that sometimes we hit
	// addresses that have had C or kernel pages mapped there
	// readable by user code. So just log the content.
	// If no addresses fault, we'll fail the test.
	v := *(*byte)(unsafe.Pointer(addr))
	t.Logf("addr %#x: %#x\n", addr, v)
}

func eqstring_generic(s1, s2 string) bool {
	if len(s1) != len(s2) {
		return false
	}
	// optimization in assembly versions:
	// if s1.str == s2.str { return true }
	for i := 0; i < len(s1); i++ {
		if s1[i] != s2[i] {
			return false
		}
	}
	return true
}

func TestEqString(t *testing.T) {
	// This isn't really an exhaustive test of == on strings, it's
	// just a convenient way of documenting (via eqstring_generic)
	// what == does.
	s := []string{
		"",
		"a",
		"c",
		"aaa",
		"ccc",
		"cccc"[:3], // same contents, different string
		"1234567890",
	}
	for _, s1 := range s {
		for _, s2 := range s {
			x := s1 == s2
			y := eqstring_generic(s1, s2)
			if x != y {
				t.Errorf(`("%s" == "%s") = %t, want %t`, s1, s2, x, y)
			}
		}
	}
}

func TestTrailingZero(t *testing.T) {
	// make sure we add padding for structs with trailing zero-sized fields
	type T1 struct {
		n int32
		z [0]byte
	}
	if unsafe.Sizeof(T1{}) != 8 {
		t.Errorf("sizeof(%#v)==%d, want 8", T1{}, unsafe.Sizeof(T1{}))
	}
	type T2 struct {
		n int64
		z struct{}
	}
	if unsafe.Sizeof(T2{}) != 8+unsafe.Sizeof(Uintreg(0)) {
		t.Errorf("sizeof(%#v)==%d, want %d", T2{}, unsafe.Sizeof(T2{}), 8+unsafe.Sizeof(Uintreg(0)))
	}
	type T3 struct {
		n byte
		z [4]struct{}
	}
	if unsafe.Sizeof(T3{}) != 2 {
		t.Errorf("sizeof(%#v)==%d, want 2", T3{}, unsafe.Sizeof(T3{}))
	}
	// make sure padding can double for both zerosize and alignment
	type T4 struct {
		a int32
		b int16
		c int8
		z struct{}
	}
	if unsafe.Sizeof(T4{}) != 8 {
		t.Errorf("sizeof(%#v)==%d, want 8", T4{}, unsafe.Sizeof(T4{}))
	}
	// make sure we don't pad a zero-sized thing
	type T5 struct {
	}
	if unsafe.Sizeof(T5{}) != 0 {
		t.Errorf("sizeof(%#v)==%d, want 0", T5{}, unsafe.Sizeof(T5{}))
	}
}

func TestBadOpen(t *testing.T) {
	if GOOS == "windows" || GOOS == "nacl" || GOOS == "js" {
		t.Skip("skipping OS that doesn't have open/read/write/close")
	}
	// make sure we get the correct error code if open fails. Same for
	// read/write/close on the resulting -1 fd. See issue 10052.
	nonfile := []byte("/notreallyafile")
	fd := Open(&nonfile[0], 0, 0)
	if fd != -1 {
		t.Errorf("open(\"%s\")=%d, want -1", string(nonfile), fd)
	}
	var buf [32]byte
	r := Read(-1, unsafe.Pointer(&buf[0]), int32(len(buf)))
	if r != -1 {
		t.Errorf("read()=%d, want -1", r)
	}
	w := Write(^uintptr(0), unsafe.Pointer(&buf[0]), int32(len(buf)))
	if w != -1 {
		t.Errorf("write()=%d, want -1", w)
	}
	c := Close(-1)
	if c != -1 {
		t.Errorf("close()=%d, want -1", c)
	}
}

func TestAppendGrowth(t *testing.T) {
	var x []int64
	check := func(want int) {
		if cap(x) != want {
			t.Errorf("len=%d, cap=%d, want cap=%d", len(x), cap(x), want)
		}
	}

	check(0)
	want := 1
	for i := 1; i <= 100; i++ {
		x = append(x, 1)
		check(want)
		if i&(i-1) == 0 {
			want = 2 * i
		}
	}
}

var One = []int64{1}

func TestAppendSliceGrowth(t *testing.T) {
	var x []int64
	check := func(want int) {
		if cap(x) != want {
			t.Errorf("len=%d, cap=%d, want cap=%d", len(x), cap(x), want)
		}
	}

	check(0)
	want := 1
	for i := 1; i <= 100; i++ {
		x = append(x, One...)
		check(want)
		if i&(i-1) == 0 {
			want = 2 * i
		}
	}
}

func TestGoroutineProfileTrivial(t *testing.T) {
	// Calling GoroutineProfile twice in a row should find the same number of goroutines,
	// but it's possible there are goroutines just about to exit, so we might end up
	// with fewer in the second call. Try a few times; it should converge once those
	// zombies are gone.
	for i := 0; ; i++ {
		n1, ok := GoroutineProfile(nil) // should fail, there's at least 1 goroutine
		if n1 < 1 || ok {
			t.Fatalf("GoroutineProfile(nil) = %d, %v, want >0, false", n1, ok)
		}
		n2, ok := GoroutineProfile(make([]StackRecord, n1))
		if n2 == n1 && ok {
			break
		}
		t.Logf("GoroutineProfile(%d) = %d, %v, want %d, true", n1, n2, ok, n1)
		if i >= 10 {
			t.Fatalf("GoroutineProfile not converging")
		}
	}
}

func TestVersion(t *testing.T) {
	// Test that version does not contain \r or \n.
	vers := Version()
	if strings.Contains(vers, "\r") || strings.Contains(vers, "\n") {
		t.Fatalf("cr/nl in version: %q", vers)
	}
}
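
// The sketch below is an illustrative addition, not part of the original
// runtime_test.go: it shows the debug.SetPanicOnFault pattern that
// TestSetPanicOnFault exercises above. With panic-on-fault enabled, a read
// from an unmapped address becomes a recoverable runtime.Error panic rather
// than a fatal crash. The helper name readByteSafely is hypothetical.
func readByteSafely(addr uintptr) (b byte, ok bool) {
	old := debug.SetPanicOnFault(true)
	defer debug.SetPanicOnFault(old)
	defer func() {
		if recover() != nil {
			ok = false // the read faulted; b keeps its zero value
		}
	}()
	return *(*byte)(unsafe.Pointer(addr)), true
}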
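
// Another illustrative addition (not in the original file): the two-pass
// GoroutineProfile pattern that TestGoroutineProfileTrivial relies on. A call
// with a nil slice only reports the goroutine count; the caller then
// allocates a slice and retries until the snapshot fits. The helper name
// collectGoroutineProfile is hypothetical.
func collectGoroutineProfile() []StackRecord {
	for {
		n, _ := GoroutineProfile(nil)  // first pass: how many goroutines are there?
		p := make([]StackRecord, n+10) // headroom for goroutines started in between
		if n, ok := GoroutineProfile(p); ok {
			return p[:n]
		}
		// ok == false: more goroutines appeared than p can hold; retry.
	}
}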