gvisor.dev/gvisor@v0.0.0-20240520182842-f9d4d51c7e0f/pkg/sentry/fsimpl/proc/tasks_sys.go

// Copyright 2019 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package proc

import (
	"bytes"
	"fmt"
	"io"
	"math"

	"gvisor.dev/gvisor/pkg/abi/linux"
	"gvisor.dev/gvisor/pkg/atomicbitops"
	"gvisor.dev/gvisor/pkg/context"
	"gvisor.dev/gvisor/pkg/errors/linuxerr"
	"gvisor.dev/gvisor/pkg/hostarch"
	"gvisor.dev/gvisor/pkg/rand"
	"gvisor.dev/gvisor/pkg/sentry/fsimpl/kernfs"
	"gvisor.dev/gvisor/pkg/sentry/inet"
	"gvisor.dev/gvisor/pkg/sentry/kernel"
	"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
	"gvisor.dev/gvisor/pkg/sentry/vfs"
	"gvisor.dev/gvisor/pkg/sync"
	"gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
	"gvisor.dev/gvisor/pkg/usermem"
)

// tcpMemDir indicates whether a tcpMemData file backs tcp_rmem or tcp_wmem.
//
// +stateify savable
type tcpMemDir int

const (
	tcpRMem tcpMemDir = iota
	tcpWMem
)

// newSysDir returns the dentry corresponding to /proc/sys directory.
func (fs *filesystem) newSysDir(ctx context.Context, root *auth.Credentials, k *kernel.Kernel) kernfs.Inode {
	return fs.newStaticDir(ctx, root, map[string]kernfs.Inode{
		"kernel": fs.newStaticDir(ctx, root, map[string]kernfs.Inode{
			"cap_last_cap": fs.newInode(ctx, root, 0444, newStaticFile(fmt.Sprintf("%d\n", linux.CAP_LAST_CAP))),
			"hostname":     fs.newInode(ctx, root, 0444, &hostnameData{}),
			"overflowgid":  fs.newInode(ctx, root, 0444, newStaticFile(fmt.Sprintf("%d\n", auth.OverflowGID))),
			"overflowuid":  fs.newInode(ctx, root, 0444, newStaticFile(fmt.Sprintf("%d\n", auth.OverflowUID))),
			"random": fs.newStaticDir(ctx, root, map[string]kernfs.Inode{
				"boot_id": fs.newInode(ctx, root, 0444, newStaticFile(randUUID())),
			}),
			"sem":    fs.newInode(ctx, root, 0444, newStaticFile(fmt.Sprintf("%d\t%d\t%d\t%d\n", linux.SEMMSL, linux.SEMMNS, linux.SEMOPM, linux.SEMMNI))),
			"shmall": fs.newInode(ctx, root, 0444, ipcData(linux.SHMALL)),
			"shmmax": fs.newInode(ctx, root, 0444, ipcData(linux.SHMMAX)),
			"shmmni": fs.newInode(ctx, root, 0444, ipcData(linux.SHMMNI)),
			"msgmni": fs.newInode(ctx, root, 0444, ipcData(linux.MSGMNI)),
			"msgmax": fs.newInode(ctx, root, 0444, ipcData(linux.MSGMAX)),
			"msgmnb": fs.newInode(ctx, root, 0444, ipcData(linux.MSGMNB)),
			"yama": fs.newStaticDir(ctx, root, map[string]kernfs.Inode{
				"ptrace_scope": fs.newYAMAPtraceScopeFile(ctx, k, root),
			}),
		}),
		"fs": fs.newStaticDir(ctx, root, map[string]kernfs.Inode{
			"nr_open": fs.newInode(ctx, root, 0644, &atomicInt32File{val: &k.MaxFDLimit, min: 8, max: kernel.MaxFdLimit}),
		}),
		"vm": fs.newStaticDir(ctx, root, map[string]kernfs.Inode{
			"max_map_count":     fs.newInode(ctx, root, 0444, newStaticFile("2147483647\n")),
			"mmap_min_addr":     fs.newInode(ctx, root, 0444, &mmapMinAddrData{k: k}),
			"overcommit_memory": fs.newInode(ctx, root, 0444, newStaticFile("0\n")),
		}),
		"net": fs.newSysNetDir(ctx, root, k),
	})
}
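// For reference, the static tree built above is rooted at /proc/sys and
// contains, among others:
//
//	/proc/sys/kernel/hostname
//	/proc/sys/kernel/yama/ptrace_scope
//	/proc/sys/fs/nr_open
//	/proc/sys/vm/mmap_min_addr
//	/proc/sys/net/...  (built by newSysNetDir below)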
// newSysNetDir returns the dentry corresponding to /proc/sys/net directory.
func (fs *filesystem) newSysNetDir(ctx context.Context, root *auth.Credentials, k *kernel.Kernel) kernfs.Inode {
	var contents map[string]kernfs.Inode

	// TODO(gvisor.dev/issue/1833): Support for using the network stack in the
	// network namespace of the calling process.
	if stack := k.RootNetworkNamespace().Stack(); stack != nil {
		contents = map[string]kernfs.Inode{
			"ipv4": fs.newStaticDir(ctx, root, map[string]kernfs.Inode{
				"ip_forward":          fs.newInode(ctx, root, 0444, &ipForwarding{stack: stack}),
				"ip_local_port_range": fs.newInode(ctx, root, 0644, &portRange{stack: stack}),
				"tcp_recovery":        fs.newInode(ctx, root, 0644, &tcpRecoveryData{stack: stack}),
				"tcp_rmem":            fs.newInode(ctx, root, 0644, &tcpMemData{stack: stack, dir: tcpRMem}),
				"tcp_sack":            fs.newInode(ctx, root, 0644, &tcpSackData{stack: stack}),
				"tcp_wmem":            fs.newInode(ctx, root, 0644, &tcpMemData{stack: stack, dir: tcpWMem}),

				// The following files are simple stubs until they are
				// implemented in netstack; most of them are configuration
				// related. We use the value closest to the actual netstack
				// behavior or an empty file. All of these files have mode
				// 0444 (read-only for all users).
				"ip_local_reserved_ports": fs.newInode(ctx, root, 0444, newStaticFile("")),
				"ipfrag_time":             fs.newInode(ctx, root, 0444, newStaticFile("30")),
				"ip_nonlocal_bind":        fs.newInode(ctx, root, 0444, newStaticFile("0")),
				"ip_no_pmtu_disc":         fs.newInode(ctx, root, 0444, newStaticFile("1")),

				// tcp_allowed_congestion_control tells the user what they are
				// able to do as an unprivileged process, so we leave it empty.
				"tcp_allowed_congestion_control":   fs.newInode(ctx, root, 0444, newStaticFile("")),
				"tcp_available_congestion_control": fs.newInode(ctx, root, 0444, newStaticFile("reno")),
				"tcp_congestion_control":           fs.newInode(ctx, root, 0444, newStaticFile("reno")),

				// Many of the following stub files are features netstack
				// doesn't support. The unsupported features return "0" to
				// indicate they are disabled.
				"tcp_base_mss":              fs.newInode(ctx, root, 0444, newStaticFile("1280")),
				"tcp_dsack":                 fs.newInode(ctx, root, 0444, newStaticFile("0")),
				"tcp_early_retrans":         fs.newInode(ctx, root, 0444, newStaticFile("0")),
				"tcp_fack":                  fs.newInode(ctx, root, 0444, newStaticFile("0")),
				"tcp_fastopen":              fs.newInode(ctx, root, 0444, newStaticFile("0")),
				"tcp_fastopen_key":          fs.newInode(ctx, root, 0444, newStaticFile("")),
				"tcp_invalid_ratelimit":     fs.newInode(ctx, root, 0444, newStaticFile("0")),
				"tcp_keepalive_intvl":       fs.newInode(ctx, root, 0444, newStaticFile("0")),
				"tcp_keepalive_probes":      fs.newInode(ctx, root, 0444, newStaticFile("0")),
				"tcp_keepalive_time":        fs.newInode(ctx, root, 0444, newStaticFile("7200")),
				"tcp_mtu_probing":           fs.newInode(ctx, root, 0444, newStaticFile("0")),
				"tcp_no_metrics_save":       fs.newInode(ctx, root, 0444, newStaticFile("1")),
				"tcp_probe_interval":        fs.newInode(ctx, root, 0444, newStaticFile("0")),
				"tcp_probe_threshold":       fs.newInode(ctx, root, 0444, newStaticFile("0")),
				"tcp_retries1":              fs.newInode(ctx, root, 0444, newStaticFile("3")),
				"tcp_retries2":              fs.newInode(ctx, root, 0444, newStaticFile("15")),
				"tcp_rfc1337":               fs.newInode(ctx, root, 0444, newStaticFile("1")),
				"tcp_slow_start_after_idle": fs.newInode(ctx, root, 0444, newStaticFile("1")),
				"tcp_synack_retries":        fs.newInode(ctx, root, 0444, newStaticFile("5")),
				"tcp_syn_retries":           fs.newInode(ctx, root, 0444, newStaticFile("3")),
				"tcp_timestamps":            fs.newInode(ctx, root, 0444, newStaticFile("1")),
			}),
			"core": fs.newStaticDir(ctx, root, map[string]kernfs.Inode{
				"default_qdisc": fs.newInode(ctx, root, 0444, newStaticFile("pfifo_fast")),
				"message_burst": fs.newInode(ctx, root, 0444, newStaticFile("10")),
				"message_cost":  fs.newInode(ctx, root, 0444, newStaticFile("5")),
				"optmem_max":    fs.newInode(ctx, root, 0444, newStaticFile("0")),
				"rmem_default":  fs.newInode(ctx, root, 0444, newStaticFile("212992")),
				"rmem_max":      fs.newInode(ctx, root, 0444, newStaticFile("212992")),
				"somaxconn":     fs.newInode(ctx, root, 0444, newStaticFile("128")),
				"wmem_default":  fs.newInode(ctx, root, 0444, newStaticFile("212992")),
				"wmem_max":      fs.newInode(ctx, root, 0444, newStaticFile("212992")),
			}),
		}
	}

	// If there is no network stack, contents remains nil and /proc/sys/net is
	// created as an empty directory.
	return fs.newStaticDir(ctx, root, contents)
}
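// The stub entries above return fixed strings, while the dynamic entries
// (ip_forward, ip_local_port_range, tcp_recovery, tcp_rmem, tcp_sack and
// tcp_wmem) consult the inet.Stack on each access. For example, the stubs
// always report:
//
//	$ cat /proc/sys/net/ipv4/tcp_congestion_control
//	reno
//	$ cat /proc/sys/net/core/somaxconn
//	128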
// mmapMinAddrData implements vfs.DynamicBytesSource for
// /proc/sys/vm/mmap_min_addr.
//
// +stateify savable
type mmapMinAddrData struct {
	kernfs.DynamicBytesFile

	k *kernel.Kernel
}

var _ dynamicInode = (*mmapMinAddrData)(nil)

// Generate implements vfs.DynamicBytesSource.Generate.
func (d *mmapMinAddrData) Generate(ctx context.Context, buf *bytes.Buffer) error {
	fmt.Fprintf(buf, "%d\n", d.k.Platform.MinUserAddress())
	return nil
}

// hostnameData implements vfs.DynamicBytesSource for /proc/sys/kernel/hostname.
//
// +stateify savable
type hostnameData struct {
	kernfs.DynamicBytesFile
}

var _ dynamicInode = (*hostnameData)(nil)

// Generate implements vfs.DynamicBytesSource.Generate.
func (*hostnameData) Generate(ctx context.Context, buf *bytes.Buffer) error {
	utsns := kernel.UTSNamespaceFromContext(ctx)
	defer utsns.DecRef(ctx)
	buf.WriteString(utsns.HostName())
	buf.WriteString("\n")
	return nil
}
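// Both files above are read-only (mode 0444, no Write implementation):
// hostname yields the UTS namespace's hostname followed by a newline, and
// mmap_min_addr yields the Platform's minimum mappable user address as a
// decimal integer. A purely illustrative read (actual values depend on the
// sandbox configuration):
//
//	$ cat /proc/sys/kernel/hostname
//	runsc-sandbox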
// tcpSackData implements vfs.WritableDynamicBytesSource for
// /proc/sys/net/ipv4/tcp_sack.
//
// +stateify savable
type tcpSackData struct {
	kernfs.DynamicBytesFile

	stack   inet.Stack `state:"wait"`
	enabled *bool
}

var _ vfs.WritableDynamicBytesSource = (*tcpSackData)(nil)

// Generate implements vfs.DynamicBytesSource.Generate.
func (d *tcpSackData) Generate(ctx context.Context, buf *bytes.Buffer) error {
	if d.enabled == nil {
		sack, err := d.stack.TCPSACKEnabled()
		if err != nil {
			return err
		}
		d.enabled = &sack
	}

	val := "0\n"
	if *d.enabled {
		// Technically, this is not quite compatible with Linux. Linux stores
		// these as an integer, so if you write "2" into tcp_sack, you should
		// get 2 back. Tough luck.
		val = "1\n"
	}
	_, err := buf.WriteString(val)
	return err
}

// Write implements vfs.WritableDynamicBytesSource.Write.
func (d *tcpSackData) Write(ctx context.Context, _ *vfs.FileDescription, src usermem.IOSequence, offset int64) (int64, error) {
	if offset != 0 {
		// No need to handle partial writes thus far.
		return 0, linuxerr.EINVAL
	}
	if src.NumBytes() == 0 {
		return 0, nil
	}

	// Limit the amount of memory allocated.
	src = src.TakeFirst(hostarch.PageSize - 1)

	var v int32
	n, err := usermem.CopyInt32StringInVec(ctx, src.IO, src.Addrs, &v, src.Opts)
	if err != nil {
		return 0, err
	}
	if d.enabled == nil {
		d.enabled = new(bool)
	}
	*d.enabled = v != 0
	return n, d.stack.SetTCPSACKEnabled(*d.enabled)
}

// tcpRecoveryData implements vfs.WritableDynamicBytesSource for
// /proc/sys/net/ipv4/tcp_recovery.
//
// +stateify savable
type tcpRecoveryData struct {
	kernfs.DynamicBytesFile

	stack inet.Stack `state:"wait"`
}

var _ vfs.WritableDynamicBytesSource = (*tcpRecoveryData)(nil)

// Generate implements vfs.DynamicBytesSource.Generate.
func (d *tcpRecoveryData) Generate(ctx context.Context, buf *bytes.Buffer) error {
	recovery, err := d.stack.TCPRecovery()
	if err != nil {
		return err
	}

	_, err = buf.WriteString(fmt.Sprintf("%d\n", recovery))
	return err
}

// Write implements vfs.WritableDynamicBytesSource.Write.
func (d *tcpRecoveryData) Write(ctx context.Context, _ *vfs.FileDescription, src usermem.IOSequence, offset int64) (int64, error) {
	if offset != 0 {
		// No need to handle partial writes thus far.
		return 0, linuxerr.EINVAL
	}
	if src.NumBytes() == 0 {
		return 0, nil
	}

	// Limit the amount of memory allocated.
	src = src.TakeFirst(hostarch.PageSize - 1)

	var v int32
	n, err := usermem.CopyInt32StringInVec(ctx, src.IO, src.Addrs, &v, src.Opts)
	if err != nil {
		return 0, err
	}
	if err := d.stack.SetTCPRecovery(inet.TCPLossRecovery(v)); err != nil {
		return 0, err
	}
	return n, nil
}
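// Note that the Write implementations in this file share the same
// conventions: only writes at offset 0 are accepted (anything else fails
// with EINVAL), input is truncated to one page minus one byte to bound the
// allocation, and the payload is parsed as decimal int32 values.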
// tcpMemData implements vfs.WritableDynamicBytesSource for
// /proc/sys/net/ipv4/tcp_rmem and /proc/sys/net/ipv4/tcp_wmem.
//
// +stateify savable
type tcpMemData struct {
	kernfs.DynamicBytesFile

	dir   tcpMemDir
	stack inet.Stack `state:"wait"`

	// mu protects against concurrent reads/writes to FDs based on the dentry
	// backing this byte source.
	mu sync.Mutex `state:"nosave"`
}

var _ vfs.WritableDynamicBytesSource = (*tcpMemData)(nil)

// Generate implements vfs.DynamicBytesSource.Generate.
func (d *tcpMemData) Generate(ctx context.Context, buf *bytes.Buffer) error {
	d.mu.Lock()
	defer d.mu.Unlock()

	size, err := d.readSizeLocked()
	if err != nil {
		return err
	}
	_, err = buf.WriteString(fmt.Sprintf("%d\t%d\t%d\n", size.Min, size.Default, size.Max))
	return err
}

// Write implements vfs.WritableDynamicBytesSource.Write.
func (d *tcpMemData) Write(ctx context.Context, _ *vfs.FileDescription, src usermem.IOSequence, offset int64) (int64, error) {
	if offset != 0 {
		// No need to handle partial writes thus far.
		return 0, linuxerr.EINVAL
	}
	if src.NumBytes() == 0 {
		return 0, nil
	}
	d.mu.Lock()
	defer d.mu.Unlock()

	// Limit the amount of memory allocated.
	src = src.TakeFirst(hostarch.PageSize - 1)
	size, err := d.readSizeLocked()
	if err != nil {
		return 0, err
	}
	buf := []int32{int32(size.Min), int32(size.Default), int32(size.Max)}
	n, err := usermem.CopyInt32StringsInVec(ctx, src.IO, src.Addrs, buf, src.Opts)
	if err != nil {
		return 0, err
	}
	newSize := inet.TCPBufferSize{
		Min:     int(buf[0]),
		Default: int(buf[1]),
		Max:     int(buf[2]),
	}
	if err := d.writeSizeLocked(newSize); err != nil {
		return 0, err
	}
	return n, nil
}

// Precondition: d.mu must be locked.
func (d *tcpMemData) readSizeLocked() (inet.TCPBufferSize, error) {
	switch d.dir {
	case tcpRMem:
		return d.stack.TCPReceiveBufferSize()
	case tcpWMem:
		return d.stack.TCPSendBufferSize()
	default:
		panic(fmt.Sprintf("unknown tcpMemFile type: %v", d.dir))
	}
}

// Precondition: d.mu must be locked.
func (d *tcpMemData) writeSizeLocked(size inet.TCPBufferSize) error {
	switch d.dir {
	case tcpRMem:
		return d.stack.SetTCPReceiveBufferSize(size)
	case tcpWMem:
		return d.stack.SetTCPSendBufferSize(size)
	default:
		panic(fmt.Sprintf("unknown tcpMemFile type: %v", d.dir))
	}
}
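// As in Linux, tcp_rmem and tcp_wmem hold three tab-separated integers,
// "min default max". The parse buffer above is pre-seeded with the current
// values, so a write supplying fewer than three integers should leave the
// trailing fields unchanged. The values below are illustrative only:
//
//	$ cat /proc/sys/net/ipv4/tcp_rmem
//	4096	87380	6291456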
// ipForwarding implements vfs.WritableDynamicBytesSource for
// /proc/sys/net/ipv4/ip_forward.
//
// +stateify savable
type ipForwarding struct {
	kernfs.DynamicBytesFile

	stack   inet.Stack `state:"wait"`
	enabled bool
}

var _ vfs.WritableDynamicBytesSource = (*ipForwarding)(nil)

// Generate implements vfs.DynamicBytesSource.Generate.
func (ipf *ipForwarding) Generate(ctx context.Context, buf *bytes.Buffer) error {
	val := "0\n"
	if ipf.enabled {
		// Technically, this is not quite compatible with Linux. Linux stores
		// these as an integer, so if you write "2" into ip_forward, you
		// should get 2 back. Tough luck.
		val = "1\n"
	}
	buf.WriteString(val)

	return nil
}

// Write implements vfs.WritableDynamicBytesSource.Write.
func (ipf *ipForwarding) Write(ctx context.Context, _ *vfs.FileDescription, src usermem.IOSequence, offset int64) (int64, error) {
	if offset != 0 {
		// No need to handle partial writes thus far.
		return 0, linuxerr.EINVAL
	}
	if src.NumBytes() == 0 {
		return 0, nil
	}

	// Limit input size so as not to impact performance if input size is
	// large.
	src = src.TakeFirst(hostarch.PageSize - 1)

	var v int32
	n, err := usermem.CopyInt32StringInVec(ctx, src.IO, src.Addrs, &v, src.Opts)
	if err != nil {
		return 0, err
	}
	ipf.enabled = v != 0
	if err := ipf.stack.SetForwarding(ipv4.ProtocolNumber, ipf.enabled); err != nil {
		return 0, err
	}
	return n, nil
}

// portRange implements vfs.WritableDynamicBytesSource for
// /proc/sys/net/ipv4/ip_local_port_range.
//
// +stateify savable
type portRange struct {
	kernfs.DynamicBytesFile

	stack inet.Stack `state:"wait"`

	// start and end store the port range. We must save/restore this here,
	// since a netstack instance is created on restore.
	start *uint16
	end   *uint16
}

var _ vfs.WritableDynamicBytesSource = (*portRange)(nil)

// Generate implements vfs.DynamicBytesSource.Generate.
func (pr *portRange) Generate(ctx context.Context, buf *bytes.Buffer) error {
	if pr.start == nil {
		start, end := pr.stack.PortRange()
		pr.start = &start
		pr.end = &end
	}
	_, err := fmt.Fprintf(buf, "%d %d\n", *pr.start, *pr.end)
	return err
}

// Write implements vfs.WritableDynamicBytesSource.Write.
func (pr *portRange) Write(ctx context.Context, _ *vfs.FileDescription, src usermem.IOSequence, offset int64) (int64, error) {
	if offset != 0 {
		// No need to handle partial writes thus far.
		return 0, linuxerr.EINVAL
	}
	if src.NumBytes() == 0 {
		return 0, nil
	}

	// Limit input size so as not to impact performance if input size is
	// large.
	src = src.TakeFirst(hostarch.PageSize - 1)

	ports := make([]int32, 2)
	n, err := usermem.CopyInt32StringsInVec(ctx, src.IO, src.Addrs, ports, src.Opts)
	if err != nil {
		return 0, err
	}

	// Port numbers must be uint16s.
	if ports[0] < 0 || ports[1] < 0 || ports[0] > math.MaxUint16 || ports[1] > math.MaxUint16 {
		return 0, linuxerr.EINVAL
	}

	if err := pr.stack.SetPortRange(uint16(ports[0]), uint16(ports[1])); err != nil {
		return 0, err
	}
	if pr.start == nil {
		pr.start = new(uint16)
		pr.end = new(uint16)
	}
	*pr.start = uint16(ports[0])
	*pr.end = uint16(ports[1])
	return n, nil
}
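// ip_local_port_range reads back as "start end" and accepts two decimal port
// numbers on write; values outside the uint16 range are rejected with EINVAL.
// Illustrative only:
//
//	$ echo "32768 60999" > /proc/sys/net/ipv4/ip_local_port_range
//	$ cat /proc/sys/net/ipv4/ip_local_port_range
//	32768 60999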
// atomicInt32File implements vfs.WritableDynamicBytesSource for sysctls
// represented by atomic int32 objects.
//
// +stateify savable
type atomicInt32File struct {
	kernfs.DynamicBytesFile

	val      *atomicbitops.Int32
	min, max int32
}

var _ vfs.WritableDynamicBytesSource = (*atomicInt32File)(nil)

// Generate implements vfs.DynamicBytesSource.Generate.
func (f *atomicInt32File) Generate(ctx context.Context, buf *bytes.Buffer) error {
	_, err := fmt.Fprintf(buf, "%d\n", f.val.Load())
	return err
}

// Write implements vfs.WritableDynamicBytesSource.Write.
func (f *atomicInt32File) Write(ctx context.Context, _ *vfs.FileDescription, src usermem.IOSequence, offset int64) (int64, error) {
	if offset != 0 {
		// Ignore partial writes.
		return 0, linuxerr.EINVAL
	}
	if src.NumBytes() == 0 {
		return 0, nil
	}

	// Limit the amount of memory allocated.
	src = src.TakeFirst(hostarch.PageSize - 1)

	var v int32
	n, err := usermem.CopyInt32StringInVec(ctx, src.IO, src.Addrs, &v, src.Opts)
	if err != nil {
		return 0, err
	}

	if v < f.min || v > f.max {
		return 0, linuxerr.EINVAL
	}

	f.val.Store(v)
	return n, nil
}

// randUUID returns a string containing a randomly-generated UUID followed by a
// newline.
func randUUID() string {
	var uuid [16]byte
	if _, err := io.ReadFull(rand.Reader, uuid[:]); err != nil {
		panic(fmt.Sprintf("failed to read random bytes for UUID: %v", err))
	}
	uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 UUID
	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 (random)
	return fmt.Sprintf("%x-%x-%x-%x-%x\n", uuid[:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:])
}
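// The resulting boot_id has the canonical 8-4-4-4-12 hex-digit layout with
// the version nibble set to 4 and the variant bits set to 10, for example
// (illustrative):
//
//	9c6a1f3e-2b74-4d19-8e55-0f3b6c2d9a41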