github.com/alibaba/ilogtail/pkg@v0.0.0-20250526110833-c53b480d046c/helper/containercenter/container_center.go (about) 1 // Copyright 2021 iLogtail Authors 2 // 3 // Licensed under the Apache License, Version 2.0 (the "License"); 4 // you may not use this file except in compliance with the License. 5 // You may obtain a copy of the License at 6 // 7 // http://www.apache.org/licenses/LICENSE-2.0 8 // 9 // Unless required by applicable law or agreed to in writing, software 10 // distributed under the License is distributed on an "AS IS" BASIS, 11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 // See the License for the specific language governing permissions and 13 // limitations under the License. 14 15 package containercenter 16 17 import ( 18 "context" 19 "hash/fnv" 20 "path/filepath" 21 "regexp" 22 "runtime" 23 "sort" 24 "strings" 25 "sync" 26 "sync/atomic" 27 "time" 28 29 "github.com/docker/docker/api/types" 30 "github.com/docker/docker/api/types/events" 31 32 "github.com/alibaba/ilogtail/pkg/helper" 33 "github.com/alibaba/ilogtail/pkg/logger" 34 "github.com/alibaba/ilogtail/pkg/util" 35 ) 36 37 var containerCenterInstance *ContainerCenter 38 var containerFindingManager *ContainerDiscoverManager 39 var onceDocker sync.Once 40 41 // set default value to aliyun_logs_ 42 var envConfigPrefix = "aliyun_logs_" 43 44 const DockerTimeFormat = "2006-01-02T15:04:05.999999999Z" 45 46 var DefaultSyncContainersPeriod = time.Second * 3 // should be same as docker_config_update_interval gflag in C 47 var ContainerInfoDeletedTimeout = time.Second * time.Duration(120) 48 var EventListenerTimeout = time.Second * time.Duration(3600) 49 50 // "io.kubernetes.pod.name": "logtail-z2224", 51 // "io.kubernetes.pod.namespace": "kube-system", 52 // "io.kubernetes.pod.uid": "222e88ff-8f08-11e8-851d-00163f008685", 53 const k8sPodNameLabel = "io.kubernetes.pod.name" 54 const k8sPodNameSpaceLabel = "io.kubernetes.pod.namespace" 55 const k8sPodUUIDLabel = 
"io.kubernetes.pod.uid" 56 const k8sInnerLabelPrefix = "io.kubernetes" 57 const k8sInnerAnnotationPrefix = "annotation." 58 59 const ( 60 ContainerStatusRunning = "running" 61 ContainerStatusExited = "exited" 62 ) 63 64 type EnvConfigInfo struct { 65 ConfigName string 66 ConfigItemMap map[string]string 67 } 68 69 // K8SFilter used for find specific container 70 type K8SFilter struct { 71 NamespaceReg *regexp.Regexp 72 PodReg *regexp.Regexp 73 ContainerReg *regexp.Regexp 74 IncludeLabels map[string]string 75 ExcludeLabels map[string]string 76 IncludeLabelRegs map[string]*regexp.Regexp 77 ExcludeLabelRegs map[string]*regexp.Regexp 78 hashKey uint64 79 } 80 81 // CreateK8SFilter ... 82 func CreateK8SFilter(ns, pod, container string, includeK8sLabels, excludeK8sLabels map[string]string) (*K8SFilter, error) { 83 var filter K8SFilter 84 var err error 85 var hashStrBuilder strings.Builder 86 if len(ns) > 0 { 87 if filter.NamespaceReg, err = regexp.Compile(ns); err != nil { 88 return nil, err 89 } 90 } 91 hashStrBuilder.WriteString(ns) 92 hashStrBuilder.WriteString("$$$") 93 if len(pod) > 0 { 94 if filter.PodReg, err = regexp.Compile(pod); err != nil { 95 return nil, err 96 } 97 } 98 hashStrBuilder.WriteString(pod) 99 hashStrBuilder.WriteString("$$$") 100 if len(container) > 0 { 101 if filter.ContainerReg, err = regexp.Compile(container); err != nil { 102 return nil, err 103 } 104 } 105 hashStrBuilder.WriteString(container) 106 hashStrBuilder.WriteString("$$$") 107 108 if filter.IncludeLabels, filter.IncludeLabelRegs, err = SplitRegexFromMap(includeK8sLabels); err != nil { 109 return nil, err 110 } 111 for includeKey, val := range includeK8sLabels { 112 hashStrBuilder.WriteString(includeKey) 113 hashStrBuilder.WriteByte('#') 114 hashStrBuilder.WriteString(val) 115 } 116 if filter.ExcludeLabels, filter.ExcludeLabelRegs, err = SplitRegexFromMap(excludeK8sLabels); err != nil { 117 return nil, err 118 } 119 hashStrBuilder.WriteString("$$$") 120 for excludeKey, val := range 
excludeK8sLabels { 121 hashStrBuilder.WriteString(excludeKey) 122 hashStrBuilder.WriteByte('#') 123 hashStrBuilder.WriteString(val) 124 } 125 126 h := fnv.New64a() 127 _, _ = h.Write([]byte(hashStrBuilder.String())) 128 filter.hashKey = h.Sum64() 129 return &filter, nil 130 } 131 132 // "io.kubernetes.container.logpath": "/var/log/pods/222e88ff-8f08-11e8-851d-00163f008685/logtail_0.log", 133 // "io.kubernetes.container.name": "logtail", 134 // "io.kubernetes.docker.type": "container", 135 // "io.kubernetes.pod.name": "logtail-z2224", 136 // "io.kubernetes.pod.namespace": "kube-system", 137 // "io.kubernetes.pod.uid": "222e88ff-8f08-11e8-851d-00163f008685", 138 type K8SInfo struct { 139 Namespace string 140 Pod string 141 ContainerName string 142 Labels map[string]string 143 PausedContainer bool 144 145 matchedCache map[uint64]bool 146 mu sync.RWMutex 147 } 148 149 func (info *K8SInfo) IsSamePod(o *K8SInfo) bool { 150 return info.Namespace == o.Namespace && info.Pod == o.Pod 151 } 152 153 func (info *K8SInfo) GetLabel(key string) string { 154 info.mu.RLock() 155 defer info.mu.RUnlock() 156 if info.Labels != nil { 157 return info.Labels[key] 158 } 159 return "" 160 } 161 162 // ExtractK8sLabels only work for original docker container. 
163 func (info *K8SInfo) ExtractK8sLabels(containerInfo types.ContainerJSON) { 164 // only pause container has k8s labels 165 if info.ContainerName == "POD" || info.ContainerName == "pause" { 166 info.mu.Lock() 167 defer info.mu.Unlock() 168 info.PausedContainer = true 169 if info.Labels == nil { 170 info.Labels = make(map[string]string) 171 } 172 for key, val := range containerInfo.Config.Labels { 173 if strings.HasPrefix(key, k8sInnerLabelPrefix) || strings.HasPrefix(key, k8sInnerAnnotationPrefix) { 174 continue 175 } 176 info.Labels[key] = val 177 } 178 } 179 } 180 181 func (info *K8SInfo) Merge(o *K8SInfo) { 182 info.mu.Lock() 183 o.mu.Lock() 184 defer info.mu.Unlock() 185 defer o.mu.Unlock() 186 187 // only pause container has k8s labels, so we can only check len(labels) 188 if len(o.Labels) > len(info.Labels) { 189 info.Labels = o.Labels 190 info.matchedCache = nil 191 } 192 if len(o.Labels) < len(info.Labels) { 193 o.Labels = info.Labels 194 o.matchedCache = nil 195 } 196 } 197 198 // IsMatch ... 199 func (info *K8SInfo) IsMatch(filter *K8SFilter) bool { 200 if filter == nil { 201 return true 202 } 203 info.mu.RLock() // 使用读锁 204 isPausedContainer := info.PausedContainer 205 info.mu.RUnlock() // 解读锁 206 if isPausedContainer { 207 return false 208 } 209 210 info.mu.Lock() // 使用写锁 211 defer info.mu.Unlock() // 解写锁 212 if info.matchedCache == nil { 213 info.matchedCache = make(map[uint64]bool) 214 } else if cacheRst, ok := info.matchedCache[filter.hashKey]; ok { 215 return cacheRst 216 } 217 var rst = info.innerMatch(filter) 218 info.matchedCache[filter.hashKey] = rst 219 return rst 220 } 221 222 // innerMatch ... 
// innerMatch performs the uncached filter evaluation: regex checks on
// namespace / pod / container name followed by include/exclude label matching.
// Callers hold info.mu (see IsMatch), which also covers the lazy Labels init.
func (info *K8SInfo) innerMatch(filter *K8SFilter) bool {
	if filter.NamespaceReg != nil && !filter.NamespaceReg.MatchString(info.Namespace) {
		return false
	}
	if filter.PodReg != nil && !filter.PodReg.MatchString(info.Pod) {
		return false
	}
	if filter.ContainerReg != nil && !filter.ContainerReg.MatchString(info.ContainerName) {
		return false
	}
	// if labels is nil, create an empty map
	if info.Labels == nil {
		info.Labels = make(map[string]string)
	}
	return isMapLabelsMatch(filter.IncludeLabels, filter.ExcludeLabels, filter.IncludeLabelRegs, filter.ExcludeLabelRegs, info.Labels)
}

// DockerInfoDetail aggregates everything the collector tracks for one
// container: the raw inspect result, derived log tags, k8s metadata,
// env-declared collection configs, and the resolved IP / rootfs path.
type DockerInfoDetail struct {
	StdoutPath       string
	ContainerInfo    types.ContainerJSON
	ContainerNameTag map[string]string
	K8SInfo          *K8SInfo
	EnvConfigInfoMap map[string]*EnvConfigInfo
	ContainerIP      string
	DefaultRootPath  string

	lastUpdateTime time.Time // last time this entry was refreshed from the runtime
	deleteFlag     bool      // marked as removed; see IsTimeout
}

// IDPrefix returns the shortened container ID used in logs and tags.
func (did *DockerInfoDetail) IDPrefix() string {
	return GetShortID(did.ContainerInfo.ID)
}

// PodName returns the k8s pod name, or "" for non-k8s containers.
func (did *DockerInfoDetail) PodName() string {
	if did.K8SInfo != nil {
		return did.K8SInfo.Pod
	}
	return ""
}

// FinishedAt returns the container's finish timestamp string, or "" when the
// inspect result carries no state.
func (did *DockerInfoDetail) FinishedAt() string {
	if did.ContainerInfo.State != nil {
		return did.ContainerInfo.State.FinishedAt
	}
	return ""
}

// Status returns the container status string (e.g. "running"/"exited"), or ""
// when the inspect result carries no state.
func (did *DockerInfoDetail) Status() string {
	if did.ContainerInfo.State != nil {
		return did.ContainerInfo.State.Status
	}
	return ""
}

// IsTimeout reports whether this entry is stale enough to be dropped: either
// it has not been refreshed within fetchAllSuccessTimeout, or it is flagged
// deleted and has exceeded ContainerInfoDeletedTimeout.
func (did *DockerInfoDetail) IsTimeout() bool {
	nowTime := time.Now()
	if nowTime.Sub(did.lastUpdateTime) > fetchAllSuccessTimeout ||
		(did.deleteFlag && nowTime.Sub(did.lastUpdateTime) > ContainerInfoDeletedTimeout) {
		return true
	}
	return false
}

// GetExternalTags builds a fresh tag map from the requested env keys and k8s
// label keys (each map value is the tag name to emit under).
func (did *DockerInfoDetail) GetExternalTags(envs, k8sLabels map[string]string) map[string]string {
	tags := map[string]string{}
	if len(envs) == 0 && len(k8sLabels) == 0 {
		return tags
	}
	did.GetCustomExternalTags(tags, envs, k8sLabels)
	return tags
}

// GetCustomExternalTags fills tags in place: for each requested env var and
// k8s label, tags[realName] is set to the container's value (possibly "").
func (did *DockerInfoDetail) GetCustomExternalTags(tags, envs, k8sLabels map[string]string) {
	if len(envs) == 0 && len(k8sLabels) == 0 {
		return
	}
	for k, realName := range envs {
		tags[realName] = did.GetEnv(k)
	}
	if did.K8SInfo != nil {
		for k, realName := range k8sLabels {
			tags[realName] = did.K8SInfo.GetLabel(k)
		}
	}
}

// GetEnv returns the value of environment variable key from the container's
// "KEY=VALUE" env list, or "" when absent.
func (did *DockerInfoDetail) GetEnv(key string) string {
	for _, env := range did.ContainerInfo.Config.Env {
		kvPair := strings.SplitN(env, "=", 2)
		if len(kvPair) != 2 {
			continue
		}
		if key == kvPair[0] {
			return kvPair[1]
		}
	}
	return ""
}

// DiffName reports whether the two details refer to different containers
// (different ID or name).
func (did *DockerInfoDetail) DiffName(other *DockerInfoDetail) bool {
	return did.ContainerInfo.ID != other.ContainerInfo.ID || did.ContainerInfo.Name != other.ContainerInfo.Name
}

// DiffMount reports whether the volume count changed between two snapshots.
// NOTE(review): this compares Config.Volumes (image-declared volumes), not the
// Mounts list — presumably intentional as a cheap change signal; confirm.
func (did *DockerInfoDetail) DiffMount(other *DockerInfoDetail) bool {
	return len(did.ContainerInfo.Config.Volumes) != len(other.ContainerInfo.Config.Volumes)
}

// isPathSeparator accepts both separators so paths from Linux and Windows
// containers are handled.
func isPathSeparator(c byte) bool {
	return c == '/' || c == '\\'
}

// FindBestMatchedPath maps an in-container path to a host path: it picks the
// mount whose destination is the longest prefix of pth (on a path-component
// boundary) and returns its source. When no mount matches, it falls back to
// the container's rootfs (DefaultRootPath) with an empty containerPath.
func (did *DockerInfoDetail) FindBestMatchedPath(pth string) (sourcePath, containerPath string) {
	pth = filepath.Clean(pth)
	pthSize := len(pth)

	// logger.Debugf(context.Background(), "FindBestMatchedPath for container %s, target path: %s, containerInfo: %+v", did.IDPrefix(), pth, did.ContainerInfo)

	// check mounts
	var bestMatchedMounts types.MountPoint
	for _, mount := range did.ContainerInfo.Mounts {
		// logger.Debugf("container(%s-%s) mount: source-%s destination-%s", did.IDPrefix(), did.ContainerInfo.Name, mount.Source, mount.Destination)

		dst := filepath.Clean(mount.Destination)
		dstSize := len(dst)

		// prefix match must end exactly at the destination or on a separator,
		// so "/var/log" does not match "/var/logs"
		if strings.HasPrefix(pth, dst) &&
			(pthSize == dstSize || (pthSize > dstSize && isPathSeparator(pth[dstSize]))) &&
			len(bestMatchedMounts.Destination) < dstSize {
			bestMatchedMounts = mount
		}
	}
	if len(bestMatchedMounts.Source) > 0 {
		return bestMatchedMounts.Source, bestMatchedMounts.Destination
	}

	return did.DefaultRootPath, ""
}

// MakeSureEnvConfigExist returns the EnvConfigInfo for configName, creating
// (and registering) an empty one on first use.
func (did *DockerInfoDetail) MakeSureEnvConfigExist(configName string) *EnvConfigInfo {
	if did.EnvConfigInfoMap == nil {
		did.EnvConfigInfoMap = make(map[string]*EnvConfigInfo)
	}
	config, ok := did.EnvConfigInfoMap[configName]
	if !ok {
		envConfig := &EnvConfigInfo{
			ConfigName:    configName,
			ConfigItemMap: make(map[string]string),
		}
		did.EnvConfigInfoMap[configName] = envConfig
		return envConfig
	}
	return config
}

// FindAllEnvConfig find and pre process all env config, add tags for docker info
//
// Env vars of the form "<prefix><configName>_<item>=value" are parsed into
// EnvConfigInfoMap["<configName>"].ConfigItemMap["<item>"]; a var with no
// trailing "_<item>" is stored under the empty item key. "tags" items are
// additionally copied into ContainerNameTag unless already covered by the
// agent's own env tags. When the container declares itself as the agent's own
// config source (ALICLOUD_LOG_DOCKER_ENV_CONFIG_SELF) and selfConfigFlag is
// false, everything collected here is discarded.
func (did *DockerInfoDetail) FindAllEnvConfig(envConfigPrefix string, selfConfigFlag bool) {

	if len(envConfigPrefix) == 0 {
		return
	}
	selfEnvConfig := false
	for _, env := range did.ContainerInfo.Config.Env {
		kvPair := strings.SplitN(env, "=", 2)
		if len(kvPair) != 2 {
			continue
		}
		key := kvPair[0]
		value := kvPair[1]

		if key == "ALICLOUD_LOG_DOCKER_ENV_CONFIG_SELF" && (value == "true" || value == "TRUE") {
			logger.Debug(context.Background(), "this container is self env config", did.ContainerInfo.Name)
			selfEnvConfig = true
			continue
		}

		if !strings.HasPrefix(key, envConfigPrefix) {
			continue
		}
		logger.Debug(context.Background(), "docker env config, name", did.ContainerInfo.Name, "item", key)
		envKey := key[len(envConfigPrefix):]
		lastIndex := strings.LastIndexByte(envKey, '_')
		var configName string
		// end with '_', invalid, just skip
		if lastIndex == len(envKey)-1 {
			continue
		}

		// raw config
		if lastIndex < 0 {
			configName = envKey
		} else {
			configName = envKey[0:lastIndex]
		}
		// invalid config name
		if len(configName) == 0 {
			continue
		}
		envConfig := did.MakeSureEnvConfigExist(configName)
		if lastIndex < 0 {
			envConfig.ConfigItemMap[""] = value
		} else {
			tailKey := envKey[lastIndex+1:]
			envConfig.ConfigItemMap[tailKey] = value
			// process tags
			if tailKey == "tags" {
				tagKV := strings.SplitN(value, "=", 2)
				// if tag exist in EnvTags, just skip this tag
				if len(tagKV) == 2 {
					if !helper.HasEnvTags(tagKV[0], tagKV[1]) {
						did.ContainerNameTag[tagKV[0]] = tagKV[1]
					} else {
						logger.Info(context.Background(), "skip set this tag, as this exist in self env tags, key", tagKV[0], "value", tagKV[1])
					}
				} else {
					// no '=': use the raw value as both tag key and tag value
					if !helper.HasEnvTags(tagKV[0], tagKV[0]) {
						did.ContainerNameTag[tagKV[0]] = tagKV[0]
					} else {
						logger.Info(context.Background(), "skip set this tag, as this exist in self env tags, key&value", tagKV[0])
					}
				}
			}
		}
	}
	logger.Debug(context.Background(), "docker env", did.ContainerInfo.Config.Env, "prefix", envConfigPrefix, "env config", did.EnvConfigInfoMap, "self env config", selfEnvConfig)
	// ignore self env config
	if !selfConfigFlag && selfEnvConfig {
		did.EnvConfigInfoMap = make(map[string]*EnvConfigInfo)
	}
}

type ContainerCenter struct {
	// ContainerMap contains all container information.
	// For the docker scenario, the container list is the same as the result executed with `docker ps` commands. So the container
	// list would also contain the sandbox containers when docker is used as an engine in Kubernetes.
	// For the CRI scenario, the container list only contains the real containers and excludes the sandbox containers. But the
	// sandbox meta would be saved to its bound container.
	containerMap                   map[string]*DockerInfoDetail // all containers will be in this map
	client                         ClientInterface              // container runtime client (docker API compatible)
	containerHelper                ContainerHelperInterface     // indirection for process-liveness checks (testable)
	lastErrMu                      sync.Mutex
	lastErr                        error
	lock                           sync.RWMutex // guards containerMap
	lastUpdateMapTime              int64        // unix nanos of last containerMap change (atomic)
	eventChan                      chan events.Message
	eventChanLock                  sync.Mutex
	containerStateLock             sync.Mutex // serializes fetchAll/fetchOne inspect cycles
	imageLock                      sync.RWMutex
	imageCache                     map[string]string // image ID -> first repo tag
	initStaticContainerInfoSuccess bool
}

// ClientInterface is the subset of the docker client used by ContainerCenter.
type ClientInterface interface {
	ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error)
	ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error)
	ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error)
	Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error)
}

// ContainerHelperInterface abstracts host-side helpers for testability.
type ContainerHelperInterface interface {
	ContainerProcessAlive(pid int) bool
}

type ContainerHelperWrapper struct {
}

func (r *ContainerHelperWrapper) ContainerProcessAlive(pid int) bool {
	return ContainerProcessAlive(pid)
}

// getIPByHosts scans a container's /etc/hosts-style file for a line containing
// hostname and returns its first field (the IP). Falls back to this host's IP
// when the hostname is the host's own; returns "" otherwise.
func getIPByHosts(hostFileName, hostname string) string {
	lines, err := util.ReadLines(hostFileName)
	if err != nil {
		logger.Info(context.Background(), "read container hosts file error, file", hostFileName, "error", err.Error())
		return ""
	}
	for _, line := range lines {
		if strings.HasPrefix(line, "#") {
			continue
		}
		// index must be > 0: in a hosts file the IP precedes the hostname,
		// so a hostname at column 0 is not a valid entry
		if strings.Index(line, hostname) > 0 {
			line = strings.Trim(line, "#/* \t\n")
			return util.ReadFirstBlock(line)
		}
	}
	if util.GetHostName() == hostname {
		return util.GetIPAddress()
	}
	return ""
}

// registerEventListener installs c as the single event sink for runtime events.
func (dc *ContainerCenter) registerEventListener(c chan events.Message) {
	dc.eventChanLock.Lock()
	defer dc.eventChanLock.Unlock()
	dc.eventChan = c
}

// unRegisterEventListener clears the event sink (the argument is ignored;
// only one listener is supported at a time).
func (dc *ContainerCenter) unRegisterEventListener(_ chan events.Message) {
	dc.eventChanLock.Lock()
	defer dc.eventChanLock.Unlock()
	dc.eventChan = nil
}

// lookupImageCache returns the cached repo tag for an image ID.
func (dc *ContainerCenter) lookupImageCache(id string) (string, bool) {
	dc.imageLock.RLock()
	defer dc.imageLock.RUnlock()
	imageName, ok := dc.imageCache[id]
	return imageName, ok
}

// getImageName resolves an image ID to its first repo tag, consulting and
// filling the image cache; returns defaultVal when the ID is empty, the
// client is unavailable, or the inspect yields no tags.
func (dc *ContainerCenter) getImageName(id, defaultVal string) string {
	if len(id) == 0 || dc.client == nil {
		return defaultVal
	}
	if imageName, ok := dc.lookupImageCache(id); ok {
		return imageName
	}

	image, _, err := dc.client.ImageInspectWithRaw(context.Background(), id)
	logger.Debug(context.Background(), "get image name, id", id, "error", err)
	if err == nil && len(image.RepoTags) > 0 {
		dc.imageLock.Lock()
		dc.imageCache[id] = image.RepoTags[0]
		dc.imageLock.Unlock()
		return image.RepoTags[0]
	}
	return defaultVal
}

// getIPAddress determines a container's IP, preferring (in order) the already
// known detail, the network settings from inspect, and finally a scan of the
// container's hosts file.
func (dc *ContainerCenter) getIPAddress(info types.ContainerJSON) string {
	if detail, ok := dc.getContainerDetail(info.ID); ok && detail != nil {
		return detail.ContainerIP
	}
	if info.NetworkSettings != nil && len(info.NetworkSettings.IPAddress) > 0 {
		return info.NetworkSettings.IPAddress
	}
	if len(info.Config.Hostname) > 0 && len(info.HostsPath) > 0 {
		return getIPByHosts(GetMountedFilePath(info.HostsPath), info.Config.Hostname)
	}
	return ""
}

// CreateInfoDetail create DockerInfoDetail with docker.Container
// Container property used in this function : HostsPath, Config.Hostname, Name, Config.Image, Config.Env, Mounts
// ContainerInfo.GraphDriver.Data["UpperDir"] Config.Labels
func (dc *ContainerCenter) CreateInfoDetail(info types.ContainerJSON, envConfigPrefix string, selfConfigFlag bool) *DockerInfoDetail {
	// Generate Log Tags
	containerNameTag := make(map[string]string)
	k8sInfo := K8SInfo{}
	ip := dc.getIPAddress(info)

	containerNameTag["_image_name_"] = dc.getImageName(info.Image, info.Config.Image)
	if strings.HasPrefix(info.Name, "/k8s_") || strings.HasPrefix(info.Name, "k8s_") || strings.Count(info.Name, "_") >= 4 {
		// 1. container name is k8s
		// k8s_php-redis_frontend-2337258262-154p7_default_d8a2e2dd-3617-11e7-a4b0-ecf4bbe5d414_0
		// terway_terway-multi-ip-mgslw_kube-system_b07b491e-995a-11e9-94ea-00163e080931_8
		tags := strings.SplitN(info.Name, "_", 6)
		// containerNamePrefix:k8s
		// containerName:php-redis
		// podFullName:frontend-2337258262-154p7
		// computeHash:154p7
		// deploymentName:frontend
		// replicaSetName:frontend-2337258262
		// namespace:default
		// podUID:d8a2e2dd-3617-11e7-a4b0-ecf4bbe5d414

		// a 6-part name carries a leading "k8s" prefix; skip it
		baseIndex := 0
		if len(tags) == 6 {
			baseIndex = 1
		}
		containerNameTag["_container_name_"] = tags[baseIndex]
		containerNameTag["_pod_name_"] = tags[baseIndex+1]
		containerNameTag["_namespace_"] = tags[baseIndex+2]
		containerNameTag["_pod_uid_"] = tags[baseIndex+3]
		k8sInfo.ContainerName = tags[baseIndex]
		k8sInfo.Pod = tags[baseIndex+1]
		k8sInfo.Namespace = tags[baseIndex+2]
		k8sInfo.ExtractK8sLabels(info)
	} else if _, ok := info.Config.Labels[k8sPodNameLabel]; ok {
		// 2. container labels has k8sPodNameLabel
		containerNameTag["_container_name_"] = info.Name
		containerNameTag["_pod_name_"] = info.Config.Labels[k8sPodNameLabel]
		containerNameTag["_namespace_"] = info.Config.Labels[k8sPodNameSpaceLabel]
		containerNameTag["_pod_uid_"] = info.Config.Labels[k8sPodUUIDLabel]
		k8sInfo.ContainerName = info.Name
		k8sInfo.Pod = info.Config.Labels[k8sPodNameLabel]
		k8sInfo.Namespace = info.Config.Labels[k8sPodNameSpaceLabel]
		// the following method is couped with the CRI adapter, only the original docker container labels
		// would be added to the labels of the k8s info.
		k8sInfo.ExtractK8sLabels(info)
	} else {
		// 3. treat as normal container
		if strings.HasPrefix(info.Name, "/") {
			containerNameTag["_container_name_"] = info.Name[1:]
		} else {
			containerNameTag["_container_name_"] = info.Name
		}
	}
	if len(ip) > 0 {
		containerNameTag["_container_ip_"] = ip
	}
	// normalize and sort mounts so later comparisons/matching are deterministic
	for i := range info.Mounts {
		info.Mounts[i].Source = filepath.Clean(info.Mounts[i].Source)
		info.Mounts[i].Destination = filepath.Clean(info.Mounts[i].Destination)
	}
	sortMounts := func(mounts []types.MountPoint) {
		sort.Slice(mounts, func(i, j int) bool {
			return mounts[i].Source < mounts[j].Source
		})
	}
	sortMounts(info.Mounts)
	did := &DockerInfoDetail{
		StdoutPath:       info.LogPath,
		ContainerInfo:    info,
		ContainerNameTag: containerNameTag,
		K8SInfo:          &k8sInfo,
		ContainerIP:      ip,
		lastUpdateTime:   time.Now(),
	}

	// Find Env Log Configs
	did.FindAllEnvConfig(envConfigPrefix, selfConfigFlag)

	// Find Container FS Root Path on Host
	// @note for overlayfs only, some driver like nas, you can not see it in upper dir
	if info.GraphDriver.Data != nil {
		if rootPath, ok := did.ContainerInfo.GraphDriver.Data["UpperDir"]; ok {
			did.DefaultRootPath = rootPath
		}
	}
	// for cri-runtime
	if criRuntimeWrapper != nil && info.HostConfig != nil && len(did.DefaultRootPath) == 0 {
		did.DefaultRootPath = criRuntimeWrapper.lookupContainerRootfsAbsDir(info)
	}
	logger.Debugf(context.Background(), "container(id: %s, name: %s) default root path is %s", info.ID, info.Name, did.DefaultRootPath)
	return did
}

// getContainerCenterInstance lazily builds the process-wide singleton and
// kicks off container discovery in the background.
func getContainerCenterInstance() *ContainerCenter {
	onceDocker.Do(func() {
		logger.InitLogger()
		// load EnvTags first
		helper.LoadEnvTags()
		containerCenterInstance = &ContainerCenter{
			containerHelper: &ContainerHelperWrapper{},
		}
		containerCenterInstance.imageCache = make(map[string]string)
		containerCenterInstance.containerMap = make(map[string]*DockerInfoDetail)
671 // containerFindingManager works in a producer-consumer model 672 // so even manager is not initialized, it will not affect consumers like service_stdout 673 go func() { 674 retryCount := 0 675 containerFindingManager = NewContainerDiscoverManager() 676 for { 677 if containerFindingManager.Init() { 678 break 679 } 680 if retryCount%10 == 0 { 681 logger.Error(context.Background(), "DOCKER_CENTER_ALARM", "docker center init failed", "retry count", retryCount) 682 } 683 retryCount++ 684 time.Sleep(time.Second * 1) 685 } 686 containerFindingManager.TimerFetch() 687 containerFindingManager.StartSyncContainers() 688 }() 689 }) 690 return containerCenterInstance 691 } 692 693 func SetEnvConfigPrefix(prefix string) { 694 envConfigPrefix = prefix 695 } 696 697 func (dc *ContainerCenter) readStaticConfig(forceFlush bool) { 698 staticDockerContainerLock.Lock() 699 defer staticDockerContainerLock.Unlock() 700 containerInfo, removedIDs, changed, err := tryReadStaticContainerInfo() 701 if err != nil { 702 logger.Warning(context.Background(), "READ_STATIC_CONFIG_ALARM", "read static container info error", err) 703 } 704 if !dc.initStaticContainerInfoSuccess && len(containerInfo) > 0 { 705 dc.initStaticContainerInfoSuccess = true 706 forceFlush = true 707 } 708 709 // 静态文件读取容器信息的时候,只能全量读取,因此使用updateContainers全量更新 710 if forceFlush || changed { 711 containerMap := make(map[string]*DockerInfoDetail) 712 for _, info := range containerInfo { 713 dockerInfoDetail := containerCenterInstance.CreateInfoDetail(info, envConfigPrefix, false) 714 containerMap[info.ID] = dockerInfoDetail 715 } 716 containerCenterInstance.updateContainers(containerMap) 717 } 718 719 if len(removedIDs) > 0 { 720 for _, id := range removedIDs { 721 containerCenterInstance.markRemove(id) 722 } 723 } 724 } 725 726 func (dc *ContainerCenter) flushStaticConfig() { 727 for { 728 dc.readStaticConfig(false) 729 time.Sleep(time.Second) 730 } 731 } 732 733 func (dc *ContainerCenter) setLastError(err error, msg string) 
{ 734 dc.lastErrMu.Lock() 735 dc.lastErr = err 736 dc.lastErrMu.Unlock() 737 if err != nil { 738 logger.Warning(context.Background(), "DOCKER_CENTER_ALARM", "message", msg, "error found", err) 739 } else { 740 logger.Debug(context.Background(), "message", msg) 741 } 742 } 743 744 func isMapLabelsMatch(includeLabel map[string]string, 745 excludeLabel map[string]string, 746 includeLabelRegex map[string]*regexp.Regexp, 747 excludeLabelRegex map[string]*regexp.Regexp, 748 labels map[string]string) bool { 749 if len(includeLabel) != 0 || len(includeLabelRegex) != 0 { 750 matchedFlag := false 751 for key, val := range includeLabel { 752 if dockerVal, ok := labels[key]; ok && (len(val) == 0 || dockerVal == val) { 753 matchedFlag = true 754 break 755 } 756 } 757 // if matched, do not need check regex 758 if !matchedFlag { 759 for key, reg := range includeLabelRegex { 760 if dockerVal, ok := labels[key]; ok && reg.MatchString(dockerVal) { 761 matchedFlag = true 762 break 763 } 764 } 765 } 766 767 if !matchedFlag { 768 return false 769 } 770 } 771 for key, val := range excludeLabel { 772 if dockerVal, ok := labels[key]; ok && (len(val) == 0 || dockerVal == val) { 773 return false 774 } 775 } 776 for key, reg := range excludeLabelRegex { 777 if dockerVal, ok := labels[key]; ok && reg.MatchString(dockerVal) { 778 return false 779 } 780 } 781 return true 782 } 783 784 func isContainerLabelMatch(includeLabel map[string]string, 785 excludeLabel map[string]string, 786 includeLabelRegex map[string]*regexp.Regexp, 787 excludeLabelRegex map[string]*regexp.Regexp, 788 info *DockerInfoDetail) bool { 789 return isMapLabelsMatch(includeLabel, excludeLabel, includeLabelRegex, excludeLabelRegex, info.ContainerInfo.Config.Labels) 790 } 791 792 func isMathEnvItem(env string, 793 staticEnv map[string]string, 794 regexEnv map[string]*regexp.Regexp) bool { 795 var envKey, envValue string 796 splitArray := strings.SplitN(env, "=", 2) 797 if len(splitArray) < 2 { 798 envKey = splitArray[0] 799 } 
else { 800 envKey = splitArray[0] 801 envValue = splitArray[1] 802 } 803 804 if len(staticEnv) > 0 { 805 if value, ok := staticEnv[envKey]; ok && (len(value) == 0 || value == envValue) { 806 return true 807 } 808 } 809 810 if len(regexEnv) > 0 { 811 if reg, ok := regexEnv[envKey]; ok && reg.MatchString(envValue) { 812 return true 813 } 814 } 815 return false 816 } 817 818 func isContainerEnvMatch(includeEnv map[string]string, 819 excludeEnv map[string]string, 820 includeEnvRegex map[string]*regexp.Regexp, 821 excludeEnvRegex map[string]*regexp.Regexp, 822 info *DockerInfoDetail) bool { 823 824 if len(includeEnv) != 0 || len(includeEnvRegex) != 0 { 825 matchFlag := false 826 for _, env := range info.ContainerInfo.Config.Env { 827 if isMathEnvItem(env, includeEnv, includeEnvRegex) { 828 matchFlag = true 829 break 830 } 831 } 832 if !matchFlag { 833 return false 834 } 835 } 836 837 if len(excludeEnv) != 0 || len(excludeEnvRegex) != 0 { 838 for _, env := range info.ContainerInfo.Config.Env { 839 if isMathEnvItem(env, excludeEnv, excludeEnvRegex) { 840 return false 841 } 842 } 843 } 844 845 return true 846 } 847 848 func (dc *ContainerCenter) getAllAcceptedInfo( 849 includeLabel map[string]string, 850 excludeLabel map[string]string, 851 includeLabelRegex map[string]*regexp.Regexp, 852 excludeLabelRegex map[string]*regexp.Regexp, 853 includeEnv map[string]string, 854 excludeEnv map[string]string, 855 includeEnvRegex map[string]*regexp.Regexp, 856 excludeEnvRegex map[string]*regexp.Regexp, 857 k8sFilter *K8SFilter, 858 ) map[string]*DockerInfoDetail { 859 containerMap := make(map[string]*DockerInfoDetail) 860 dc.lock.RLock() 861 defer dc.lock.RUnlock() 862 for id, info := range dc.containerMap { 863 if isContainerLabelMatch(includeLabel, excludeLabel, includeLabelRegex, excludeLabelRegex, info) && 864 isContainerEnvMatch(includeEnv, excludeEnv, includeEnvRegex, excludeEnvRegex, info) && 865 info.K8SInfo.IsMatch(k8sFilter) { 866 containerMap[id] = info 867 } 868 } 869 
	return containerMap
}

// getAllAcceptedInfoV2 incrementally maintains the caller's fullList (all seen
// container IDs) and matchList (IDs passing the filters): containers gone from
// the center are removed from both, surviving matches are refreshed, and
// newly-seen containers are filtered and possibly added. Returns the add/del
// counts plus the IDs added to / deleted from the match list.
func (dc *ContainerCenter) getAllAcceptedInfoV2(
	fullList map[string]bool,
	matchList map[string]*DockerInfoDetail,
	includeLabel map[string]string,
	excludeLabel map[string]string,
	includeLabelRegex map[string]*regexp.Regexp,
	excludeLabelRegex map[string]*regexp.Regexp,
	includeEnv map[string]string,
	excludeEnv map[string]string,
	includeEnvRegex map[string]*regexp.Regexp,
	excludeEnvRegex map[string]*regexp.Regexp,
	k8sFilter *K8SFilter,
) (newCount, delCount int, matchAddedList, matchDeletedList []string) {

	dc.lock.RLock()
	defer dc.lock.RUnlock()
	matchDeletedList = make([]string, 0)
	matchAddedList = make([]string, 0)
	// Remove deleted containers from match list and full list.
	delCount = 0

	for id := range fullList {
		if _, exist := dc.containerMap[id]; !exist {
			delete(fullList, id)
			if _, matched := matchList[id]; matched {
				delete(matchList, id)
				matchDeletedList = append(matchDeletedList, id)
				delCount++
			}
		}
	}

	// Update matched container status
	for id := range matchList {
		c, ok := dc.containerMap[id]
		if ok {
			matchList[id] = c
		} else {
			logger.Warningf(context.Background(), "DOCKER_MATCH_ALARM", "matched container not in docker center")
		}
	}

	// Add new containers to full list and matched to match list.
	newCount = 0

	for id, info := range dc.containerMap {
		if _, exist := fullList[id]; !exist {
			fullList[id] = true
			if isContainerLabelMatch(includeLabel, excludeLabel, includeLabelRegex, excludeLabelRegex, info) &&
				isContainerEnvMatch(includeEnv, excludeEnv, includeEnvRegex, excludeEnvRegex, info) &&
				info.K8SInfo.IsMatch(k8sFilter) {
				newCount++
				matchList[id] = info
				matchAddedList = append(matchAddedList, id)
			}
		}
	}
	return newCount, delCount, matchAddedList, matchDeletedList
}

// getDiffContainers syncs the caller's fullList with the current container
// set (no filtering) and returns the IDs that appeared / disappeared.
func (dc *ContainerCenter) getDiffContainers(fullList map[string]struct{}) (fullAddedList, fullDeletedList []string) {
	dc.lock.RLock()
	defer dc.lock.RUnlock()
	fullDeletedList = make([]string, 0)
	fullAddedList = make([]string, 0)
	for id := range fullList {
		if _, exist := dc.containerMap[id]; !exist {
			delete(fullList, id)
			fullDeletedList = append(fullDeletedList, id)
		}
	}
	for id := range dc.containerMap {
		if _, exist := fullList[id]; !exist {
			fullList[id] = struct{}{}
			fullAddedList = append(fullAddedList, id)
		}
	}
	return fullAddedList, fullDeletedList
}

// getAllSpecificInfo returns all containers accepted by the given predicate.
func (dc *ContainerCenter) getAllSpecificInfo(filter func(*DockerInfoDetail) bool) (infoList []*DockerInfoDetail) {
	dc.lock.RLock()
	defer dc.lock.RUnlock()
	for _, info := range dc.containerMap {
		if filter(info) {
			infoList = append(infoList, info)
		}
	}
	return infoList
}

// processAllContainerInfo runs processor over every container while holding
// the read lock; the processor must not call back into methods taking dc.lock.
func (dc *ContainerCenter) processAllContainerInfo(processor func(*DockerInfoDetail)) {
	dc.lock.RLock()
	defer dc.lock.RUnlock()
	for _, info := range dc.containerMap {
		processor(info)
	}
}

// getContainerDetail looks up one container by full ID.
func (dc *ContainerCenter) getContainerDetail(id string) (containerDetail *DockerInfoDetail, ok bool) {
	dc.lock.RLock()
	defer dc.lock.RUnlock()
	containerDetail, ok = dc.containerMap[id]
	return
}

// getLastUpdateMapTime returns the unix-nano timestamp of the last
// containerMap mutation.
func (dc *ContainerCenter) getLastUpdateMapTime() int64 {
	return atomic.LoadInt64(&dc.lastUpdateMapTime)
}

// refreshLastUpdateMapTime stamps now as the last containerMap mutation time.
func (dc *ContainerCenter) refreshLastUpdateMapTime() {
	atomic.StoreInt64(&dc.lastUpdateMapTime, time.Now().UnixNano())
}

// updateContainers replaces the container map with containerMap, carrying
// over entries missing from the new map until they time out, then re-merges
// per-pod k8s info.
func (dc *ContainerCenter) updateContainers(containerMap map[string]*DockerInfoDetail) {
	dc.lock.Lock()
	defer dc.lock.Unlock()
	for key, container := range dc.containerMap {
		// check removed keys
		if _, ok := containerMap[key]; !ok {
			if !container.IsTimeout() {
				// not timeout, put to new map
				containerMap[key] = container
			}
		}
	}
	// switch to new container map
	if logger.DebugFlag() {
		for i, c := range containerMap {
			logger.Debugf(context.Background(), "Update all containers [%v]: id:%v\tname:%v\tcreated:%v\tstatus:%v detail=%+v",
				i, c.IDPrefix(), c.ContainerInfo.Name, c.ContainerInfo.Created, c.Status(), c.ContainerInfo)
		}
	}
	dc.containerMap = containerMap
	dc.mergeK8sInfo()
	dc.refreshLastUpdateMapTime()
}

// mergeK8sInfo groups containers by namespace@pod and merges their K8SInfo in
// both directions so every container of a pod ends up with the richest label
// set. Caller holds dc.lock.
func (dc *ContainerCenter) mergeK8sInfo() {
	k8sInfoMap := make(map[string][]*K8SInfo)
	for _, container := range dc.containerMap {
		if container.K8SInfo == nil {
			continue
		}
		key := container.K8SInfo.Namespace + "@" + container.K8SInfo.Pod
		k8sInfoMap[key] = append(k8sInfoMap[key], container.K8SInfo)
	}
	for key, k8sInfo := range k8sInfoMap {
		if len(k8sInfo) < 2 {
			logger.Debug(context.Background(), "k8s pod's container count < 2", key)
			continue
		}
		// @note we need test pod with many sidecar containers
		// first pass gathers everything into k8sInfo[0], second pass fans it
		// back out to the rest
		for i := 1; i < len(k8sInfo); i++ {
			k8sInfo[0].Merge(k8sInfo[i])
		}
		for i := 1; i < len(k8sInfo); i++ {
			k8sInfo[i].Merge(k8sInfo[0])
		}
	}
}

// updateContainer inserts/refreshes a single container; when the container is
// new, its k8s info is merged with existing containers of the same pod.
func (dc *ContainerCenter) updateContainer(id string, container *DockerInfoDetail) {
	dc.lock.Lock()
	defer dc.lock.Unlock()
	if container.K8SInfo != nil {
		if _, ok := dc.containerMap[id]; !ok {
			for _, oldContainer := range dc.containerMap {
				if oldContainer.K8SInfo != nil && oldContainer.K8SInfo.IsSamePod(container.K8SInfo) {
					oldContainer.K8SInfo.Merge(container.K8SInfo)
				}
			}
		}
	}
	if logger.DebugFlag() {
		// bytes, _ := json.Marshal(container)
		// logger.Debug(context.Background(), "update container info", string(bytes))
		logger.Debugf(context.Background(), "Update one container: id:%v\tname:%v\tcreated:%v\tstatus:%v detail=%+v",
			container.IDPrefix(), container.ContainerInfo.Name, container.ContainerInfo.Created, container.Status(), container.ContainerInfo)
	}
	dc.containerMap[id] = container
	dc.refreshLastUpdateMapTime()
}

// fetchAll lists every container (including stopped ones), inspects each with
// up to 3 attempts, drops containers whose process is no longer alive, and
// replaces the container map with the result.
func (dc *ContainerCenter) fetchAll() error {
	dc.containerStateLock.Lock()
	defer dc.containerStateLock.Unlock()
	containers, err := dc.client.ContainerList(context.Background(), types.ContainerListOptions{All: true})
	if err != nil {
		dc.setLastError(err, "list container error")
		return err
	}
	logger.Debug(context.Background(), "fetch all", containers)
	var containerMap = make(map[string]*DockerInfoDetail)

	for _, container := range containers {
		var containerDetail types.ContainerJSON
		// retry inspect up to 3 times with a 5s pause between attempts
		// NOTE(review): the sleep also runs after the final failed attempt
		for idx := 0; idx < 3; idx++ {
			if containerDetail, err = dc.client.ContainerInspect(context.Background(), container.ID); err == nil {
				break
			}
			time.Sleep(time.Second * 5)
		}
		if err == nil {
			if !dc.containerHelper.ContainerProcessAlive(containerDetail.State.Pid) {
				continue
			}
			containerMap[container.ID] = dc.CreateInfoDetail(containerDetail, envConfigPrefix, false)
		} else {
			dc.setLastError(err, "inspect container error "+container.ID)
		}
	}
	dc.updateContainers(containerMap)
	return nil
}

// fetchOne inspects a single container and updates its entry; a "running"
// container whose process is dead is normalized to "exited". With
// tryFindSandbox, the container's sandbox (via the io.kubernetes.sandbox.id
// label) is inspected and updated as well.
func (dc *ContainerCenter) fetchOne(containerID string, tryFindSandbox bool) error {
	dc.containerStateLock.Lock()
	defer dc.containerStateLock.Unlock()
	containerDetail, err := dc.client.ContainerInspect(context.Background(), containerID)
	if err != nil {
		dc.setLastError(err, "inspect container error "+containerID)
		return err
	}
	if containerDetail.State.Status == ContainerStatusRunning && !dc.containerHelper.ContainerProcessAlive(containerDetail.State.Pid) {
		containerDetail.State.Status = ContainerStatusExited
	}
	// In the docker scenario:
	// if tryFindSandbox is false, the caller is expected to invoke fetchOne
	// twice — once with the sandbox id and once with the business container id;
	// if tryFindSandbox is true, the caller passes only the business container
	// id and relies on fetchOne to fill in the sandbox info itself.
	dc.updateContainer(containerID, dc.CreateInfoDetail(containerDetail, envConfigPrefix, false))
	logger.Debug(context.Background(), "update container", containerID, "detail", containerDetail)
	if tryFindSandbox && containerDetail.Config != nil {
		if id := containerDetail.Config.Labels["io.kubernetes.sandbox.id"]; id != "" {
			containerDetail, err = dc.client.ContainerInspect(context.Background(), id)
			if err != nil {
				dc.setLastError(err, "inspect sandbox container error "+id)
			} else {
				if containerDetail.State.Status == ContainerStatusRunning && !dc.containerHelper.ContainerProcessAlive(containerDetail.State.Pid) {
					containerDetail.State.Status = ContainerStatusExited
				}
				dc.updateContainer(id, dc.CreateInfoDetail(containerDetail, envConfigPrefix, false))
				logger.Debug(context.Background(), "update sandbox container", id, "detail", containerDetail)
			}
		}
	}
	return err
}

// We mark container removed if it is exited or its metadata cannot be accessed
// e.g. cannot docker inspect / crictl inspect it.
1123 func (dc *ContainerCenter) markRemove(containerID string) { 1124 dc.lock.Lock() 1125 defer dc.lock.Unlock() 1126 if container, ok := dc.containerMap[containerID]; ok { 1127 if container.deleteFlag { 1128 return 1129 } 1130 logger.Debugf(context.Background(), "mark remove container: id:%v\tname:%v\tcreated:%v\tstatus:%v detail=%+v", 1131 container.IDPrefix(), container.ContainerInfo.Name, container.ContainerInfo.Created, container.Status(), container.ContainerInfo) 1132 container.ContainerInfo.State.Status = ContainerStatusExited 1133 container.deleteFlag = true 1134 container.lastUpdateTime = time.Now() 1135 dc.refreshLastUpdateMapTime() 1136 } 1137 } 1138 1139 func (dc *ContainerCenter) cleanTimeoutContainer() { 1140 dc.lock.Lock() 1141 defer dc.lock.Unlock() 1142 hasDelete := false 1143 for key, container := range dc.containerMap { 1144 // Comfirm to delete: 1145 // 1. The container is marked deleted for a while. 1146 // 2. The time of last success fetch all is too old. 1147 if container.IsTimeout() { 1148 logger.Debugf(context.Background(), "delete container, id:%v\tname:%v\tcreated:%v\tstatus:%v\tdetail:%+v", 1149 container.IDPrefix(), container.ContainerInfo.Name, container.ContainerInfo.Created, container.Status(), container.ContainerInfo) 1150 delete(dc.containerMap, key) 1151 hasDelete = true 1152 } 1153 } 1154 if hasDelete { 1155 dc.refreshLastUpdateMapTime() 1156 } 1157 } 1158 1159 func (dc *ContainerCenter) sweepCache() { 1160 // clear unuseful cache 1161 usedImageIDSet := make(map[string]bool) 1162 dc.lock.RLock() 1163 for _, container := range dc.containerMap { 1164 usedImageIDSet[container.ContainerInfo.Image] = true 1165 } 1166 dc.lock.RUnlock() 1167 1168 dc.imageLock.Lock() 1169 1170 for key := range dc.imageCache { 1171 if _, ok := usedImageIDSet[key]; !ok { 1172 delete(dc.imageCache, key) 1173 } 1174 } 1175 dc.imageLock.Unlock() 1176 } 1177 1178 func containerCenterRecover() { 1179 if err := recover(); err != nil { 1180 trace := make([]byte, 
2048) 1181 runtime.Stack(trace, true) 1182 logger.Error(context.Background(), "PLUGIN_RUNTIME_ALARM", "docker center runtime error", err, "stack", string(trace)) 1183 } 1184 } 1185 1186 func (dc *ContainerCenter) initClient() error { 1187 var err error 1188 // do not CreateDockerClient multi times 1189 if dc.client == nil { 1190 if dc.client, err = CreateDockerClient(); err != nil { 1191 dc.setLastError(err, "init docker client from env error") 1192 return err 1193 } 1194 } 1195 return nil 1196 } 1197 1198 func (dc *ContainerCenter) eventListener() { 1199 errorCount := 0 1200 defer containerCenterRecover() 1201 timer := time.NewTimer(EventListenerTimeout) 1202 var err error 1203 for { 1204 logger.Info(context.Background(), "docker event listener", "start") 1205 ctx, cancel := context.WithCancel(context.Background()) 1206 events, errors := dc.client.Events(ctx, types.EventsOptions{}) 1207 breakFlag := false 1208 for !breakFlag { 1209 timer.Reset(EventListenerTimeout) 1210 select { 1211 case event, ok := <-events: 1212 if !ok { 1213 logger.Errorf(context.Background(), "DOCKER_EVENT_ALARM", "docker event listener stop") 1214 errorCount++ 1215 breakFlag = true 1216 break 1217 } 1218 logger.Debug(context.Background(), "docker event captured", event) 1219 errorCount = 0 1220 switch event.Status { 1221 case "start", "restart": 1222 _ = dc.fetchOne(event.ID, false) 1223 case "rename": 1224 _ = dc.fetchOne(event.ID, false) 1225 case "die": 1226 dc.markRemove(event.ID) 1227 default: 1228 } 1229 dc.eventChanLock.Lock() 1230 if dc.eventChan != nil { 1231 // no block insert 1232 select { 1233 case dc.eventChan <- event: 1234 default: 1235 logger.Error(context.Background(), "DOCKER_EVENT_ALARM", "event queue is full, miss event", event) 1236 } 1237 } 1238 dc.eventChanLock.Unlock() 1239 case err = <-errors: 1240 logger.Error(context.Background(), "DOCKER_EVENT_ALARM", "docker event listener error", err) 1241 breakFlag = true 1242 case <-timer.C: 1243 
logger.Errorf(context.Background(), "DOCKER_EVENT_ALARM", "no docker event in 1 hour. Reset event listener") 1244 breakFlag = true 1245 } 1246 } 1247 cancel() 1248 if errorCount > 10 && criRuntimeWrapper != nil { 1249 logger.Info(context.Background(), "docker listener fails and cri runtime wrapper is valid", "stop docker listener") 1250 break 1251 } 1252 // if always error, sleep 300 secs 1253 if errorCount > 30 { 1254 time.Sleep(time.Duration(300) * time.Second) 1255 } else { 1256 time.Sleep(time.Duration(10) * time.Second) 1257 } 1258 } 1259 dc.setLastError(err, "docker event stream closed") 1260 }