go.mondoo.com/cnquery@v0.0.0-20231005093811-59568235f6ea/explorer/scan/local_scanner.go (about) 1 // Copyright (c) Mondoo, Inc. 2 // SPDX-License-Identifier: BUSL-1.1 3 4 package scan 5 6 import ( 7 "context" 8 "errors" 9 "fmt" 10 "os" 11 "strings" 12 sync "sync" 13 "time" 14 15 "go.mondoo.com/cnquery/providers-sdk/v1/inventory/manager" 16 "go.mondoo.com/cnquery/providers-sdk/v1/plugin" 17 18 "github.com/mattn/go-isatty" 19 "github.com/rs/zerolog/log" 20 "github.com/segmentio/ksuid" 21 "github.com/spf13/viper" 22 "go.mondoo.com/cnquery" 23 "go.mondoo.com/cnquery/cli/config" 24 "go.mondoo.com/cnquery/cli/progress" 25 "go.mondoo.com/cnquery/explorer" 26 "go.mondoo.com/cnquery/explorer/executor" 27 "go.mondoo.com/cnquery/internal/datalakes/inmemory" 28 "go.mondoo.com/cnquery/llx" 29 "go.mondoo.com/cnquery/logger" 30 "go.mondoo.com/cnquery/mql" 31 "go.mondoo.com/cnquery/mrn" 32 "go.mondoo.com/cnquery/providers" 33 "go.mondoo.com/cnquery/providers-sdk/v1/inventory" 34 "go.mondoo.com/cnquery/providers-sdk/v1/upstream" 35 "go.mondoo.com/cnquery/utils/multierr" 36 "go.mondoo.com/ranger-rpc/codes" 37 "go.mondoo.com/ranger-rpc/status" 38 "google.golang.org/protobuf/proto" 39 ) 40 41 type assetWithRuntime struct { 42 asset *inventory.Asset 43 runtime *providers.Runtime 44 } 45 46 type LocalScanner struct { 47 ctx context.Context 48 fetcher *fetcher 49 upstream *upstream.UpstreamConfig 50 recording providers.Recording 51 } 52 53 type ScannerOption func(*LocalScanner) 54 55 func WithUpstream(u *upstream.UpstreamConfig) func(s *LocalScanner) { 56 return func(s *LocalScanner) { 57 s.upstream = u 58 } 59 } 60 61 func WithRecording(r providers.Recording) func(s *LocalScanner) { 62 return func(s *LocalScanner) { 63 s.recording = r 64 } 65 } 66 67 func NewLocalScanner(opts ...ScannerOption) *LocalScanner { 68 ls := &LocalScanner{ 69 fetcher: newFetcher(), 70 } 71 72 for i := range opts { 73 opts[i](ls) 74 } 75 76 return ls 77 } 78 79 func (s *LocalScanner) Run(ctx context.Context, 
job *Job) (*explorer.ReportCollection, error) { 80 if job == nil { 81 return nil, status.Errorf(codes.InvalidArgument, "missing scan job") 82 } 83 84 if job.Inventory == nil { 85 return nil, status.Errorf(codes.InvalidArgument, "missing inventory") 86 } 87 88 if ctx == nil { 89 return nil, errors.New("no context provided to run job with local scanner") 90 } 91 92 upstreamConfig, err := s.getUpstreamConfig(job.Inventory, false) 93 if err != nil { 94 return nil, err 95 } 96 reports, _, err := s.distributeJob(job, ctx, upstreamConfig) 97 if err != nil { 98 if code := status.Code(err); code == codes.Unauthenticated { 99 return nil, multierr.Wrap(err, 100 "The Mondoo Platform credentials provided at "+viper.ConfigFileUsed()+ 101 " didn't successfully authenticate with Mondoo Platform. "+ 102 "Please re-authenticate with Mondoo Platform. "+ 103 "To learn how, read https://mondoo.com/docs/cnspec/cnspec-adv-install/registration.") 104 } 105 return nil, err 106 } 107 108 return reports, nil 109 } 110 111 // returns the upstream config for the job. 
If the job has a specified config, it has precedence 112 // over the automatically detected one 113 func (s *LocalScanner) getUpstreamConfig(inv *inventory.Inventory, incognito bool) (*upstream.UpstreamConfig, error) { 114 jobCreds := inv.GetSpec().GetUpstreamCredentials() 115 if s.upstream == nil && jobCreds == nil { 116 return nil, errors.New("no default or job upstream config provided") 117 } 118 u := proto.Clone(s.upstream).(*upstream.UpstreamConfig) 119 u.Incognito = incognito 120 if jobCreds != nil { 121 u.ApiEndpoint = jobCreds.GetApiEndpoint() 122 u.Creds = jobCreds 123 u.SpaceMrn = jobCreds.GetParentMrn() 124 } 125 return u, nil 126 } 127 128 func (s *LocalScanner) RunIncognito(ctx context.Context, job *Job) (*explorer.ReportCollection, error) { 129 if job == nil { 130 return nil, status.Errorf(codes.InvalidArgument, "missing scan job") 131 } 132 133 if job.Inventory == nil { 134 return nil, status.Errorf(codes.InvalidArgument, "missing inventory") 135 } 136 137 if ctx == nil { 138 return nil, errors.New("no context provided to run job with local scanner") 139 } 140 141 // skip the error check, we are running in incognito 142 upstreamConfig, _ := s.getUpstreamConfig(job.Inventory, true) 143 reports, _, err := s.distributeJob(job, ctx, upstreamConfig) 144 if err != nil { 145 return nil, err 146 } 147 148 return reports, nil 149 } 150 151 // preprocessPolicyFilters expends short registry mrns into full mrns 152 func preprocessQueryPackFilters(filters []string) []string { 153 res := make([]string, len(filters)) 154 for i := range filters { 155 f := filters[i] 156 if strings.HasPrefix(f, "//") { 157 res[i] = f 158 continue 159 } 160 161 // expand short registry mrns 162 m := strings.Split(f, "/") 163 if len(m) == 2 { 164 res[i] = explorer.NewQueryPackMrn(m[0], m[1]) 165 } else { 166 res[i] = f 167 } 168 } 169 return res 170 } 171 172 func (s *LocalScanner) distributeJob(job *Job, ctx context.Context, upstream *upstream.UpstreamConfig) 
(*explorer.ReportCollection, bool, error) {
	log.Info().Msgf("discover related assets for %d asset(s)", len(job.Inventory.Spec.Assets))

	im, err := manager.NewManager(manager.WithInventory(job.Inventory, providers.DefaultRuntime()))
	if err != nil {
		return nil, false, errors.New("failed to resolve inventory for connection")
	}
	assetList := im.GetAssets()

	var assets []*assetWithRuntime
	// note: asset candidate runtimes are the runtime that discovered them
	var assetCandidates []*assetWithRuntime

	// we connect and perform discovery for each asset in the job inventory
	for i := range assetList {
		asset := assetList[i]
		resolvedAsset, err := im.ResolveAsset(asset)
		if err != nil {
			return nil, false, err
		}

		runtime, err := providers.Coordinator.RuntimeFor(asset, providers.DefaultRuntime())
		if err != nil {
			log.Error().Err(err).Str("asset", asset.Name).Msg("unable to create runtime for asset")
			continue
		}
		runtime.SetRecording(s.recording)

		if err := runtime.Connect(&plugin.ConnectReq{
			Features: cnquery.GetFeatures(ctx),
			Asset:    resolvedAsset,
			Upstream: upstream,
		}); err != nil {
			log.Error().Err(err).Msg("unable to connect to asset")
			continue
		}

		// for all discovered assets, we apply mondoo-specific labels that come from the root asset
		for _, a := range runtime.Provider.Connection.GetInventory().GetSpec().GetAssets() {
			a.AddMondooLabels(asset)
		}
		processedAssets, err := providers.ProcessAssetCandidates(runtime, runtime.Provider.Connection, upstream, "")
		if err != nil {
			return nil, false, err
		}
		for i := range processedAssets {
			assetCandidates = append(assetCandidates, &assetWithRuntime{
				asset:   processedAssets[i],
				runtime: runtime,
			})
		}
		// TODO: we want to keep better track of errors, since there may be
		// multiple assets coming in. It's annoying to abort the scan if we get one
		// error at this stage.

		// we grab the asset from the connection, because it contains all the
		// detected metadata (and IDs)
		// assets = append(assets, runtime.Provider.Connection.Asset)
	}

	// for each asset candidate, we initialize a new runtime and connect to it.
	for i := range assetCandidates {
		candidate := assetCandidates[i]

		var runtime *providers.Runtime
		// NOTE(review): this indexes Connections[0] without a length check;
		// discovery appears to always set at least one connection — confirm.
		if candidate.asset.Connections[0].Type == "k8s" {
			runtime, err = providers.Coordinator.RuntimeFor(candidate.asset, providers.DefaultRuntime())
			if err != nil {
				return nil, false, err
			}
		} else {
			runtime, err = providers.Coordinator.EphemeralRuntimeFor(candidate.asset)
			if err != nil {
				return nil, false, err
			}
		}
		runtime.SetRecording(candidate.runtime.Recording)

		// NOTE(review): this uses config.Features while the discovery loop above
		// uses cnquery.GetFeatures(ctx) — verify whether the difference is intended.
		err = runtime.Connect(&plugin.ConnectReq{
			Features: config.Features,
			Asset:    candidate.asset,
			Upstream: upstream,
		})
		if err != nil {
			log.Error().Err(err).Str("asset", candidate.asset.Name).Msg("unable to connect to asset")
			continue
		}

		assets = append(assets, &assetWithRuntime{
			asset:   candidate.asset,
			runtime: runtime,
		})
	}

	// no connectable assets means there is nothing to scan
	if len(assets) == 0 {
		return nil, false, nil
	}

	justAssets := []*inventory.Asset{}
	for _, asset := range assets {
		asset.asset.AddAnnotations(job.GetAnnotations())
		asset.asset.KindString = asset.asset.GetPlatform().Kind
		justAssets = append(justAssets, asset.asset)
	}

	// sync assets
	if upstream != nil && upstream.ApiEndpoint != "" && !upstream.Incognito {
		log.Info().Msg("synchronize assets")
		client, err := upstream.InitClient()
		if err != nil {
			return nil, false, err
		}

		services, err := explorer.NewRemoteServices(client.ApiEndpoint, client.Plugins, client.HttpClient)
		if err != nil {
			return nil, false, err
		}

		inventory.DeprecatedV8CompatAssets(justAssets)
		resp, err := services.SynchronizeAssets(ctx, &explorer.SynchronizeAssetsReq{
			SpaceMrn: client.SpaceMrn,
			List:     justAssets,
		})
		if err != nil {
			return nil, false, err
		}
		log.Debug().Int("assets", len(resp.Details)).Msg("got assets details")
		platformAssetMapping := make(map[string]*explorer.SynchronizeAssetsRespAssetDetail)
		for i := range resp.Details {
			log.Debug().Str("platform-mrn", resp.Details[i].PlatformMrn).Str("asset", resp.Details[i].AssetMrn).Msg("asset mapping")
			platformAssetMapping[resp.Details[i].PlatformMrn] = resp.Details[i]
		}

		// attach the asset details to the assets list
		for i := range assets {
			log.Debug().Str("asset", assets[i].asset.Name).Strs("platform-ids", assets[i].asset.PlatformIds).Msg("update asset")
			platformMrn := assets[i].asset.PlatformIds[0]
			// FIX: guard the map lookup. A platform ID that upstream did not
			// return a detail for previously caused a nil pointer dereference.
			if detail, ok := platformAssetMapping[platformMrn]; ok {
				assets[i].asset.Mrn = detail.AssetMrn
				assets[i].asset.Url = detail.Url
			} else {
				log.Warn().Str("platform-mrn", platformMrn).Str("asset", assets[i].asset.Name).Msg("no asset details returned for asset")
			}
		}
	} else {
		// ensure we have non-empty asset MRNs
		for i := range assets {
			cur := assets[i]
			if cur.asset.Mrn == "" {
				randID := "//" + explorer.SERVICE_NAME + "/" + explorer.MRN_RESOURCE_ASSET + "/" + ksuid.New().String()
				x, err := mrn.NewMRN(randID)
				if err != nil {
					return nil, false, multierr.Wrap(err, "failed to generate a random asset MRN")
				}
				cur.asset.Mrn = x.String()
			}
		}
	}

	// plan scan jobs
	reporter := NewAggregateReporter(justAssets)
	// if a bundle was provided check that it matches the filter, bundles can also be downloaded
	// later therefore we do not want to stop execution here
	if job.Bundle != nil && job.Bundle.FilterQueryPacks(job.QueryPackFilters) {
		return nil, false, errors.New("all available packs filtered out. nothing to do")
	}

	// set up one progress-bar entry per asset, keyed by its first platform ID
	progressBarElements := map[string]string{}
	orderedKeys := []string{}
	for i := range assets {
		// this shouldn't happen, but might
		// it normally indicates a bug in the provider
		if presentAsset, present := progressBarElements[assets[i].asset.PlatformIds[0]]; present {
			return nil, false, fmt.Errorf("asset %s and %s have the same platform id %s", presentAsset, assets[i].asset.Name, assets[i].asset.PlatformIds[0])
		}
		progressBarElements[assets[i].asset.PlatformIds[0]] = assets[i].asset.Name
		orderedKeys = append(orderedKeys, assets[i].asset.PlatformIds[0])
	}
	var multiprogress progress.MultiProgress
	if isatty.IsTerminal(os.Stdout.Fd()) && !strings.EqualFold(logger.GetLevel(), "debug") && !strings.EqualFold(logger.GetLevel(), "trace") {
		var err error
		multiprogress, err = progress.NewMultiProgressBars(progressBarElements, orderedKeys)
		if err != nil {
			return nil, false, multierr.Wrap(err, "failed to create progress bars")
		}
	} else {
		// TODO: adjust naming
		multiprogress = progress.NoopMultiProgressBars{}
	}

	scanGroup := sync.WaitGroup{}
	scanGroup.Add(1)
	finished := false
	go func() {
		defer scanGroup.Done()
		for i := range assets {
			asset := assets[i].asset
			runtime := assets[i].runtime

			// Make sure the context has not been canceled in the meantime. Note that this approach works only for single threaded execution. If we have more than 1 thread calling this function,
			// we need to solve this at a different level.
			select {
			case <-ctx.Done():
				log.Warn().Msg("request context has been canceled")
				// When we scan concurrently, we need to call Errored(asset.Mrn) status for this asset
				multiprogress.Close()
				return
			default:
			}

			p := &progress.MultiProgressAdapter{Key: asset.PlatformIds[0], Multi: multiprogress}
			s.RunAssetJob(&AssetJob{
				DoRecord:         job.DoRecord,
				UpstreamConfig:   upstream,
				Asset:            asset,
				Bundle:           job.Bundle,
				Props:            job.Props,
				QueryPackFilters: preprocessQueryPackFilters(job.QueryPackFilters),
				Ctx:              ctx,
				Reporter:         reporter,
				ProgressReporter: p,
				runtime:          runtime,
			})

			// we don't need the runtime anymore, so close it
			runtime.Close()
		}
		finished = true
	}()

	scanGroup.Add(1)
	go func() {
		defer scanGroup.Done()
		multiprogress.Open()
	}()

	scanGroup.Wait()
	return reporter.Reports(), finished, nil
}

// RunAssetJob scans a single connected asset and feeds the result (or the
// scan error) into the job's reporter and progress reporter.
func (s *LocalScanner) RunAssetJob(job *AssetJob) {
	log.Debug().Msgf("connecting to asset %s", job.Asset.HumanName())
	results, err := s.runMotorizedAsset(job)
	if err != nil {
		log.Debug().Err(err).Str("asset", job.Asset.Name).Msg("could not scan asset")
		job.Reporter.AddScanError(job.Asset, err)

		es := explorer.NewErrorStatus(err)
		if es.ErrorCode() == explorer.NotApplicable {
			job.ProgressReporter.NotApplicable()
		} else {
			job.ProgressReporter.Errored()
		}
		return
	}

	job.Reporter.AddReport(job.Asset, results)
}

// runMotorizedAsset executes the asset scan against an in-memory datalake,
// optionally wiring up remote upstream services.
func (s *LocalScanner) runMotorizedAsset(job *AssetJob) (*AssetReport, error) {
	var res *AssetReport
	var scanErr error

	runtimeErr := inmemory.WithDb(job.runtime, func(db *inmemory.Db, services *explorer.LocalServices) error {
		if job.UpstreamConfig != nil && job.UpstreamConfig.ApiEndpoint != "" && !job.UpstreamConfig.Incognito {
			log.Debug().Msg("using API endpoint " + job.UpstreamConfig.ApiEndpoint)
			client, err :=
job.UpstreamConfig.InitClient()
			if err != nil {
				return err
			}

			upstream, err := explorer.NewRemoteServices(client.ApiEndpoint, client.Plugins, client.HttpClient)
			if err != nil {
				return err
			}
			services.Upstream = upstream
		}

		scanner := &localAssetScanner{
			db:       db,
			services: services,
			job:      job,
			fetcher:  s.fetcher,
			Runtime:  job.runtime,
		}
		res, scanErr = scanner.run()
		return scanErr
	})
	if runtimeErr != nil {
		return res, runtimeErr
	}

	return res, scanErr
}

// localAssetScanner carries the per-asset state needed to prepare and
// execute a scan against a single connected asset.
type localAssetScanner struct {
	db       *inmemory.Db
	services *explorer.LocalServices
	job      *AssetJob
	fetcher  *fetcher

	Runtime  llx.Runtime
	Progress progress.Progress
}

// run prepares the asset (bundle, assignments, props) and then executes
// the assigned query packs.
func (s *localAssetScanner) run() (*AssetReport, error) {
	if err := s.prepareAsset(); err != nil {
		return nil, err
	}

	res, err := s.runQueryPack()
	log.Debug().Str("asset", s.job.Asset.Mrn).Msg("scan complete")
	return res, err
}

// prepareAsset makes sure a bundle is available locally, uploads it to the
// local services, assigns its packs to the asset, and sets any properties.
func (s *localAssetScanner) prepareAsset() error {
	var hub explorer.QueryHub = s.services
	var conductor explorer.QueryConductor = s.services

	// if we are using upstream we get the bundle from there
	if s.job.UpstreamConfig != nil && !s.job.UpstreamConfig.Incognito {
		return nil
	}

	if err := s.ensureBundle(); err != nil {
		return err
	}

	if s.job.Bundle == nil {
		return errors.New("no bundle provided to run")
	}

	if len(s.job.Bundle.Packs) == 0 {
		return errors.New("bundle doesn't contain any query packs")
	}

	// FIXME: we do not currently respect bundle filters!
	_, err := hub.SetBundle(s.job.Ctx, s.job.Bundle)
	if err != nil {
		return err
	}

	querypackMrns := make([]string, len(s.job.Bundle.Packs))
	for i := range s.job.Bundle.Packs {
		querypackMrns[i] = s.job.Bundle.Packs[i].Mrn
	}

	_, err = conductor.Assign(s.job.Ctx, &explorer.Assignment{
		AssetMrn: s.job.Asset.Mrn,
		PackMrns: querypackMrns,
	})
	if err != nil {
		return err
	}

	if len(s.job.Props) != 0 {
		propsReq := explorer.PropsReq{
			EntityMrn: s.job.Asset.Mrn,
			Props:     make([]*explorer.Property, len(s.job.Props)),
		}
		i := 0
		for k, v := range s.job.Props {
			propsReq.Props[i] = &explorer.Property{
				Uid: k,
				Mql: v,
			}
			i++
		}

		_, err = conductor.SetProps(s.job.Ctx, &propsReq)
		if err != nil {
			return err
		}
	}

	return nil
}

var _assetDetectBundle *llx.CodeBundle

// assetDetectBundle lazily compiles the asset-detection query.
// NOTE(review): this lazy init is not synchronized; it looks like it is only
// called from the single scan goroutine — confirm before calling concurrently.
func assetDetectBundle() *llx.CodeBundle {
	if _assetDetectBundle == nil {
		// NOTE: we need to make sure this is loaded after the logger has been
		// initialized, otherwise the provider detection will print annoying logs
		_assetDetectBundle = executor.MustCompile("asset { kind platform runtime version family }")
	}
	return _assetDetectBundle
}

// ensureBundle makes sure the job carries a bundle: if none was provided it
// detects the asset's platform and fetches the default packs for it.
func (s *localAssetScanner) ensureBundle() error {
	if s.job.Bundle != nil {
		return nil
	}

	features := cnquery.GetFeatures(s.job.Ctx)
	res, err := mql.ExecuteCode(s.Runtime, assetDetectBundle(), nil, features)
	if err != nil {
		// FIX: this used to panic(err), which crashed the scanner and made the
		// error-return branch below unreachable. Return a wrapped error instead.
		return multierr.Wrap(err, "failed to run asset detection query")
	}

	// FIXME: remove hardcoded lookup and use embedded datastructures instead
	data := res["IA0bVPKFxIh8Z735sqDh7bo/FNIYUQ/B4wLijN+YhiBZePu1x2sZCMcHoETmWM9jocdWbwGykKvNom/7QSm8ew=="].Data.Value.(map[string]interface{})
	kind :=
data["1oxYPIhW1eZ+14s234VsQ0Q7p9JSmUaT/RTWBtDRG1ZwKr8YjMcXz76x10J9iu13AcMmGZd43M1NNqPXZtTuKQ=="].(*llx.RawData).Value.(string)
	platform := data["W+8HW/v60Fx0nqrVz+yTIQjImy4ki4AiqxcedooTPP3jkbCESy77ptEhq9PlrKjgLafHFn8w4vrimU4bwCi6aQ=="].(*llx.RawData).Value.(string)
	runtime := data["a3RMPjrhk+jqkeXIISqDSi7EEP8QybcXCeefqNJYVUNcaDGcVDdONFvcTM2Wts8qTRXL3akVxpskitXWuI/gdA=="].(*llx.RawData).Value.(string)
	version := data["5d4FZxbPkZu02MQaHp3C356NJ9TeVsJBw8Enu+TDyBGdWlZM/AE+J5UT/TQ72AmDViKZe97Hxz1Jt3MjcEH/9Q=="].(*llx.RawData).Value.(string)
	fraw := data["l/aGjrixdNHvCxu5ib4NwkYb0Qrh3sKzcrGTkm7VxNWfWaaVbOxOEoGEMnjGJTo31jhYNeRm39/zpepZaSbUIw=="].(*llx.RawData).Value.([]interface{})
	family := make([]string, 0, len(fraw))
	for _, f := range fraw {
		family = append(family, f.(string))
	}

	// ask the hub which packs apply to the detected platform
	var hub explorer.QueryHub = s.services
	urls, err := hub.DefaultPacks(s.job.Ctx, &explorer.DefaultPacksReq{
		Kind:     kind,
		Platform: platform,
		Runtime:  runtime,
		Version:  version,
		Family:   family,
	})
	if err != nil {
		return err
	}

	if len(urls.Urls) == 0 {
		return errors.New("cannot find any default policies for this asset (" + platform + ")")
	}

	s.job.Bundle, err = s.fetcher.fetchBundles(s.job.Ctx, s.Runtime.Schema(), urls.Urls...)
	if err != nil {
		return err
	}

	// filter bundle by ID
	if s.job.Bundle.FilterQueryPacks(s.job.QueryPackFilters) {
		return errors.New("all available packs filtered out. nothing to do.")
	}

	return nil
}

// runQueryPack fetches the asset's bundle and filters, resolves the
// execution job, runs it, stores the data, and assembles the final report.
func (s *localAssetScanner) runQueryPack() (*AssetReport, error) {
	var hub explorer.QueryHub = s.services
	var conductor explorer.QueryConductor = s.services

	// 1. fetch the bundle assigned to the asset
	log.Debug().Str("asset", s.job.Asset.Mrn).Msg("client> request bundle for asset")
	bundle, err := hub.GetBundle(s.job.Ctx, &explorer.Mrn{Mrn: s.job.Asset.Mrn})
	if err != nil {
		return nil, err
	}
	log.Debug().Msg("client> got bundle")
	logger.TraceJSON(bundle)
	logger.DebugDumpJSON("assetBundle", bundle)

	// 2. fetch the raw asset filters and narrow them down to the matching ones
	rawFilters, err := hub.GetFilters(s.job.Ctx, &explorer.Mrn{Mrn: s.job.Asset.Mrn})
	if err != nil {
		return nil, err
	}
	log.Debug().Str("asset", s.job.Asset.Mrn).Msg("client> got filters")
	logger.TraceJSON(rawFilters)

	matching, err := s.UpdateFilters(rawFilters, 5*time.Second)
	if err != nil {
		return nil, err
	}
	log.Debug().Str("asset", s.job.Asset.Mrn).Msg("client> shell update filters")
	logger.DebugJSON(matching)

	// 3. resolve the execution job for the matching filters
	resolved, err := conductor.Resolve(s.job.Ctx, &explorer.ResolveReq{
		EntityMrn:    s.job.Asset.Mrn,
		AssetFilters: matching,
	})
	if err != nil {
		return nil, err
	}
	log.Debug().Str("asset", s.job.Asset.Mrn).Msg("client> got resolved bundle for asset")
	logger.DebugDumpJSON("resolvedPack", resolved)

	// 4. execute the job and persist its results
	features := cnquery.GetFeatures(s.job.Ctx)
	exec, err := executor.RunExecutionJob(s.Runtime, conductor, s.job.Asset.Mrn, resolved.ExecutionJob, features, s.job.ProgressReporter)
	if err != nil {
		return nil, err
	}
	if err := exec.WaitUntilDone(10 * time.Second); err != nil {
		return nil, err
	}
	if err := exec.StoreData(); err != nil {
		return nil, err
	}

	out := &AssetReport{
		Mrn:      s.job.Asset.Mrn,
		Bundle:   bundle,
		Resolved: resolved,
	}

	// 5. generate the report
	log.Debug().Str("asset", s.job.Asset.Mrn).Msg("generate report")
	report, err := conductor.GetReport(s.job.Ctx, &explorer.EntityDataRequest{
		// NOTE: we assign packs to the asset before we execute the tests,
		// therefore this resolves all packs assigned to the asset
		EntityMrn: s.job.Asset.Mrn,
		DataMrn:   s.job.Asset.Mrn,
	})
	if err != nil {
		// hand back a stub report alongside the error so callers keep the MRN context
		out.Report = &explorer.Report{
			EntityMrn: s.job.Asset.Mrn,
			PackMrn:   s.job.Asset.Mrn,
		}
		return out, err
	}

	out.Report = report
	return out, nil
}

// FilterQueries returns all queries whose result is truthy
func (s *localAssetScanner) FilterQueries(queries []*explorer.Mquery, timeout time.Duration) ([]*explorer.Mquery, []error) {
	return executor.ExecuteFilterQueries(s.Runtime, queries, timeout)
}

// UpdateFilters takes a list of test filters and runs them against the backend
// to return the matching ones
func (s *localAssetScanner) UpdateFilters(filters *explorer.Mqueries, timeout time.Duration) ([]*explorer.Mquery, error) {
	matching, errs := s.FilterQueries(filters.Items, timeout)
	if len(errs) == 0 {
		return matching, nil
	}

	// collapse all filter errors into a single error value
	var sb strings.Builder
	for _, e := range errs {
		sb.WriteString(e.Error() + "\n")
	}
	return matching, errors.New("received multiple errors: " + sb.String())
}