github.com/jfrog/jfrog-cli-core/v2@v2.51.0/artifactory/commands/transferfiles/utils.go

package transferfiles

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"os"
	"path"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	buildInfoUtils "github.com/jfrog/build-info-go/utils"
	"github.com/jfrog/gofrog/datastructures"
	"github.com/jfrog/gofrog/parallel"
	"github.com/jfrog/jfrog-cli-core/v2/artifactory/commands/transferfiles/api"
	"github.com/jfrog/jfrog-cli-core/v2/artifactory/commands/transferfiles/state"
	"github.com/jfrog/jfrog-cli-core/v2/artifactory/utils"
	"github.com/jfrog/jfrog-cli-core/v2/utils/config"
	"github.com/jfrog/jfrog-cli-core/v2/utils/coreutils"
	"github.com/jfrog/jfrog-cli-core/v2/utils/reposnapshot"
	"github.com/jfrog/jfrog-client-go/artifactory"
	"github.com/jfrog/jfrog-client-go/artifactory/services"
	serviceUtils "github.com/jfrog/jfrog-client-go/artifactory/services/utils"
	clientUtils "github.com/jfrog/jfrog-client-go/utils"
	"github.com/jfrog/jfrog-client-go/utils/errorutils"
	"github.com/jfrog/jfrog-client-go/utils/io/fileutils"
	"github.com/jfrog/jfrog-client-go/utils/log"
	"golang.org/x/exp/maps"
)

const (
	waitTimeBetweenChunkStatusSeconds   = 3
	waitTimeBetweenThreadsUpdateSeconds = 20
	DefaultAqlPaginationLimit           = 10000

	SyncErrorReason     = "un-synchronized chunk status due to network issue"
	SyncErrorStatusCode = 404

	StopFileName = "stop"
)

var AqlPaginationLimit = DefaultAqlPaginationLimit
var curChunkBuilderThreads int
var curChunkUploaderThreads int

type UploadedChunk struct {
	api.UploadChunkResponse
	UploadedChunkData
}

type UploadedChunkData struct {
	ChunkFiles []api.FileRepresentation
	TimeSent   time.Time
}

type ChunksLifeCycleManager struct {
	// deletedChunksSet stores chunk uuids that have received a 'DONE' response from the source Artifactory instance.
	// It is used to notify the source Artifactory instance that these chunks can be deleted from the source's status map.
	deletedChunksSet *datastructures.Set[api.ChunkId]
	// nodeToChunksMap maps each node ID of the source Artifactory instance to the chunks currently
	// in progress on that node, along with their matching files.
	// If the network fails and the uploaded chunks data is lost,
	// the files of these chunks are written to the errors file using this map.
	nodeToChunksMap map[api.NodeId]map[api.ChunkId]UploadedChunkData
}
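// Illustrative sketch, not part of the original file: how an uploaded chunk might be
// registered in nodeToChunksMap once its upload response arrives. It assumes api.NodeId
// and api.ChunkId are string-based types, as the string conversions in StoreStaleChunks
// below suggest.
func exampleTrackUploadedChunk(clcm *ChunksLifeCycleManager, uploaded UploadedChunk) {
	nodeId := api.NodeId(uploaded.NodeId)
	if clcm.nodeToChunksMap == nil {
		clcm.nodeToChunksMap = make(map[api.NodeId]map[api.ChunkId]UploadedChunkData)
	}
	if clcm.nodeToChunksMap[nodeId] == nil {
		clcm.nodeToChunksMap[nodeId] = make(map[api.ChunkId]UploadedChunkData)
	}
	// Key the in-flight chunk by its uuid token, so it can be reported or flushed to the errors file later.
	clcm.nodeToChunksMap[nodeId][api.ChunkId(uploaded.UuidToken)] = uploaded.UploadedChunkData
}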
// Convert to a map of node ID to a list of chunk IDs, to allow printing it.
func (clcm *ChunksLifeCycleManager) GetNodeIdToChunkIdsMap() map[api.NodeId][]api.ChunkId {
	nodeIdToChunks := make(map[api.NodeId][]api.ChunkId, len(clcm.nodeToChunksMap))
	for nodeId, chunks := range clcm.nodeToChunksMap {
		nodeIdToChunks[nodeId] = maps.Keys(chunks)
	}
	return nodeIdToChunks
}

func (clcm *ChunksLifeCycleManager) GetInProgressTokensSlice() []api.ChunkId {
	var inProgressTokens []api.ChunkId
	for _, node := range clcm.nodeToChunksMap {
		for id := range node {
			inProgressTokens = append(inProgressTokens, id)
		}
	}

	return inProgressTokens
}

func (clcm *ChunksLifeCycleManager) GetInProgressTokensSliceByNodeId(nodeId api.NodeId) []api.ChunkId {
	var inProgressTokens []api.ChunkId
	for chunkId := range clcm.nodeToChunksMap[nodeId] {
		inProgressTokens = append(inProgressTokens, chunkId)
	}

	return inProgressTokens
}

// Save in the TransferRunStatus the chunks that have been in transit for more than 30 minutes.
// This allows them to be displayed using the '--status' option.
// stateManager - Transfer state manager
func (clcm *ChunksLifeCycleManager) StoreStaleChunks(stateManager *state.TransferStateManager) error {
	var staleChunks []state.StaleChunks
	for nodeId, chunkIdToData := range clcm.nodeToChunksMap {
		staleNodeChunks := state.StaleChunks{NodeID: string(nodeId)}
		for chunkId, uploadedChunkData := range chunkIdToData {
			if time.Since(uploadedChunkData.TimeSent).Hours() < 0.5 {
				continue
			}
			staleNodeChunk := state.StaleChunk{
				ChunkID: string(chunkId),
				Sent:    uploadedChunkData.TimeSent.Unix(),
			}
			for _, file := range uploadedChunkData.ChunkFiles {
				var sizeStr string
				if file.Size > 0 {
					sizeStr = " (" + utils.ConvertIntToStorageSizeString(file.Size) + ")"
				}
				staleNodeChunk.Files = append(staleNodeChunk.Files, path.Join(file.Repo, file.Path, file.Name)+sizeStr)
			}
			staleNodeChunks.Chunks = append(staleNodeChunks.Chunks, staleNodeChunk)
		}
		if len(staleNodeChunks.Chunks) > 0 {
			staleChunks = append(staleChunks, staleNodeChunks)
		}
	}
	return stateManager.SetStaleChunks(staleChunks)
}

// Set the JFrog CLI temp dir to be ~/.jfrog/transfer/tmp/
func initTempDir() (unsetTempDir func(), err error) {
	// If the JFROG_CLI_TEMP_DIR environment variable is provided, use it.
	if os.Getenv(coreutils.TempDir) != "" {
		return
	}

	oldTempDir := fileutils.GetTempDirBase()
	var transferTempDir string
	if transferTempDir, err = coreutils.GetJfrogTransferTempDir(); err != nil {
		return
	}

	if err = fileutils.CreateDirIfNotExist(transferTempDir); err != nil {
		return
	}

	if err = fileutils.RemoveDirContents(transferTempDir); err != nil {
		return
	}
	fileutils.SetTempDirBase(transferTempDir)
	unsetTempDir = func() {
		fileutils.SetTempDirBase(oldTempDir)
	}
	return
}

type InterruptionErr struct{}

func (m *InterruptionErr) Error() string {
	return "Files transfer was interrupted by user"
}

func createTransferServiceManager(ctx context.Context, serverDetails *config.ServerDetails) (artifactory.ArtifactoryServicesManager, error) {
	return utils.CreateServiceManagerWithContext(ctx, serverDetails, false, 0, retries, retriesWaitMilliSecs, time.Minute)
}
func createSrcRtUserPluginServiceManager(ctx context.Context, sourceRtDetails *config.ServerDetails) (*srcUserPluginService, error) {
	serviceManager, err := createTransferServiceManager(ctx, sourceRtDetails)
	if err != nil {
		return nil, err
	}
	return NewSrcUserPluginService(serviceManager.GetConfig().GetServiceDetails(), serviceManager.Client()), nil
}

func appendDistinctIfNeeded(disabledDistinctiveAql bool) string {
	if disabledDistinctiveAql {
		return `.distinct(false)`
	}
	return ""
}

func runAql(ctx context.Context, sourceRtDetails *config.ServerDetails, query string) (result *serviceUtils.AqlSearchResult, err error) {
	serviceManager, err := createTransferServiceManager(ctx, sourceRtDetails)
	if err != nil {
		return nil, err
	}
	reader, err := serviceManager.Aql(query)
	if err != nil {
		return nil, err
	}
	defer func() {
		if reader != nil {
			err = errors.Join(err, errorutils.CheckError(reader.Close()))
		}
	}()

	respBody, err := io.ReadAll(reader)
	if err != nil {
		return nil, errorutils.CheckError(err)
	}

	result = &serviceUtils.AqlSearchResult{}
	err = json.Unmarshal(respBody, result)
	return result, errorutils.CheckError(err)
}
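// Illustrative sketch, not part of the original file: a paginated AQL query of the kind this
// package feeds to runAql, using AqlPaginationLimit as the page size and appendDistinctIfNeeded
// to optionally disable distinctive AQL. The query fields, sort order, and placement of the
// distinct suffix here are assumptions for illustration only.
func examplePaginatedAql(ctx context.Context, rtDetails *config.ServerDetails, repoKey string, page int, disabledDistinctiveAql bool) (*serviceUtils.AqlSearchResult, error) {
	query := fmt.Sprintf(`items.find({"repo":"%s"}).include("repo","path","name","size").sort({"$asc":["name"]}).offset(%d).limit(%d)`,
		repoKey, page*AqlPaginationLimit, AqlPaginationLimit)
	query += appendDistinctIfNeeded(disabledDistinctiveAql)
	return runAql(ctx, rtDetails, query)
}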
func createTargetAuth(targetRtDetails *config.ServerDetails, proxyKey string) api.TargetAuth {
	targetAuth := api.TargetAuth{
		TargetArtifactoryUrl: targetRtDetails.ArtifactoryUrl,
		TargetToken:          targetRtDetails.AccessToken,
		TargetProxyKey:       proxyKey,
	}
	if targetAuth.TargetToken == "" {
		targetAuth.TargetUsername = targetRtDetails.User
		targetAuth.TargetPassword = targetRtDetails.Password
	}
	return targetAuth
}

func handleFilesOfCompletedChunk(chunkFiles []api.FileUploadStatusResponse, errorsChannelMng *ErrorsChannelMng) (stopped bool) {
	for _, file := range chunkFiles {
		if file.Status == api.Fail || file.Status == api.SkippedLargeProps {
			stopped = addErrorToChannel(errorsChannelMng, file)
			if stopped {
				return
			}
		}
	}
	return
}

// Uploads a chunk when there is room in the queue.
// This is a blocking method.
func uploadChunkWhenPossible(pcWrapper *producerConsumerWrapper, phaseBase *phaseBase, chunk api.UploadChunk, uploadTokensChan chan UploadedChunk, errorsChannelMng *ErrorsChannelMng) (stopped bool) {
	for {
		if ShouldStop(phaseBase, nil, errorsChannelMng) {
			return true
		}
		// If the increment succeeded, this goroutine can proceed to upload the chunk. Otherwise, sleep and try again.
		isIncr := pcWrapper.incProcessedChunksWhenPossible()
		if !isIncr {
			time.Sleep(waitTimeBetweenChunkStatusSeconds * time.Second)
			continue
		}
		err := uploadChunkAndAddToken(phaseBase.srcUpService, chunk, uploadTokensChan)
		if err != nil {
			// The chunk was not uploaded due to an error. Reduce the processed chunks count and send the
			// entire chunk content to the error channel, so that the files can be uploaded on the next run.
			pcWrapper.decProcessedChunks()
			// If the transfer was interrupted by the user, we shouldn't write it to the CSV file.
			if errors.Is(err, context.Canceled) {
				return true
			}
			return sendAllChunkToErrorChannel(chunk, errorsChannelMng, err, phaseBase.stateManager)
		}
		return ShouldStop(phaseBase, nil, errorsChannelMng)
	}
}

func sendAllChunkToErrorChannel(chunk api.UploadChunk, errorsChannelMng *ErrorsChannelMng, errReason error, stateManager *state.TransferStateManager) (stopped bool) {
	var failures []api.FileUploadStatusResponse
	for _, file := range chunk.UploadCandidates {
		fileFailureResponse := api.FileUploadStatusResponse{
			FileRepresentation: file,
			Reason:             errReason.Error(),
		}
		// In case an error occurred while handling the errors files - stop transferring.
		stopped = addErrorToChannel(errorsChannelMng, fileFailureResponse)
		if stopped {
			return
		}
		failures = append(failures, fileFailureResponse)
	}
	err := setChunkCompletedInRepoSnapshot(stateManager, failures)
	if err != nil {
		// We log the error instead of returning it, since the original error is already being handled.
		log.Error(err)
	}
	return
}

// If a repo snapshot is tracked, mark all files of the chunk as completed in their directory's node,
// and check whether the node is completed (i.e. done handling the directory and its child directories).
func setChunkCompletedInRepoSnapshot(stateManager *state.TransferStateManager, chunkFiles []api.FileUploadStatusResponse) (err error) {
	if !stateManager.IsRepoTransferSnapshotEnabled() {
		return
	}

	var dirNode *reposnapshot.Node
	for _, file := range chunkFiles {
		dirNode, err = stateManager.GetDirectorySnapshotNodeWithLru(file.Path)
		if err != nil {
			return
		}

		// If this is an empty dir, skip straight to checking completion.
		if file.Name != "" {
			if err = dirNode.DecrementFilesCount(); err != nil {
				return
			}
		}

		if err = dirNode.CheckCompleted(); err != nil {
			return
		}
	}
	return
}

// Sends an upload chunk to the source Artifactory instance, to be handled asynchronously by the data-transfer plugin.
// A uuid token is returned, to be polled on for status.
// This function sends the token to uploadTokensChan, for the pollUploads function to read and poll on.
func uploadChunkAndAddToken(sup *srcUserPluginService, chunk api.UploadChunk, uploadTokensChan chan UploadedChunk) error {
	uploadResponse, err := sup.uploadChunk(chunk)
	if err != nil {
		return err
	}

	// Add chunk data for polling.
	log.Debug("Chunk sent to node " + uploadResponse.NodeId + ". Adding chunk token '" + uploadResponse.UuidToken + "' to poll on for status.")
	uploadTokensChan <- newUploadedChunkStruct(uploadResponse, chunk)
	return nil
}

func newUploadedChunkStruct(uploadChunkResponse api.UploadChunkResponse, chunk api.UploadChunk) UploadedChunk {
	return UploadedChunk{
		UploadChunkResponse: uploadChunkResponse,
		UploadedChunkData: UploadedChunkData{
			ChunkFiles: chunk.UploadCandidates,
			TimeSent:   time.Now(),
		},
	}
}

func GetChunkBuilderThreads() int {
	return curChunkBuilderThreads
}

func GetChunkUploaderThreads() int {
	return curChunkUploaderThreads
}
// Periodically reads the settings file and updates the number of threads.
// The number of threads in the settings file is expected to be changed by running a separate command.
// The new number of threads is reflected almost immediately (checked every waitTimeBetweenThreadsUpdateSeconds) on
// the CLI side (by updating the producer-consumer, if used, and the local variables), and as a result on the Artifactory User Plugin side as well.
// This method also looks for the '~/.jfrog/transfer/stop' file, and interrupts the transfer if it exists.
func periodicallyUpdateThreadsAndStopStatus(pcWrapper *producerConsumerWrapper, doneChan chan bool, buildInfoRepo bool, stopSignal chan os.Signal) {
	log.Debug("Initializing polling on the settings and stop files...")
	for {
		time.Sleep(waitTimeBetweenThreadsUpdateSeconds * time.Second)
		if err := interruptIfRequested(stopSignal); err != nil {
			log.Error(err)
		}
		if shouldStopPolling(doneChan) {
			log.Debug("Stopping the polling on the settings and stop files for the current phase.")
			return
		}
		if err := updateThreads(pcWrapper, buildInfoRepo); err != nil {
			log.Error(err)
		}
	}
}

func updateThreads(pcWrapper *producerConsumerWrapper, buildInfoRepo bool) error {
	settings, err := utils.LoadTransferSettings()
	if err != nil || settings == nil {
		return err
	}
	calculatedChunkBuilderThreads, calculatedChunkUploaderThreads := settings.CalcNumberOfThreads(buildInfoRepo)
	if curChunkUploaderThreads != calculatedChunkUploaderThreads {
		if pcWrapper != nil {
			if curChunkBuilderThreads != calculatedChunkBuilderThreads {
				updateProducerConsumerMaxParallel(pcWrapper.chunkBuilderProducerConsumer, calculatedChunkBuilderThreads)
			}
			updateProducerConsumerMaxParallel(pcWrapper.chunkUploaderProducerConsumer, calculatedChunkUploaderThreads)
		}
		log.Info(fmt.Sprintf("Number of threads has been updated to %s (was %s).", strconv.Itoa(calculatedChunkUploaderThreads), strconv.Itoa(curChunkUploaderThreads)))
		curChunkBuilderThreads = calculatedChunkBuilderThreads
		curChunkUploaderThreads = calculatedChunkUploaderThreads
	} else {
		log.Debug(fmt.Sprintf("No change to the number of threads has been detected. Max chunks builder threads: %d. Max chunks uploader threads: %d.",
			calculatedChunkBuilderThreads, calculatedChunkUploaderThreads))
	}
	return nil
}
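// Illustrative sketch, not part of the original file: how a separate process could request an
// interruption, by dropping the stop file that interruptIfRequested (below) polls for. It only
// uses helpers already present in this file.
func exampleRequestStop() error {
	transferDir, err := coreutils.GetJfrogTransferDir()
	if err != nil {
		return err
	}
	// Creating ~/.jfrog/transfer/stop is enough; the polling loop sends os.Interrupt when it sees the file.
	stopFile, err := os.Create(filepath.Join(transferDir, StopFileName))
	if err != nil {
		return err
	}
	return stopFile.Close()
}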
// Interrupt the transfer by populating the stopSignal channel with the Interrupt signal, if the '~/.jfrog/transfer/stop' file exists.
func interruptIfRequested(stopSignal chan os.Signal) error {
	transferDir, err := coreutils.GetJfrogTransferDir()
	if err != nil {
		return err
	}
	exist, err := fileutils.IsFileExists(filepath.Join(transferDir, StopFileName), false)
	if err != nil {
		return err
	}
	if exist {
		select {
		case stopSignal <- os.Interrupt:
		default:
		}
	}
	return nil
}

func updateProducerConsumerMaxParallel(producerConsumer parallel.Runner, calculatedNumberOfThreads int) {
	if producerConsumer != nil {
		producerConsumer.SetMaxParallel(calculatedNumberOfThreads)
	}
}

func uploadChunkWhenPossibleHandler(pcWrapper *producerConsumerWrapper, phaseBase *phaseBase, chunk api.UploadChunk,
	uploadTokensChan chan UploadedChunk, errorsChannelMng *ErrorsChannelMng) parallel.TaskFunc {
	return func(threadId int) error {
		logMsgPrefix := clientUtils.GetLogMsgPrefix(threadId, false)
		log.Debug(logMsgPrefix + "Handling chunk upload")
		shouldStop := uploadChunkWhenPossible(pcWrapper, phaseBase, chunk, uploadTokensChan, errorsChannelMng)
		if shouldStop {
			// The specific error that triggered the stop is already in the errors channel.
			return errorutils.CheckErrorf(logMsgPrefix + "stopped")
		}
		return nil
	}
}

// Collects files into chunks of size uploadChunkSize and sends them to be uploaded whenever possible
// (the number of chunks uploaded concurrently is limited by the number of threads).
// A uuid token is returned after each chunk is sent, and is polled on for status.
func uploadByChunks(files []api.FileRepresentation, uploadTokensChan chan UploadedChunk, base phaseBase, delayHelper delayUploadHelper, errorsChannelMng *ErrorsChannelMng, pcWrapper *producerConsumerWrapper) (shouldStop bool, err error) {
	curUploadChunk := api.UploadChunk{
		TargetAuth:                createTargetAuth(base.targetRtDetails, base.proxyKey),
		CheckExistenceInFilestore: base.checkExistenceInFilestore,
		SkipFileFiltering:         base.locallyGeneratedFilter.IsEnabled(),
		MinCheckSumDeploySize:     base.minCheckSumDeploySize,
	}

	for _, item := range files {
		file := api.FileRepresentation{Repo: item.Repo, Path: item.Path, Name: item.Name, Size: item.Size}
		var delayed bool
		delayed, shouldStop = delayHelper.delayUploadIfNecessary(base, file)
		if shouldStop {
			return
		}
		if delayed {
			continue
		}
		curUploadChunk.AppendUploadCandidateIfNeeded(file, base.buildInfoRepo)
		if curUploadChunk.IsChunkFull() {
			_, err = pcWrapper.chunkUploaderProducerConsumer.AddTaskWithError(uploadChunkWhenPossibleHandler(pcWrapper, &base, curUploadChunk, uploadTokensChan, errorsChannelMng), pcWrapper.errorsQueue.AddError)
			if err != nil {
				return
			}
			// Empty the uploaded chunk.
			curUploadChunk.UploadCandidates = []api.FileRepresentation{}
		}
	}
	// The last chunk didn't reach full size. Upload the remaining files.
	if len(curUploadChunk.UploadCandidates) > 0 {
		_, err = pcWrapper.chunkUploaderProducerConsumer.AddTaskWithError(uploadChunkWhenPossibleHandler(pcWrapper, &base, curUploadChunk, uploadTokensChan, errorsChannelMng), pcWrapper.errorsQueue.AddError)
		if err != nil {
			return
		}
	}
	return
}
// Add a new error to the common error channel.
// In case an error occurs while creating the upload errors files, we stop the transfer right away and stop adding elements to the channel.
func addErrorToChannel(errorsChannelMng *ErrorsChannelMng, file api.FileUploadStatusResponse) (stopped bool) {
	if errorsChannelMng.add(file) {
		log.Debug("Stop transferring data - error occurred while handling transfer's errors files.")
		return true
	}
	return false
}

// ShouldStop reports whether the transfer should stop. It returns true if one of the following happened:
// * An error occurred while handling errors (for example - not enough space in the file system)
// * An error occurred during delayed artifacts handling
// * The user interrupted the process (ctrl+c)
func ShouldStop(phase *phaseBase, delayHelper *delayUploadHelper, errorsChannelMng *ErrorsChannelMng) bool {
	if phase != nil && phase.ShouldStop() {
		log.Debug("Stop transferring data - Interrupted.")
		return true
	}
	if delayHelper != nil && delayHelper.delayedArtifactsChannelMng.shouldStop() {
		log.Debug("Stop transferring data - error occurred while handling transfer's delayed artifacts files.")
		return true
	}
	if errorsChannelMng != nil && errorsChannelMng.shouldStop() {
		log.Debug("Stop transferring data - error occurred while handling transfer's errors.")
		return true
	}
	return false
}
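// Illustrative sketch, not part of the original file: the guard pattern used by workers in this
// package, bailing out as soon as ShouldStop (above) reports an interruption or an error-handling
// failure. The loop body is hypothetical.
func exampleGuardedWorker(phase *phaseBase, delayHelper *delayUploadHelper, errorsChannelMng *ErrorsChannelMng, files []api.FileRepresentation) (stopped bool) {
	for range files {
		if ShouldStop(phase, delayHelper, errorsChannelMng) {
			return true
		}
		// ... handle the next file ...
	}
	return false
}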
func getRunningNodes(ctx context.Context, sourceRtDetails *config.ServerDetails) ([]string, error) {
	serviceManager, err := createTransferServiceManager(ctx, sourceRtDetails)
	if err != nil {
		return nil, err
	}
	return serviceManager.GetRunningNodes()
}

func stopTransferInArtifactoryNodes(srcUpService *srcUserPluginService, runningNodes []string) {
	remainingNodesToStop := make(map[string]string)
	for _, s := range runningNodes {
		remainingNodesToStop[s] = s
	}
	log.Debug("Running Artifactory nodes to stop transfer on:", remainingNodesToStop)
	// Send the stop command up to 5 times the number of Artifactory nodes, to make sure we reach all of them.
	for i := 0; i < len(runningNodes)*5; i++ {
		if len(remainingNodesToStop) == 0 {
			log.Debug("Transfer on all Artifactory nodes stopped successfully")
			return
		}
		nodeId, err := srcUpService.stop()
		if err != nil {
			log.Error(err)
		} else {
			log.Debug("Node " + nodeId + " stopped")
			delete(remainingNodesToStop, nodeId)
		}
	}
}

// getMaxUniqueSnapshots gets the local repository's setting of max unique snapshots (Maven, Gradle, NuGet, Ivy and SBT)
// or max unique tags (Docker).
// For repositories of other package types, or if an error occurs, this function returns -1.
func getMaxUniqueSnapshots(ctx context.Context, rtDetails *config.ServerDetails, repoSummary *serviceUtils.RepositorySummary) (maxUniqueSnapshots int, err error) {
	maxUniqueSnapshots = -1
	serviceManager, err := createTransferServiceManager(ctx, rtDetails)
	if err != nil {
		return
	}
	switch repoSummary.PackageType {
	case maven:
		mavenLocalRepoParams := services.MavenLocalRepositoryParams{}
		err = serviceManager.GetRepository(repoSummary.RepoKey, &mavenLocalRepoParams)
		if err != nil {
			return
		}
		maxUniqueSnapshots = *mavenLocalRepoParams.MaxUniqueSnapshots
	case gradle:
		gradleLocalRepoParams := services.GradleLocalRepositoryParams{}
		err = serviceManager.GetRepository(repoSummary.RepoKey, &gradleLocalRepoParams)
		if err != nil {
			return
		}
		maxUniqueSnapshots = *gradleLocalRepoParams.MaxUniqueSnapshots
	case nuget:
		nugetLocalRepoParams := services.NugetLocalRepositoryParams{}
		err = serviceManager.GetRepository(repoSummary.RepoKey, &nugetLocalRepoParams)
		if err != nil {
			return
		}
		maxUniqueSnapshots = *nugetLocalRepoParams.MaxUniqueSnapshots
	case ivy:
		ivyLocalRepoParams := services.IvyLocalRepositoryParams{}
		err = serviceManager.GetRepository(repoSummary.RepoKey, &ivyLocalRepoParams)
		if err != nil {
			return
		}
		maxUniqueSnapshots = *ivyLocalRepoParams.MaxUniqueSnapshots
	case sbt:
		sbtLocalRepoParams := services.SbtLocalRepositoryParams{}
		err = serviceManager.GetRepository(repoSummary.RepoKey, &sbtLocalRepoParams)
		if err != nil {
			return
		}
		maxUniqueSnapshots = *sbtLocalRepoParams.MaxUniqueSnapshots
	case docker:
		dockerLocalRepoParams := services.DockerLocalRepositoryParams{}
		err = serviceManager.GetRepository(repoSummary.RepoKey, &dockerLocalRepoParams)
		if err != nil {
			return
		}
		maxUniqueSnapshots = *dockerLocalRepoParams.MaxUniqueTags
	}
	return
}
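// Illustrative sketch, not part of the original file: reading a repository's current
// max-unique-snapshots value and later restoring it via updateMaxUniqueSnapshots (below),
// e.g. around a transfer of that repository. The zero value used in between is an assumption.
func exampleToggleMaxUniqueSnapshots(ctx context.Context, rtDetails *config.ServerDetails, repoSummary *serviceUtils.RepositorySummary) error {
	orig, err := getMaxUniqueSnapshots(ctx, rtDetails, repoSummary)
	if err != nil {
		return err
	}
	if orig == -1 {
		// The package type has no max-unique-snapshots (or max-unique-tags) setting.
		return nil
	}
	if err = updateMaxUniqueSnapshots(ctx, rtDetails, repoSummary, 0); err != nil {
		return err
	}
	// ... transfer the repository ...
	return updateMaxUniqueSnapshots(ctx, rtDetails, repoSummary, orig)
}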
// updateMaxUniqueSnapshots updates the local repository's setting of max unique snapshots (Maven, Gradle, NuGet, Ivy and SBT)
// or max unique tags (Docker).
// For repositories of other package types, this function does nothing.
func updateMaxUniqueSnapshots(ctx context.Context, rtDetails *config.ServerDetails, repoSummary *serviceUtils.RepositorySummary, newMaxUniqueSnapshots int) error {
	serviceManager, err := createTransferServiceManager(ctx, rtDetails)
	if err != nil {
		return err
	}
	switch repoSummary.PackageType {
	case maven:
		return updateMaxMavenUniqueSnapshots(serviceManager, repoSummary, newMaxUniqueSnapshots)
	case gradle:
		return updateMaxGradleUniqueSnapshots(serviceManager, repoSummary, newMaxUniqueSnapshots)
	case nuget:
		return updateMaxNugetUniqueSnapshots(serviceManager, repoSummary, newMaxUniqueSnapshots)
	case ivy:
		return updateMaxIvyUniqueSnapshots(serviceManager, repoSummary, newMaxUniqueSnapshots)
	case sbt:
		return updateMaxSbtUniqueSnapshots(serviceManager, repoSummary, newMaxUniqueSnapshots)
	case docker:
		return updateMaxDockerUniqueSnapshots(serviceManager, repoSummary, newMaxUniqueSnapshots)
	}
	return nil
}

func updateMaxMavenUniqueSnapshots(serviceManager artifactory.ArtifactoryServicesManager, repoSummary *serviceUtils.RepositorySummary, newMaxUniqueSnapshots int) error {
	if strings.ToLower(repoSummary.RepoType) == services.FederatedRepositoryRepoType {
		repoParams := services.NewMavenFederatedRepositoryParams()
		repoParams.Key = repoSummary.RepoKey
		repoParams.MaxUniqueSnapshots = &newMaxUniqueSnapshots
		return serviceManager.UpdateFederatedRepository().Maven(repoParams)
	}
	repoParams := services.NewMavenLocalRepositoryParams()
	repoParams.Key = repoSummary.RepoKey
	repoParams.MaxUniqueSnapshots = &newMaxUniqueSnapshots
	return serviceManager.UpdateLocalRepository().Maven(repoParams)
}

func updateMaxGradleUniqueSnapshots(serviceManager artifactory.ArtifactoryServicesManager, repoSummary *serviceUtils.RepositorySummary, newMaxUniqueSnapshots int) error {
	if strings.ToLower(repoSummary.RepoType) == services.FederatedRepositoryRepoType {
		repoParams := services.NewGradleFederatedRepositoryParams()
		repoParams.Key = repoSummary.RepoKey
		repoParams.MaxUniqueSnapshots = &newMaxUniqueSnapshots
		return serviceManager.UpdateFederatedRepository().Gradle(repoParams)
	}
	repoParams := services.NewGradleLocalRepositoryParams()
	repoParams.Key = repoSummary.RepoKey
	repoParams.MaxUniqueSnapshots = &newMaxUniqueSnapshots
	return serviceManager.UpdateLocalRepository().Gradle(repoParams)
}

func updateMaxNugetUniqueSnapshots(serviceManager artifactory.ArtifactoryServicesManager, repoSummary *serviceUtils.RepositorySummary, newMaxUniqueSnapshots int) error {
	if strings.ToLower(repoSummary.RepoType) == services.FederatedRepositoryRepoType {
		repoParams := services.NewNugetFederatedRepositoryParams()
		repoParams.Key = repoSummary.RepoKey
		repoParams.MaxUniqueSnapshots = &newMaxUniqueSnapshots
		return serviceManager.UpdateFederatedRepository().Nuget(repoParams)
	}
	repoParams := services.NewNugetLocalRepositoryParams()
	repoParams.Key = repoSummary.RepoKey
	repoParams.MaxUniqueSnapshots = &newMaxUniqueSnapshots
	return serviceManager.UpdateLocalRepository().Nuget(repoParams)
}
func updateMaxIvyUniqueSnapshots(serviceManager artifactory.ArtifactoryServicesManager, repoSummary *serviceUtils.RepositorySummary, newMaxUniqueSnapshots int) error {
	if strings.ToLower(repoSummary.RepoType) == services.FederatedRepositoryRepoType {
		repoParams := services.NewIvyFederatedRepositoryParams()
		repoParams.Key = repoSummary.RepoKey
		repoParams.MaxUniqueSnapshots = &newMaxUniqueSnapshots
		return serviceManager.UpdateFederatedRepository().Ivy(repoParams)
	}
	repoParams := services.NewIvyLocalRepositoryParams()
	repoParams.Key = repoSummary.RepoKey
	repoParams.MaxUniqueSnapshots = &newMaxUniqueSnapshots
	return serviceManager.UpdateLocalRepository().Ivy(repoParams)
}

func updateMaxSbtUniqueSnapshots(serviceManager artifactory.ArtifactoryServicesManager, repoSummary *serviceUtils.RepositorySummary, newMaxUniqueSnapshots int) error {
	if strings.ToLower(repoSummary.RepoType) == services.FederatedRepositoryRepoType {
		repoParams := services.NewSbtFederatedRepositoryParams()
		repoParams.Key = repoSummary.RepoKey
		repoParams.MaxUniqueSnapshots = &newMaxUniqueSnapshots
		return serviceManager.UpdateFederatedRepository().Sbt(repoParams)
	}
	repoParams := services.NewSbtLocalRepositoryParams()
	repoParams.Key = repoSummary.RepoKey
	repoParams.MaxUniqueSnapshots = &newMaxUniqueSnapshots
	return serviceManager.UpdateLocalRepository().Sbt(repoParams)
}

func updateMaxDockerUniqueSnapshots(serviceManager artifactory.ArtifactoryServicesManager, repoSummary *serviceUtils.RepositorySummary, newMaxUniqueSnapshots int) error {
	if strings.ToLower(repoSummary.RepoType) == services.FederatedRepositoryRepoType {
		repoParams := services.NewDockerFederatedRepositoryParams()
		repoParams.Key = repoSummary.RepoKey
		repoParams.MaxUniqueTags = &newMaxUniqueSnapshots
		return serviceManager.UpdateFederatedRepository().Docker(repoParams)
	}
	repoParams := services.NewDockerLocalRepositoryParams()
	repoParams.Key = repoSummary.RepoKey
	repoParams.MaxUniqueTags = &newMaxUniqueSnapshots
	return serviceManager.UpdateLocalRepository().Docker(repoParams)
}
func stopTransferInArtifactory(serverDetails *config.ServerDetails, srcUpService *srcUserPluginService) error {
	// To avoid situations where the context has already been canceled, we use a new context here instead of the old context of the transfer phase.
	runningNodes, err := getRunningNodes(context.Background(), serverDetails)
	if err != nil {
		return err
	}
	stopTransferInArtifactoryNodes(srcUpService, runningNodes)
	return nil
}

func getJfrogTransferRepoDelaysDir(repoKey string) (string, error) {
	return state.GetJfrogTransferRepoSubDir(repoKey, coreutils.JfrogTransferDelaysDirName)
}

func getJfrogTransferRepoErrorsDir(repoKey string) (string, error) {
	return state.GetJfrogTransferRepoSubDir(repoKey, coreutils.JfrogTransferErrorsDirName)
}

func getJfrogTransferRepoErrorsSubDir(repoKey, subDirName string) (string, error) {
	errorsDir, err := getJfrogTransferRepoErrorsDir(repoKey)
	if err != nil {
		return "", err
	}
	return filepath.Join(errorsDir, subDirName), nil
}

func getJfrogTransferRepoRetryableDir(repoKey string) (string, error) {
	return getJfrogTransferRepoErrorsSubDir(repoKey, coreutils.JfrogTransferRetryableErrorsDirName)
}

func getJfrogTransferRepoSkippedDir(repoKey string) (string, error) {
	return getJfrogTransferRepoErrorsSubDir(repoKey, coreutils.JfrogTransferSkippedErrorsDirName)
}

func getErrorOrDelayFiles(repoKeys []string, getDirPathFunc func(string) (string, error)) (filesPaths []string, err error) {
	var dirPath string
	for _, repoKey := range repoKeys {
		dirPath, err = getDirPathFunc(repoKey)
		if err != nil {
			return []string{}, err
		}
		exist, err := buildInfoUtils.IsDirExists(dirPath, false)
		if err != nil {
			return []string{}, err
		}
		if !exist {
			continue
		}
		files, err := buildInfoUtils.ListFiles(dirPath, false)
		if err != nil {
			return nil, err
		}
		filesPaths = append(filesPaths, files...)
	}
	return
}

// Increments the index until the file path is unique.
func getUniqueErrorOrDelayFilePath(dirPath string, getFileNamePrefix func() string) (delayFilePath string, err error) {
	var exists bool
	index := 0
	for {
		delayFilePath = filepath.Join(dirPath, fmt.Sprintf("%s-%d.json", getFileNamePrefix(), index))
		exists, err = fileutils.IsFileExists(delayFilePath, false)
		if err != nil {
			return "", err
		}
		if !exists {
			break
		}
		index++
	}
	return
}

func deleteAllFiles(filesToDelete []string) (err error) {
	for _, fileToDelete := range filesToDelete {
		log.Debug("Deleting:", fileToDelete, "...")
		err = errors.Join(err, errorutils.CheckError(os.Remove(fileToDelete)))
	}
	return
}
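// Illustrative sketch, not part of the original file: producing a unique retryable-errors file
// path for a repository, combining the directory and uniqueness helpers above. The prefix format
// (repo key plus thread ID) is a hypothetical choice for this example.
func exampleUniqueRetryableErrorsFilePath(repoKey string, threadId int) (string, error) {
	dirPath, err := getJfrogTransferRepoRetryableDir(repoKey)
	if err != nil {
		return "", err
	}
	// getUniqueErrorOrDelayFilePath appends "-<index>.json" and increments the index until no such file exists.
	return getUniqueErrorOrDelayFilePath(dirPath, func() string {
		return fmt.Sprintf("%s-%d", repoKey, threadId)
	})
}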