go.chromium.org/luci@v0.0.0-20240309015107-7cdc2e660f33/cv/internal/retention/run_test.go

// Copyright 2024 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package retention

import (
	"context"
	"sync"
	"testing"
	"time"

	"go.chromium.org/luci/common/data/rand/mathrand"
	"go.chromium.org/luci/gae/service/datastore"

	cfgpb "go.chromium.org/luci/cv/api/config/v2"
	"go.chromium.org/luci/cv/internal/common"
	"go.chromium.org/luci/cv/internal/configs/prjcfg/prjcfgtest"
	"go.chromium.org/luci/cv/internal/cvtesting"
	"go.chromium.org/luci/cv/internal/run"

	. "github.com/smartystreets/goconvey/convey"
	. "go.chromium.org/luci/common/testing/assertions"
)

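// TestScheduleWipeoutRuns verifies that scheduleWipeoutRuns enqueues wipeout
// tasks covering exactly the Runs whose creation time falls outside the
// retention period, across both enabled and disabled LUCI projects.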
func TestScheduleWipeoutRuns(t *testing.T) {
	t.Parallel()

	Convey("Schedule wipeout runs tasks", t, func() {
		ct := cvtesting.Test{}
		ctx, cancel := ct.SetUp(t)
		defer cancel()
		registerWipeoutRunsTask(ct.TQDispatcher, &mockRM{})

		// Test scenario: create many Runs under 2 LUCI projects (1 of them
		// disabled) and verify that tasks are scheduled for all Runs that
		// fall outside the retention period.

		cfg := &cfgpb.Config{
			ConfigGroups: []*cfgpb.ConfigGroup{{
				Name: "main",
			}},
		}
		const projFoo = "foo"
		const projDisabled = "disabled"
		prjcfgtest.Create(ctx, projFoo, cfg)
		prjcfgtest.Create(ctx, projDisabled, cfg)
		prjcfgtest.Disable(ctx, projDisabled)

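		// createNRunsBetween stores n Runs for proj whose creation times are
		// drawn uniformly at random from [start, end).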
		createNRunsBetween := func(proj string, n int, start, end time.Time) []*run.Run {
			So(n, ShouldBeGreaterThan, 0)
			So(end, ShouldHappenAfter, start)
			runs := make([]*run.Run, n)
			for i := range runs {
				// Pick a random creation time in [start, end), truncated to
				// millisecond precision.
				createTime := start.Add(time.Duration(mathrand.Int63n(ctx, int64(end.Sub(start))))).Truncate(time.Millisecond)
				runs[i] = &run.Run{
					ID:         common.MakeRunID(proj, createTime, 1, []byte("deadbeef")),
					CreateTime: createTime,
				}
			}
			So(datastore.Put(ctx, runs), ShouldBeNil)
			return runs
		}

		cutOff := ct.Clock.Now().UTC().Add(-retentionPeriod)
		var allRuns []*run.Run
		allRuns = append(allRuns, createNRunsBetween(projFoo, 1000, cutOff.Add(-time.Hour), cutOff.Add(time.Hour))...)
		allRuns = append(allRuns, createNRunsBetween(projDisabled, 500, cutOff.Add(-time.Minute), cutOff.Add(time.Minute))...)

		var expectedRuns common.RunIDs
		for _, r := range allRuns {
			if r.CreateTime.Before(cutOff) {
				expectedRuns.InsertSorted(r.ID)
			}
		}

		err := scheduleWipeoutRuns(ctx, ct.TQDispatcher)
		So(err, ShouldBeNil)
		var actualRuns common.RunIDs
		for _, task := range ct.TQ.Tasks() {
			So(task.ETA, ShouldHappenWithin, wipeoutTasksDistInterval, ct.Clock.Now())
			ids := task.Payload.(*WipeoutRunsTask).GetIds()
			So(len(ids), ShouldBeLessThanOrEqualTo, runsPerTask)
			for _, id := range ids {
				actualRuns.InsertSorted(common.RunID(id))
			}
		}
		So(actualRuns, ShouldResemble, expectedRuns)
	})
}

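// TestWipeoutRuns verifies that wipeoutRuns deletes expired Runs together with
// their child entities, retains Runs still within the retention period, and
// pokes the Run Manager for Runs that have not ended yet.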
func TestWipeoutRuns(t *testing.T) {
	t.Parallel()

	Convey("Wipeout", t, func() {
		ct := cvtesting.Test{}
		ctx, cancel := ct.SetUp(t)
		defer cancel()

		const lProject = "infra"
		mockRM := &mockRM{}
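		// makeRun stores a SUCCEEDED Run created at createTime and returns it.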
		makeRun := func(createTime time.Time) *run.Run {
			r := &run.Run{
				ID:         common.MakeRunID(lProject, createTime, 1, []byte("deadbeef")),
				CreateTime: createTime,
				Status:     run.Status_SUCCEEDED,
			}
			So(datastore.Put(ctx, r), ShouldBeNil)
			return r
		}

		Convey("wipeout Run and children", func() {
			r := makeRun(ct.Clock.Now().Add(-2 * retentionPeriod).UTC())
			cl1 := &run.RunCL{
				ID:  1,
				Run: datastore.KeyForObj(ctx, r),
			}
			cl2 := &run.RunCL{
				ID:  2,
				Run: datastore.KeyForObj(ctx, r),
			}
			Convey("run and run cls", func() {
				So(datastore.Put(ctx, cl1, cl2), ShouldBeNil)
				So(wipeoutRuns(ctx, common.RunIDs{r.ID}, mockRM), ShouldBeNil)
				So(datastore.Get(ctx, r), ShouldErrLike, datastore.ErrNoSuchEntity)
				So(datastore.Get(ctx, cl1), ShouldErrLike, datastore.ErrNoSuchEntity)
				So(datastore.Get(ctx, cl2), ShouldErrLike, datastore.ErrNoSuchEntity)
			})

			Convey("with a lot of logs", func() {
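				// 5000 RunLog entities exercise wipeout of a Run with many
				// child entities; the count is assumed to be large enough to
				// require more than one deletion batch.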
				logs := make([]*run.RunLog, 5000)
				for i := range logs {
					logs[i] = &run.RunLog{
						ID:  int64(i + 1000),
						Run: datastore.KeyForObj(ctx, r),
					}
				}
				So(datastore.Put(ctx, cl1, cl2, logs), ShouldBeNil)
				So(wipeoutRuns(ctx, common.RunIDs{r.ID}, mockRM), ShouldBeNil)
				for _, log := range logs {
					So(datastore.Get(ctx, log), ShouldErrLike, datastore.ErrNoSuchEntity)
				}
				So(datastore.Get(ctx, r), ShouldErrLike, datastore.ErrNoSuchEntity)
				So(datastore.Get(ctx, cl1), ShouldErrLike, datastore.ErrNoSuchEntity)
				So(datastore.Get(ctx, cl2), ShouldErrLike, datastore.ErrNoSuchEntity)
			})
		})

		Convey("handle run that doesn't exist", func() {
			createTime := ct.Clock.Now().Add(-2 * retentionPeriod).UTC()
			rid := common.MakeRunID(lProject, createTime, 1, []byte("deadbeef"))
			So(wipeoutRuns(ctx, common.RunIDs{rid}, mockRM), ShouldBeNil)
		})

		Convey("handle run that should still be retained", func() {
			r := makeRun(ct.Clock.Now().Add(-retentionPeriod / 2).UTC())
			So(wipeoutRuns(ctx, common.RunIDs{r.ID}, mockRM), ShouldBeNil)
			So(datastore.Get(ctx, r), ShouldBeNil)
		})

		Convey("Poke run if it is not ended", func() {
			r := makeRun(ct.Clock.Now().Add(-2 * retentionPeriod).UTC())
			r.Status = run.Status_PENDING
			So(datastore.Put(ctx, r), ShouldBeNil)
			So(wipeoutRuns(ctx, common.RunIDs{r.ID}, mockRM), ShouldBeNil)
			So(datastore.Get(ctx, r), ShouldBeNil)
			So(mockRM.called, ShouldResemble, common.RunIDs{r.ID})
		})
	})
}

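// mockRM is a test double for the Run Manager: it records the ID of every Run
// passed to PokeNow and is safe for concurrent use.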
type mockRM struct {
	called   common.RunIDs
	calledMu sync.Mutex
}

func (rm *mockRM) PokeNow(ctx context.Context, runID common.RunID) error {
	rm.calledMu.Lock()
	defer rm.calledMu.Unlock()
	rm.called = append(rm.called, runID)
	return nil
}
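
// Compile-time check that mockRM provides the PokeNow method that
// registerWipeoutRunsTask and wipeoutRuns rely on. The anonymous interface
// below is used for illustration; the production code may declare a named
// interface instead.
var _ interface {
	PokeNow(context.Context, common.RunID) error
} = (*mockRM)(nil)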