github.com/grafana/pyroscope@v1.18.0/pkg/phlaredb/block/block_test.go (about) 1 // SPDX-License-Identifier: AGPL-3.0-only 2 // Provenance-includes-location: https://github.com/thanos-io/thanos/blob/main/pkg/block/block_test.go 3 // Provenance-includes-license: Apache-2.0 4 // Provenance-includes-copyright: The Thanos Authors. 5 6 package block_test 7 8 import ( 9 "bytes" 10 "context" 11 "encoding/json" 12 "io" 13 "os" 14 "path" 15 "strings" 16 "testing" 17 "time" 18 19 "github.com/go-kit/log" 20 "github.com/oklog/ulid/v2" 21 "github.com/pkg/errors" 22 "github.com/prometheus/client_golang/prometheus" 23 "github.com/prometheus/client_golang/prometheus/promauto" 24 promtest "github.com/prometheus/client_golang/prometheus/testutil" 25 "github.com/stretchr/testify/require" 26 "github.com/thanos-io/objstore" 27 "go.uber.org/goleak" 28 29 objstore_testutil "github.com/grafana/pyroscope/pkg/objstore/testutil" 30 "github.com/grafana/pyroscope/pkg/phlaredb/block" 31 block_testutil "github.com/grafana/pyroscope/pkg/phlaredb/block/testutil" 32 "github.com/grafana/pyroscope/pkg/pprof/testhelper" 33 "github.com/grafana/pyroscope/pkg/test" 34 ) 35 36 func TestIsBlockDir(t *testing.T) { 37 for _, tc := range []struct { 38 input string 39 id ulid.ULID 40 bdir bool 41 }{ 42 { 43 input: "", 44 bdir: false, 45 }, 46 { 47 input: "something", 48 bdir: false, 49 }, 50 { 51 id: ulid.MustNew(1, nil), 52 input: ulid.MustNew(1, nil).String(), 53 bdir: true, 54 }, 55 { 56 id: ulid.MustNew(2, nil), 57 input: "/" + ulid.MustNew(2, nil).String(), 58 bdir: true, 59 }, 60 { 61 id: ulid.MustNew(3, nil), 62 input: "some/path/" + ulid.MustNew(3, nil).String(), 63 bdir: true, 64 }, 65 { 66 input: ulid.MustNew(4, nil).String() + "/something", 67 bdir: false, 68 }, 69 } { 70 t.Run(tc.input, func(t *testing.T) { 71 id, ok := block.IsBlockDir(tc.input) 72 require.Equal(t, tc.bdir, ok) 73 74 if id.Compare(tc.id) != 0 { 75 t.Errorf("expected %s got %s", tc.id, id) 76 t.FailNow() 77 } 78 }) 79 } 80 } 81 82 
// TestDelete uploads a freshly created block and exercises block.Delete:
// once on an intact block (after marking it for deletion) and once on a
// partially-deleted block whose meta.json was already removed. Both
// in-memory and filesystem bucket implementations are covered.
func TestDelete(t *testing.T) {
	defer goleak.VerifyNone(t, goleak.IgnoreCurrent())
	ctx := context.Background()

	runTest := func(t *testing.T, bkt objstore.Bucket) {
		{
			meta, dir := block_testutil.CreateBlock(t, func() []*testhelper.ProfileBuilder {
				return []*testhelper.ProfileBuilder{
					testhelper.NewProfileBuilder(int64(1)).
						CPUProfile().
						WithLabels(
							"job", "a",
						).ForStacktraceString("foo", "bar", "baz").AddSamples(1),
				}
			})

			require.NoError(t, block.Upload(ctx, log.NewNopLogger(), bkt, path.Join(dir, meta.ULID.String())))
			// 9 is the expected number of objects an uploaded block produces
			// (meta, index, parquet files, ...).
			require.Equal(t, 9, len(objects(t, bkt, meta.ULID)))

			markedForDeletion := promauto.With(prometheus.NewRegistry()).NewCounter(prometheus.CounterOpts{Name: "test"})
			require.NoError(t, block.MarkForDeletion(ctx, log.NewNopLogger(), bkt, meta.ULID, "", false, markedForDeletion))

			// Full delete.
			require.NoError(t, block.Delete(ctx, log.NewNopLogger(), bkt, meta.ULID))
			require.Equal(t, 0, len(objects(t, bkt, meta.ULID)))
		}
		{
			b2, tmpDir := block_testutil.CreateBlock(t, func() []*testhelper.ProfileBuilder {
				return []*testhelper.ProfileBuilder{
					testhelper.NewProfileBuilder(int64(1)).
						CPUProfile().
						WithLabels(
							"job", "a",
						).ForStacktraceString("foo", "bar", "baz").AddSamples(1),
				}
			})
			require.NoError(t, block.Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, b2.ULID.String())))
			require.Equal(t, 9, len(objects(t, bkt, b2.ULID)))

			// Remove meta.json and check if delete can delete it.
			require.NoError(t, bkt.Delete(ctx, path.Join(b2.ULID.String(), block.MetaFilename)))
			require.NoError(t, block.Delete(ctx, log.NewNopLogger(), bkt, b2.ULID))
			require.Equal(t, 0, len(objects(t, bkt, b2.ULID)))
		}
	}

	t.Run(t.Name()+"_inmemory", func(t *testing.T) {
		bkt := objstore.NewInMemBucket()
		runTest(t, bkt)
	})

	t.Run(t.Name()+"_filesystem", func(t *testing.T) {
		bkt, _ := objstore_testutil.NewFilesystemBucket(t, context.Background(), t.TempDir())
		runTest(t, bkt)
	})
}

// objects returns the names of all non-directory objects stored under the
// given block ID prefix in bkt, using a recursive listing.
func objects(t *testing.T, bkt objstore.Bucket, id ulid.ULID) (objects []string) {
	t.Helper()
	require.NoError(t,
		bkt.Iter(context.Background(), id.String(), func(name string) error {
			// Skip directory entries; only collect real objects.
			if strings.HasSuffix(name, objstore.DirDelim) {
				return nil
			}
			objects = append(objects, name)
			return nil
		}, objstore.WithRecursiveIter()))
	return
}

// TestUpload exercises block.Upload failure modes (missing dir, non-block
// dir, empty block dir, missing meta.json) and then the happy path,
// verifying that uploads are idempotent.
func TestUpload(t *testing.T) {
	ctx := context.Background()

	bkt := objstore.NewInMemBucket()
	b1, tmpDir := block_testutil.CreateBlock(t, func() []*testhelper.ProfileBuilder {
		return []*testhelper.ProfileBuilder{
			testhelper.NewProfileBuilder(int64(1)).
				CPUProfile().
				WithLabels(
					"a", "3", "b", "1",
				).ForStacktraceString("foo", "bar", "baz").AddSamples(1),
		}
	})

	// Staging area under "test/<ulid>" that starts out empty; files are
	// copied in incrementally by the subtests below.
	require.NoError(t, os.MkdirAll(path.Join(tmpDir, "test", b1.ULID.String()), os.ModePerm))

	t.Run("wrong dir", func(t *testing.T) {
		// Wrong dir.
		err := block.Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, "not-existing"))
		require.Error(t, err)
		require.Contains(t, err.Error(), "/not-existing: no such file or directory")
	})

	t.Run("wrong existing dir (not a block)", func(t *testing.T) {
		err := block.Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, "test"))
		require.EqualError(t, err, "not a block dir: ulid: bad data size when unmarshaling")
	})

	t.Run("empty block dir", func(t *testing.T) {
		err := block.Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, "test", b1.ULID.String()))
		require.Error(t, err)
		require.Contains(t, err.Error(), "/meta.json: no such file or directory")
	})

	t.Run("missing meta.json file", func(t *testing.T) {
		test.Copy(t, path.Join(tmpDir, b1.ULID.String(), block.IndexFilename), path.Join(tmpDir, "test", b1.ULID.String(), block.IndexFilename))

		// Missing meta.json file.
		err := block.Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, "test", b1.ULID.String()))
		require.Error(t, err)
		require.Contains(t, err.Error(), "/meta.json: no such file or directory")
	})

	test.Copy(t, path.Join(tmpDir, b1.ULID.String(), block.MetaFilename), path.Join(tmpDir, "test", b1.ULID.String(), block.MetaFilename))

	t.Run("full block", func(t *testing.T) {
		require.NoError(t, block.Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, b1.ULID.String())))
		require.Equal(t, 9, len(bkt.Objects()))
		objs := bkt.Objects()
		require.Contains(t, objs, path.Join(b1.ULID.String(), block.MetaFilename))
		require.Contains(t, objs, path.Join(b1.ULID.String(), block.IndexFilename))
		require.Contains(t, objs, path.Join(b1.ULID.String(), "profiles.parquet"))
	})

	t.Run("upload is idempotent", func(t *testing.T) {
		// Re-uploading the same block must not error or change the object set.
		require.NoError(t, block.Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, b1.ULID.String())))
		require.Equal(t, 9, len(bkt.Objects()))
		objs := bkt.Objects()
		require.Contains(t, objs, path.Join(b1.ULID.String(), block.MetaFilename))
		require.Contains(t, objs, path.Join(b1.ULID.String(), block.IndexFilename))
		require.Contains(t, objs, path.Join(b1.ULID.String(), "profiles.parquet"))
	})
}

// TestMarkForDeletion checks that block.MarkForDeletion increments the
// supplied counter exactly once per newly-marked block, and does not
// increment it when a deletion mark already exists.
func TestMarkForDeletion(t *testing.T) {
	defer goleak.VerifyNone(t, goleak.IgnoreCurrent())
	ctx := context.Background()

	for _, tcase := range []struct {
		name string
		// preUpload runs before the block is uploaded, e.g. to plant a
		// pre-existing deletion mark.
		preUpload func(t testing.TB, id ulid.ULID, bkt objstore.Bucket)

		blocksMarked int
	}{
		{
			name:         "block marked for deletion",
			preUpload:    func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) {},
			blocksMarked: 1,
		},
		{
			name: "block with deletion mark already, expected log and no metric increment",
			preUpload: func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) {
				deletionMark, err := json.Marshal(block.DeletionMark{
					ID:           id,
					DeletionTime: time.Now().Unix(),
					Version:      block.DeletionMarkVersion1,
				})
				require.NoError(t, err)
				require.NoError(t, bkt.Upload(ctx, path.Join(id.String(), block.DeletionMarkFilename), bytes.NewReader(deletionMark)))
			},
			blocksMarked: 0,
		},
	} {
		t.Run(tcase.name, func(t *testing.T) {
			bkt := objstore.NewInMemBucket()
			b1, tmpDir := block_testutil.CreateBlock(t, func() []*testhelper.ProfileBuilder {
				return []*testhelper.ProfileBuilder{
					testhelper.NewProfileBuilder(int64(1)).
						CPUProfile().
						WithLabels(
							"a", "3", "b", "1",
						).ForStacktraceString("foo", "bar", "baz").AddSamples(1),
				}
			})
			id := b1.ULID

			tcase.preUpload(t, id, bkt)

			require.NoError(t, block.Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, id.String())))

			c := promauto.With(nil).NewCounter(prometheus.CounterOpts{})
			err := block.MarkForDeletion(ctx, log.NewNopLogger(), bkt, id, "", false, c)
			require.NoError(t, err)
			require.Equal(t, float64(tcase.blocksMarked), promtest.ToFloat64(c))
		})
	}
}

// TestMarkForNoCompact mirrors TestMarkForDeletion for the no-compact mark:
// the counter increments only when a mark is newly written, not when one
// already exists.
func TestMarkForNoCompact(t *testing.T) {
	defer goleak.VerifyNone(t, goleak.IgnoreCurrent())
	ctx := context.Background()

	for _, tcase := range []struct {
		name string
		// preUpload runs before the block is uploaded, e.g. to plant a
		// pre-existing no-compact mark.
		preUpload func(t testing.TB, id ulid.ULID, bkt objstore.Bucket)

		blocksMarked int
	}{
		{
			name:         "block marked",
			preUpload:    func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) {},
			blocksMarked: 1,
		},
		{
			name: "block with no-compact mark already, expected log and no metric increment",
			preUpload: func(t testing.TB, id ulid.ULID, bkt objstore.Bucket) {
				m, err := json.Marshal(block.NoCompactMark{
					ID:            id,
					NoCompactTime: time.Now().Unix(),
					Version:       block.NoCompactMarkVersion1,
				})
				require.NoError(t, err)
				require.NoError(t, bkt.Upload(ctx, path.Join(id.String(), block.NoCompactMarkFilename), bytes.NewReader(m)))
			},
			blocksMarked: 0,
		},
	} {
		t.Run(tcase.name, func(t *testing.T) {
			bkt := objstore.NewInMemBucket()
			meta, tmpDir := block_testutil.CreateBlock(t, func() []*testhelper.ProfileBuilder {
				return []*testhelper.ProfileBuilder{
					testhelper.NewProfileBuilder(int64(1)).
						CPUProfile().
						WithLabels(
							"a", "3", "b", "1",
						).ForStacktraceString("foo", "bar", "baz").AddSamples(1),
				}
			})
			id := meta.ULID

			tcase.preUpload(t, id, bkt)

			require.NoError(t, block.Upload(ctx, log.NewNopLogger(), bkt, path.Join(tmpDir, id.String())))

			c := promauto.With(nil).NewCounter(prometheus.CounterOpts{})
			err := block.MarkForNoCompact(ctx, log.NewNopLogger(), bkt, id, block.ManualNoCompactReason, "", c)
			require.NoError(t, err)
			require.Equal(t, float64(tcase.blocksMarked), promtest.ToFloat64(c))
		})
	}
}

// TestUploadCleanup injects upload failures via errBucket to verify the
// cleanup contract of block.Upload: a failed index upload removes the
// partially-uploaded block, while a failed meta.json upload (the final step)
// leaves the already-uploaded objects in place.
func TestUploadCleanup(t *testing.T) {
	defer goleak.VerifyNone(t, goleak.IgnoreCurrent())

	ctx := context.Background()

	bkt := objstore.NewInMemBucket()
	meta, tmpDir := block_testutil.CreateBlock(t, func() []*testhelper.ProfileBuilder {
		return []*testhelper.ProfileBuilder{
			testhelper.NewProfileBuilder(int64(1)).
				CPUProfile().
				WithLabels(
					"a", "3", "b", "1",
				).ForStacktraceString("foo", "bar", "baz").AddSamples(1),
		}
	})
	b1 := meta.ULID

	{
		errBkt := errBucket{Bucket: bkt, failSuffix: "/index.tsdb"}

		uploadErr := block.Upload(ctx, log.NewNopLogger(), errBkt, path.Join(tmpDir, b1.String()))
		require.ErrorIs(t, uploadErr, errUploadFailed)

		// If upload of index fails, block is deleted.
		require.Equal(t, 0, len(bkt.Objects()))
	}

	{
		errBkt := errBucket{Bucket: bkt, failSuffix: "/meta.json"}

		uploadErr := block.Upload(ctx, log.NewNopLogger(), errBkt, path.Join(tmpDir, b1.String()))
		require.ErrorIs(t, uploadErr, errUploadFailed)

		// If upload of meta.json fails, nothing is cleaned up.
		require.Equal(t, 9, len(bkt.Objects()))
		require.Greater(t, len(bkt.Objects()[path.Join(b1.String(), block.IndexFilename)]), 0)
		require.Greater(t, len(bkt.Objects()[path.Join(b1.String(), block.MetaFilename)]), 0)
	}
}

// errUploadFailed is the sentinel error injected by errBucket.
var errUploadFailed = errors.New("upload failed")

// errBucket wraps an objstore.Bucket and reports errUploadFailed for any
// object whose name ends with failSuffix — but only AFTER the underlying
// upload has succeeded, so the object actually exists in the bucket when the
// error is returned. This lets tests observe Upload's cleanup behavior.
type errBucket struct {
	objstore.Bucket

	failSuffix string
}

func (eb errBucket) Upload(ctx context.Context, name string, r io.Reader, opts ...objstore.ObjectUploadOption) error {
	err := eb.Bucket.Upload(ctx, name, r, opts...)
	if err != nil {
		return err
	}

	// Deliberately fail after the write so the object is present in the
	// bucket despite the reported error.
	if strings.HasSuffix(name, eb.failSuffix) {
		return errUploadFailed
	}
	return nil
}