github.com/Ilhicas/nomad@v1.0.4-0.20210304152020-e86851182bc3/helper/boltdd/boltdd_test.go

package boltdd

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"

	"github.com/boltdb/bolt"
	"github.com/hashicorp/go-msgpack/codec"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/stretchr/testify/require"
)

type testingT interface {
	Fatalf(format string, args ...interface{})
	Logf(format string, args ...interface{})
}

func setupBoltDB(t testingT) (*DB, func()) {
	dir, err := ioutil.TempDir("", "nomadtest_")
	if err != nil {
		t.Fatalf("error creating tempdir: %v", err)
	}

	cleanup := func() {
		if err := os.RemoveAll(dir); err != nil {
			t.Logf("error removing test dir: %v", err)
		}
	}

	dbFilename := filepath.Join(dir, "nomadtest.db")
	db, err := Open(dbFilename, 0600, nil)
	if err != nil {
		cleanup()
		t.Fatalf("error creating boltdb: %v", err)
	}

	return db, func() {
		db.Close()
		cleanup()
	}
}

func TestDB_Open(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	db, cleanup := setupBoltDB(t)
	defer cleanup()

	require.Equal(0, db.BoltDB().Stats().TxStats.Write)
}

func TestDB_Close(t *testing.T) {
	t.Parallel()

	db, cleanup := setupBoltDB(t)
	defer cleanup()

	db.Close()

	require.Equal(t, db.Update(func(tx *Tx) error {
		_, err := tx.CreateBucketIfNotExists([]byte("foo"))
		return err
	}), bolt.ErrDatabaseNotOpen)

	require.Equal(t, db.Update(func(tx *Tx) error {
		_, err := tx.CreateBucket([]byte("foo"))
		return err
	}), bolt.ErrDatabaseNotOpen)
}

func TestBucket_Create(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	db, cleanup := setupBoltDB(t)
	defer cleanup()

	name := []byte("create_test")

	require.NoError(db.Update(func(tx *Tx) error {
		// Trying to get a nonexistent bucket should return nil
		require.Nil(tx.Bucket(name))

		// Creating a nonexistent bucket should work
		b, err := tx.CreateBucket(name)
		require.NoError(err)
		require.NotNil(b)

		// Recreating a bucket that exists should fail
		b, err = tx.CreateBucket(name)
		require.Error(err)
		require.Nil(b)

		// get or create should work
		b, err = tx.CreateBucketIfNotExists(name)
		require.NoError(err)
		require.NotNil(b)
		return nil
	}))

	// Bucket should be visible
	require.NoError(db.View(func(tx *Tx) error {
		require.NotNil(tx.Bucket(name))
		return nil
	}))
}

func TestBucket_DedupeWrites(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	db, cleanup := setupBoltDB(t)
	defer cleanup()

	bname := []byte("dedupewrites_test")
	k1name := []byte("k1")
	k2name := []byte("k2")

	// Put 2 keys
	require.NoError(db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket(bname)
		require.NoError(err)

		require.NoError(b.Put(k1name, k1name))
		require.NoError(b.Put(k2name, k2name))
		return nil
	}))

	// Assert there was at least 1 write
	origWrites := db.BoltDB().Stats().TxStats.Write
	require.NotZero(origWrites)

	// Write the same values again and expect no new writes
	require.NoError(db.Update(func(tx *Tx) error {
		b := tx.Bucket(bname)
		require.NoError(b.Put(k1name, k1name))
		require.NoError(b.Put(k2name, k2name))
		return nil
	}))

	putWrites := db.BoltDB().Stats().TxStats.Write

	// Unfortunately every committed transaction causes two writes, so this
	// only saves 1 write operation
	require.Equal(origWrites+2, putWrites)

	// Write new values and assert more writes took place
	require.NoError(db.Update(func(tx *Tx) error {
		b := tx.Bucket(bname)
		require.NoError(b.Put(k1name, []byte("newval1")))
		require.NoError(b.Put(k2name, []byte("newval2")))
		return nil
	}))

	putWrites2 := db.BoltDB().Stats().TxStats.Write

	// Expect 3 additional writes: 2 for the transaction and one for the
	// dirty page
	require.Equal(putWrites+3, putWrites2)
}

func TestBucket_Delete(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	db, cleanup := setupBoltDB(t)
	defer cleanup()

	parentName := []byte("delete_test")
	parentKey := []byte("parent_key")
	childName := []byte("child")
	childKey := []byte("child_key")
	grandchildName1 := []byte("grandchild1")
	grandchildKey1 := []byte("grandchild_key1")
	grandchildName2 := []byte("grandchild2")
	grandchildKey2 := []byte("grandchild_key2")

	// Create a parent bucket with 1 child and 2 grandchildren
	require.NoError(db.Update(func(tx *Tx) error {
		pb, err := tx.CreateBucket(parentName)
		require.NoError(err)

		require.NoError(pb.Put(parentKey, parentKey))

		child, err := pb.CreateBucket(childName)
		require.NoError(err)

		require.NoError(child.Put(childKey, childKey))

		grandchild1, err := child.CreateBucket(grandchildName1)
		require.NoError(err)

		require.NoError(grandchild1.Put(grandchildKey1, grandchildKey1))

		grandchild2, err := child.CreateBucket(grandchildName2)
		require.NoError(err)

		require.NoError(grandchild2.Put(grandchildKey2, grandchildKey2))
		return nil
	}))

	// Verify the grandchild keys were written
	require.NoError(db.View(func(tx *Tx) error {
		grandchild1 := tx.Bucket(parentName).Bucket(childName).Bucket(grandchildName1)
		var v1 []byte
		grandchild1.Get(grandchildKey1, &v1)
		require.Equal(grandchildKey1, v1)

		grandchild2 := tx.Bucket(parentName).Bucket(childName).Bucket(grandchildName2)
		var v2 []byte
		grandchild2.Get(grandchildKey2, &v2)
		require.Equal(grandchildKey2, v2)
		return nil
	}))

	// Delete grandchildKey1 and grandchild2
	require.NoError(db.Update(func(tx *Tx) error {
		child := tx.Bucket(parentName).Bucket(childName)

		require.NoError(child.DeleteBucket(grandchildName2))

		grandchild1 := child.Bucket(grandchildName1)
		require.NoError(grandchild1.Delete(grandchildKey1))
		return nil
	}))

	// Ensure grandchildKey1 and the grandchild2 bucket were deleted
	require.NoError(db.View(func(tx *Tx) error {
		grandchild1 := tx.Bucket(parentName).Bucket(childName).Bucket(grandchildName1)
		var v1 []byte
		grandchild1.Get(grandchildKey1, &v1)
		require.Equal(([]byte)(nil), v1)

		grandchild2 := tx.Bucket(parentName).Bucket(childName).Bucket(grandchildName2)
		require.Nil(grandchild2)
		return nil
	}))

	// Deleting the child bucket should delete grandchild1 as well
	require.NoError(db.Update(func(tx *Tx) error {
		parent := tx.Bucket(parentName)
		require.NoError(parent.DeleteBucket(childName))

		// Recreate the child bucket and ensure childKey and the grandchild are gone
		child, err := parent.CreateBucket(childName)
		require.NoError(err)

		var v []byte
		err = child.Get(childKey, &v)
		require.Error(err)
		require.True(IsErrNotFound(err))
		require.Equal(([]byte)(nil), v)

		require.Nil(child.Bucket(grandchildName1))

		// Rewrite childKey to make sure it doesn't get deduped incorrectly
		require.NoError(child.Put(childKey, childKey))
		return nil
	}))

	// Ensure childKey was rewritten and not deduped incorrectly
	require.NoError(db.View(func(tx *Tx) error {
		var v []byte
		require.NoError(tx.Bucket(parentName).Bucket(childName).Get(childKey, &v))
		require.Equal(childKey, v)
		return nil
	}))
}

func BenchmarkWriteDeduplication_On(b *testing.B) {
	db, cleanup := setupBoltDB(b)
	defer cleanup()

	bucketName := []byte("allocations")
	alloc := mock.Alloc()
	allocID := []byte(alloc.ID)

	err := db.Update(func(tx *Tx) error {
		allocs, err := tx.CreateBucket(bucketName)
		if err != nil {
			return err
		}

		return allocs.Put(allocID, alloc)
	})

	if err != nil {
		b.Fatalf("error setting up: %v", err)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := db.Update(func(tx *Tx) error {
			return tx.Bucket(bucketName).Put(allocID, alloc)
		})

		if err != nil {
			b.Fatalf("error at runtime: %v", err)
		}
	}
}

func BenchmarkWriteDeduplication_Off(b *testing.B) {
	dir, err := ioutil.TempDir("", "nomadtest_")
	if err != nil {
		b.Fatalf("error creating tempdir: %v", err)
	}

	defer func() {
		if err := os.RemoveAll(dir); err != nil {
			b.Logf("error removing test dir: %v", err)
		}
	}()

	dbFilename := filepath.Join(dir, "nomadtest.db")
	db, err := Open(dbFilename, 0600, nil)
	if err != nil {
		b.Fatalf("error creating boltdb: %v", err)
	}

	defer db.Close()

	bucketName := []byte("allocations")
	alloc := mock.Alloc()
	allocID := []byte(alloc.ID)

	err = db.Update(func(tx *Tx) error {
		allocs, err := tx.CreateBucket(bucketName)
		if err != nil {
			return err
		}

		var buf bytes.Buffer
		if err := codec.NewEncoder(&buf, structs.MsgpackHandle).Encode(alloc); err != nil {
			return fmt.Errorf("failed to encode passed object: %v", err)
		}

		return allocs.Put(allocID, buf)
	})

	if err != nil {
		b.Fatalf("error setting up: %v", err)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		err := db.Update(func(tx *Tx) error {
			var buf bytes.Buffer
			if err := codec.NewEncoder(&buf, structs.MsgpackHandle).Encode(alloc); err != nil {
				return fmt.Errorf("failed to encode passed object: %v", err)
			}

			return tx.Bucket(bucketName).Put(allocID, buf)
		})

		if err != nil {
			b.Fatalf("error at runtime: %v", err)
		}
	}
}
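
// The dedupe tests and benchmarks above rely on boltdd transparently
// msgpack-encoding values passed to Put and decoding them again in Get.
// The sketch below illustrates that round trip using only calls already
// exercised elsewhere in this file; the test name and the local struct
// type are illustrative additions, not part of the upstream suite.
func TestBucket_PutGetRoundTrip_Sketch(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	db, cleanup := setupBoltDB(t)
	defer cleanup()

	type entry struct {
		Name  string
		Count int
	}
	in := entry{Name: "example", Count: 3}

	bname := []byte("roundtrip_sketch")
	key := []byte("k")

	// Put msgpack-encodes the struct before writing it to the bucket.
	require.NoError(db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucketIfNotExists(bname)
		if err != nil {
			return err
		}
		return b.Put(key, in)
	}))

	// Get decodes the stored bytes back into the supplied pointer.
	require.NoError(db.View(func(tx *Tx) error {
		var out entry
		if err := tx.Bucket(bname).Get(key, &out); err != nil {
			return err
		}
		require.Equal(in, out)
		return nil
	}))
}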