2023-04-10 15:36:59 +00:00
|
|
|
// Copyright (c) HashiCorp, Inc.
|
|
|
|
// SPDX-License-Identifier: MPL-2.0
|
|
|
|
|
2018-08-16 18:14:52 +00:00
|
|
|
package boltdd
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
|
|
|
"fmt"
|
|
|
|
"path/filepath"
|
|
|
|
"testing"
|
|
|
|
|
2023-05-01 21:18:34 +00:00
|
|
|
"github.com/hashicorp/go-msgpack/codec"
|
2022-03-15 12:42:43 +00:00
|
|
|
"github.com/hashicorp/nomad/ci"
|
2018-08-16 18:14:52 +00:00
|
|
|
"github.com/hashicorp/nomad/nomad/mock"
|
|
|
|
"github.com/hashicorp/nomad/nomad/structs"
|
2022-07-03 16:18:47 +00:00
|
|
|
"github.com/shoenig/test/must"
|
2022-02-23 20:04:44 +00:00
|
|
|
"go.etcd.io/bbolt"
|
2018-08-16 18:14:52 +00:00
|
|
|
)
|
|
|
|
|
2022-07-03 16:18:47 +00:00
|
|
|
const (
	// testDB is the filename used for the temporary BoltDB file created by
	// each test.
	testDB = "nomad-test.db"

	// testDBPerms is the file mode the test database is created with
	// (owner read/write only).
	testDBPerms = 0600
)
|
|
|
|
|
|
|
|
// employee is a simple struct type for testing msgpack encoding/decoding.
type employee struct {
	// Name is an arbitrary string value.
	Name string
	// ID is an arbitrary integer value.
	ID int
}
|
|
|
|
|
2022-05-12 15:42:40 +00:00
|
|
|
func setupBoltDB(t testing.TB) *DB {
|
|
|
|
dir := t.TempDir()
|
2018-08-16 18:14:52 +00:00
|
|
|
|
2022-07-03 16:18:47 +00:00
|
|
|
dbFilename := filepath.Join(dir, testDB)
|
|
|
|
db, err := Open(dbFilename, testDBPerms, nil)
|
|
|
|
must.NoError(t, err)
|
2018-08-16 18:14:52 +00:00
|
|
|
|
2022-05-12 15:42:40 +00:00
|
|
|
t.Cleanup(func() {
|
2022-07-03 16:18:47 +00:00
|
|
|
must.NoError(t, db.Close())
|
2022-05-12 15:42:40 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
return db
|
2018-08-16 18:14:52 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
func TestDB_Open(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2022-05-12 15:42:40 +00:00
|
|
|
db := setupBoltDB(t)
|
2022-07-03 16:18:47 +00:00
|
|
|
must.Zero(t, db.BoltDB().Stats().TxStats.Write)
|
2018-08-16 18:14:52 +00:00
|
|
|
}
|
|
|
|
|
2018-11-13 22:09:33 +00:00
|
|
|
func TestDB_Close(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2018-11-13 22:09:33 +00:00
|
|
|
|
2022-05-12 15:42:40 +00:00
|
|
|
db := setupBoltDB(t)
|
2018-11-13 22:09:33 +00:00
|
|
|
|
2022-07-03 16:18:47 +00:00
|
|
|
must.NoError(t, db.Close())
|
2018-11-13 22:09:33 +00:00
|
|
|
|
2022-07-03 16:18:47 +00:00
|
|
|
must.Eq(t, db.Update(func(tx *Tx) error {
|
2018-11-13 22:09:33 +00:00
|
|
|
_, err := tx.CreateBucketIfNotExists([]byte("foo"))
|
|
|
|
return err
|
2022-02-23 20:04:44 +00:00
|
|
|
}), bbolt.ErrDatabaseNotOpen)
|
2018-11-13 22:09:33 +00:00
|
|
|
|
2022-07-03 16:18:47 +00:00
|
|
|
must.Eq(t, db.Update(func(tx *Tx) error {
|
2018-11-13 22:09:33 +00:00
|
|
|
_, err := tx.CreateBucket([]byte("foo"))
|
|
|
|
return err
|
2022-02-23 20:04:44 +00:00
|
|
|
}), bbolt.ErrDatabaseNotOpen)
|
2018-11-13 22:09:33 +00:00
|
|
|
}
|
|
|
|
|
2018-08-16 18:14:52 +00:00
|
|
|
func TestBucket_Create(t *testing.T) {
|
2022-03-15 12:42:43 +00:00
|
|
|
ci.Parallel(t)
|
2018-08-16 18:14:52 +00:00
|
|
|
|
2022-05-12 15:42:40 +00:00
|
|
|
db := setupBoltDB(t)
|
2018-08-16 18:14:52 +00:00
|
|
|
|
|
|
|
name := []byte("create_test")
|
|
|
|
|
2022-07-03 16:18:47 +00:00
|
|
|
must.NoError(t, db.Update(func(tx *Tx) error {
|
2018-08-16 18:14:52 +00:00
|
|
|
// Trying to get a nonexistent bucket should return nil
|
2022-07-03 16:18:47 +00:00
|
|
|
must.Nil(t, tx.Bucket(name))
|
2018-08-16 18:14:52 +00:00
|
|
|
|
|
|
|
// Creating a nonexistent bucket should work
|
|
|
|
b, err := tx.CreateBucket(name)
|
2022-07-03 16:18:47 +00:00
|
|
|
must.NoError(t, err)
|
|
|
|
must.NotNil(t, b)
|
2018-08-16 18:14:52 +00:00
|
|
|
|
|
|
|
// Recreating a bucket that exists should fail
|
|
|
|
b, err = tx.CreateBucket(name)
|
2022-07-03 16:18:47 +00:00
|
|
|
must.Error(t, err)
|
|
|
|
must.Nil(t, b)
|
2018-08-16 18:14:52 +00:00
|
|
|
|
|
|
|
// get or create should work
|
|
|
|
b, err = tx.CreateBucketIfNotExists(name)
|
2022-07-03 16:18:47 +00:00
|
|
|
must.NoError(t, err)
|
|
|
|
must.NotNil(t, b)
|
2018-08-16 18:14:52 +00:00
|
|
|
return nil
|
|
|
|
}))
|
|
|
|
|
|
|
|
// Bucket should be visible
|
2022-07-03 16:18:47 +00:00
|
|
|
must.NoError(t, db.View(func(tx *Tx) error {
|
|
|
|
must.NotNil(t, tx.Bucket(name))
|
|
|
|
return nil
|
|
|
|
}))
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestBucket_Iterate(t *testing.T) {
|
|
|
|
ci.Parallel(t)
|
|
|
|
|
|
|
|
db := setupBoltDB(t)
|
|
|
|
|
|
|
|
bucket := []byte("iterate_test")
|
|
|
|
|
|
|
|
must.NoError(t, db.Update(func(tx *Tx) error {
|
|
|
|
b, err := tx.CreateBucketIfNotExists(bucket)
|
|
|
|
must.NoError(t, err)
|
|
|
|
must.NotNil(t, b)
|
|
|
|
|
|
|
|
must.NoError(t, b.Put([]byte("ceo"), employee{Name: "dave", ID: 15}))
|
2022-07-06 22:03:00 +00:00
|
|
|
must.NoError(t, b.Put([]byte("founder"), employee{Name: "mitchell", ID: 1}))
|
2022-07-03 16:18:47 +00:00
|
|
|
must.NoError(t, b.Put([]byte("cto"), employee{Name: "armon", ID: 2}))
|
2018-08-16 18:14:52 +00:00
|
|
|
return nil
|
|
|
|
}))
|
2022-07-03 16:18:47 +00:00
|
|
|
|
|
|
|
t.Run("success", func(t *testing.T) {
|
|
|
|
var result []employee
|
|
|
|
err := db.View(func(tx *Tx) error {
|
|
|
|
b := tx.Bucket(bucket)
|
|
|
|
return Iterate(b, nil, func(key []byte, e employee) {
|
|
|
|
result = append(result, e)
|
|
|
|
})
|
|
|
|
})
|
|
|
|
must.NoError(t, err)
|
|
|
|
must.Eq(t, []employee{
|
2022-07-06 22:03:00 +00:00
|
|
|
{"dave", 15}, {"armon", 2}, {"mitchell", 1},
|
2022-07-03 16:18:47 +00:00
|
|
|
}, result)
|
|
|
|
})
|
|
|
|
|
|
|
|
t.Run("failure", func(t *testing.T) {
|
|
|
|
err := db.View(func(tx *Tx) error {
|
|
|
|
b := tx.Bucket(bucket)
|
|
|
|
// will fail to encode employee into an int
|
|
|
|
return Iterate(b, nil, func(key []byte, i int) {
|
2022-07-06 22:05:35 +00:00
|
|
|
must.Unreachable(t)
|
2022-07-03 16:18:47 +00:00
|
|
|
})
|
|
|
|
})
|
|
|
|
must.Error(t, err)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestBucket_DeletePrefix(t *testing.T) {
|
|
|
|
ci.Parallel(t)
|
|
|
|
|
|
|
|
db := setupBoltDB(t)
|
|
|
|
|
|
|
|
bucket := []byte("delete_prefix_test")
|
|
|
|
|
|
|
|
must.NoError(t, db.Update(func(tx *Tx) error {
|
|
|
|
b, err := tx.CreateBucketIfNotExists(bucket)
|
|
|
|
must.NoError(t, err)
|
|
|
|
must.NotNil(t, b)
|
|
|
|
|
|
|
|
must.NoError(t, b.Put([]byte("exec_a"), employee{Name: "dave", ID: 15}))
|
|
|
|
must.NoError(t, b.Put([]byte("intern_a"), employee{Name: "alice", ID: 7384}))
|
|
|
|
must.NoError(t, b.Put([]byte("exec_c"), employee{Name: "armon", ID: 2}))
|
|
|
|
must.NoError(t, b.Put([]byte("intern_b"), employee{Name: "bob", ID: 7312}))
|
2022-07-06 22:03:00 +00:00
|
|
|
must.NoError(t, b.Put([]byte("exec_b"), employee{Name: "mitchell", ID: 1}))
|
2022-07-03 16:18:47 +00:00
|
|
|
return nil
|
|
|
|
}))
|
|
|
|
|
|
|
|
// remove interns
|
|
|
|
must.NoError(t, db.Update(func(tx *Tx) error {
|
|
|
|
bkt := tx.Bucket(bucket)
|
|
|
|
return bkt.DeletePrefix([]byte("intern_"))
|
|
|
|
}))
|
|
|
|
|
|
|
|
// assert 3 exec remain
|
|
|
|
var result []employee
|
|
|
|
err := db.View(func(tx *Tx) error {
|
|
|
|
bkt := tx.Bucket(bucket)
|
|
|
|
return Iterate(bkt, nil, func(k []byte, e employee) {
|
|
|
|
result = append(result, e)
|
|
|
|
})
|
|
|
|
})
|
|
|
|
must.NoError(t, err)
|
|
|
|
must.Eq(t, []employee{
|
2022-07-06 22:03:00 +00:00
|
|
|
{"dave", 15}, {"mitchell", 1}, {"armon", 2},
|
2022-07-03 16:18:47 +00:00
|
|
|
}, result)
|
2018-08-16 18:14:52 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// TestBucket_DedupeWrites asserts that re-Putting unchanged values performs no
// data-page writes (only the fixed per-transaction overhead), while Putting
// changed values dirties pages and produces additional writes. The exact
// write counts below rely on bbolt's behavior of two writes per committed
// transaction.
func TestBucket_DedupeWrites(t *testing.T) {
	ci.Parallel(t)

	db := setupBoltDB(t)

	bname := []byte("dedupewrites_test")
	k1name := []byte("k1")
	k2name := []byte("k2")

	// Put 2 keys
	must.NoError(t, db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket(bname)
		must.NoError(t, err)
		must.NoError(t, b.Put(k1name, k1name))
		must.NoError(t, b.Put(k2name, k2name))
		return nil
	}))

	// Assert there was at least 1 write
	origWrites := db.BoltDB().Stats().TxStats.Write
	must.Positive(t, origWrites)

	// Write the same values again and expect no new writes
	must.NoError(t, db.Update(func(tx *Tx) error {
		b := tx.Bucket(bname)
		must.NoError(t, b.Put(k1name, k1name))
		must.NoError(t, b.Put(k2name, k2name))
		return nil
	}))

	putWrites := db.BoltDB().Stats().TxStats.Write

	// Unfortunately every committed transaction causes two writes, so this
	// only saves 1 write operation
	must.Eq(t, origWrites+2, putWrites)

	// Write new values and assert more writes took place
	must.NoError(t, db.Update(func(tx *Tx) error {
		b := tx.Bucket(bname)
		must.NoError(t, b.Put(k1name, []byte("newval1")))
		must.NoError(t, b.Put(k2name, []byte("newval2")))
		return nil
	}))

	putWrites2 := db.BoltDB().Stats().TxStats.Write

	// Expect 3 additional writes: 2 for the transaction and one for the
	// dirty page
	must.Eq(t, putWrites+3, putWrites2)
}
|
|
|
|
|
|
|
|
// TestBucket_Delete exercises key and nested-bucket deletion across a
// parent -> child -> grandchild hierarchy: deleting a key, deleting a
// sub-bucket, and verifying that deleting a bucket also removes its
// descendants. It finishes by rewriting a previously-deleted key to confirm
// the write-deduplication cache does not incorrectly suppress the rewrite.
func TestBucket_Delete(t *testing.T) {
	ci.Parallel(t)

	db := setupBoltDB(t)

	parentName := []byte("delete_test")
	parentKey := []byte("parent_key")
	childName := []byte("child")
	childKey := []byte("child_key")
	grandchildName1 := []byte("grandchild1")
	grandchildKey1 := []byte("grandchild_key1")
	grandchildName2 := []byte("grandchild2")
	grandchildKey2 := []byte("grandchild_key2")

	// Create a parent bucket with 1 child and 2 grandchildren
	must.NoError(t, db.Update(func(tx *Tx) error {
		pb, err := tx.CreateBucket(parentName)
		must.NoError(t, err)

		must.NoError(t, pb.Put(parentKey, parentKey))

		child, err := pb.CreateBucket(childName)
		must.NoError(t, err)

		must.NoError(t, child.Put(childKey, childKey))

		grandchild1, err := child.CreateBucket(grandchildName1)
		must.NoError(t, err)

		must.NoError(t, grandchild1.Put(grandchildKey1, grandchildKey1))

		grandchild2, err := child.CreateBucket(grandchildName2)
		must.NoError(t, err)

		must.NoError(t, grandchild2.Put(grandchildKey2, grandchildKey2))

		return nil
	}))

	// Verify grandchild keys wrote
	must.NoError(t, db.View(func(tx *Tx) error {
		grandchild1 := tx.Bucket(parentName).Bucket(childName).Bucket(grandchildName1)
		var v1 []byte
		must.NoError(t, grandchild1.Get(grandchildKey1, &v1))
		must.Eq(t, grandchildKey1, v1)

		grandchild2 := tx.Bucket(parentName).Bucket(childName).Bucket(grandchildName2)
		var v2 []byte
		must.NoError(t, grandchild2.Get(grandchildKey2, &v2))
		must.Eq(t, grandchildKey2, v2)

		return nil
	}))

	// Delete grandchildKey1 and grandchild2
	must.NoError(t, db.Update(func(tx *Tx) error {
		child := tx.Bucket(parentName).Bucket(childName)
		must.NoError(t, child.DeleteBucket(grandchildName2))

		grandchild1 := child.Bucket(grandchildName1)
		must.NoError(t, grandchild1.Delete(grandchildKey1))

		return nil
	}))

	// Ensure grandchild2 alone was deleted
	must.NoError(t, db.View(func(tx *Tx) error {
		// grandchild1 still exists but its key is gone.
		grandchild1 := tx.Bucket(parentName).Bucket(childName).Bucket(grandchildName1)
		var v1 []byte
		must.Error(t, grandchild1.Get(grandchildKey1, &v1))
		must.Eq(t, ([]byte)(nil), v1)

		// grandchild2 was removed entirely.
		grandchild2 := tx.Bucket(parentName).Bucket(childName).Bucket(grandchildName2)
		must.Nil(t, grandchild2)

		return nil
	}))

	// Deleting child bucket should delete grandchild1 as well
	must.NoError(t, db.Update(func(tx *Tx) error {
		parent := tx.Bucket(parentName)
		must.NoError(t, parent.DeleteBucket(childName))

		// Recreate child bucket and ensure childKey and grandchild are gone
		child, err := parent.CreateBucket(childName)
		must.NoError(t, err)

		var v []byte
		err = child.Get(childKey, &v)
		must.Error(t, err)
		must.True(t, IsErrNotFound(err))
		must.Eq(t, ([]byte)(nil), v)

		must.Nil(t, child.Bucket(grandchildName1))

		// Rewrite childKey1 to make sure it doesn't get de-dupe incorrectly
		must.NoError(t, child.Put(childKey, childKey))

		return nil
	}))

	// Ensure childKey1 was rewritten and not de-duped incorrectly
	must.NoError(t, db.View(func(tx *Tx) error {
		var v []byte
		must.NoError(t, tx.Bucket(parentName).Bucket(childName).Get(childKey, &v))
		must.Eq(t, childKey, v)

		return nil
	}))
}
|
|
|
|
|
|
|
|
func BenchmarkWriteDeduplication_On(b *testing.B) {
|
2022-05-12 15:42:40 +00:00
|
|
|
db := setupBoltDB(b)
|
2018-08-16 18:14:52 +00:00
|
|
|
|
|
|
|
bucketName := []byte("allocations")
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
allocID := []byte(alloc.ID)
|
|
|
|
|
2022-07-03 16:18:47 +00:00
|
|
|
must.NoError(b, db.Update(func(tx *Tx) error {
|
2018-08-16 18:14:52 +00:00
|
|
|
allocs, err := tx.CreateBucket(bucketName)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
return allocs.Put(allocID, alloc)
|
2022-07-03 16:18:47 +00:00
|
|
|
}))
|
2018-08-16 18:14:52 +00:00
|
|
|
|
|
|
|
b.ResetTimer()
|
|
|
|
for i := 0; i < b.N; i++ {
|
2022-07-03 16:18:47 +00:00
|
|
|
must.NoError(b, db.Update(func(tx *Tx) error {
|
2018-08-16 18:14:52 +00:00
|
|
|
return tx.Bucket(bucketName).Put(allocID, alloc)
|
2022-07-03 16:18:47 +00:00
|
|
|
}))
|
2018-08-16 18:14:52 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func BenchmarkWriteDeduplication_Off(b *testing.B) {
|
2022-05-12 15:42:40 +00:00
|
|
|
dir := b.TempDir()
|
2018-08-16 18:14:52 +00:00
|
|
|
|
2022-07-03 16:18:47 +00:00
|
|
|
dbFilename := filepath.Join(dir, testDB)
|
|
|
|
db, openErr := Open(dbFilename, testDBPerms, nil)
|
|
|
|
must.NoError(b, openErr)
|
2018-08-16 18:14:52 +00:00
|
|
|
|
2022-07-03 16:18:47 +00:00
|
|
|
b.Cleanup(func() {
|
|
|
|
must.NoError(b, db.Close())
|
|
|
|
})
|
2018-08-16 18:14:52 +00:00
|
|
|
|
|
|
|
bucketName := []byte("allocations")
|
|
|
|
alloc := mock.Alloc()
|
|
|
|
allocID := []byte(alloc.ID)
|
|
|
|
|
2022-07-03 16:18:47 +00:00
|
|
|
must.NoError(b, db.Update(func(tx *Tx) error {
|
2018-08-16 18:14:52 +00:00
|
|
|
allocs, err := tx.CreateBucket(bucketName)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
var buf bytes.Buffer
|
2022-07-03 16:18:47 +00:00
|
|
|
if err = codec.NewEncoder(&buf, structs.MsgpackHandle).Encode(alloc); err != nil {
|
2018-08-16 18:14:52 +00:00
|
|
|
return fmt.Errorf("failed to encode passed object: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return allocs.Put(allocID, buf)
|
2022-07-03 16:18:47 +00:00
|
|
|
}))
|
2018-08-16 18:14:52 +00:00
|
|
|
|
|
|
|
b.ResetTimer()
|
|
|
|
for i := 0; i < b.N; i++ {
|
2022-07-03 16:18:47 +00:00
|
|
|
must.NoError(b, db.Update(func(tx *Tx) error {
|
2018-08-16 18:14:52 +00:00
|
|
|
var buf bytes.Buffer
|
|
|
|
if err := codec.NewEncoder(&buf, structs.MsgpackHandle).Encode(alloc); err != nil {
|
|
|
|
return fmt.Errorf("failed to encode passed object: %v", err)
|
|
|
|
}
|
|
|
|
return tx.Bucket(bucketName).Put(allocID, buf)
|
2022-07-03 16:18:47 +00:00
|
|
|
}))
|
2018-08-16 18:14:52 +00:00
|
|
|
}
|
|
|
|
}
|