Improves Test Coverage by covering error cases (#95)

* Add Unit Test for testing a corrupted config

* Add Unit Test for testing errors from .Stats()

* Refactor Datafile into an interface and add Unit Tests for testing Merge() errors

* Refactor indexer into an interface and add Unit Tests for .Close() errors

* Add Unit Tests for .Delete() errors

* Add Unit Tests for testing Put/Get errors

* Add Unit Test for testing Open errors (bad path for example)

* Refactor out bitcask.writeConfig

* Add more tests for config errors

* Add unit test for options that might error

* Add more test cases for close errors

* Add test case for rotating datafiles

* Fix a possible data race in .Stats()

* Add test case for checksum errors

* Add test case for Sync errors with Put and WithSync enabled

* Refactor and use testify.mock for mocks and generate mocks for all interfaces (see the sketch after this list)

* Refactor TestCloseErrors

* Refactor TestDeleteErrors

* Refactor TestGetErrors

* Refactor TestPutErrors

* Refactor TestMergeErrors and fix a bug with .Fold()

* Add test case for Scan() errors

* Apparently only Scan() can return nil Node()s?
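
To illustrate the testify.mock pattern these tests adopt: mockery generates mocks.Datafile and mocks.Indexer from the new interfaces, and a test swaps a mock in for the real dependency to force an error path. The sketch below condenses the SyncError case of TestPutErrors from the diff; mock.AnythingOfType is used here only to avoid hand-computing the entry checksum.

package bitcask

import (
	"io/ioutil"
	"os"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"

	"github.com/prologic/bitcask/internal/mocks"
)

// Swap the active datafile for a mock whose Sync() fails, then assert
// that Put surfaces the error when the store was opened with WithSync(true).
func TestSyncErrorSketch(t *testing.T) {
	assert := assert.New(t)

	testdir, err := ioutil.TempDir("", "bitcask")
	assert.NoError(err)
	defer os.RemoveAll(testdir)

	db, err := Open(testdir, WithSync(true))
	assert.NoError(err)

	mockDatafile := new(mocks.Datafile)
	mockDatafile.On("Size").Return(int64(0))
	// Accept any entry rather than pinning the exact checksum value.
	mockDatafile.On("Write", mock.AnythingOfType("internal.Entry")).
		Return(int64(0), int64(0), nil)
	mockDatafile.On("Sync").Return(ErrMockError)
	db.curr = mockDatafile

	err = db.Put([]byte("foo"), []byte("bar"))
	assert.Equal(ErrMockError, err)
}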
James Mills 2019-09-09 07:18:38 +10:00 committed by GitHub
parent 13e35b7acc
commit d59d5ad8c2
11 changed files with 756 additions and 82 deletions

@@ -39,6 +39,9 @@ profile: build
bench: build
@go test -v -benchmem -bench=. .
mocks:
@mockery -all -case underscore -output ./internal/mocks -recursive
test: build
@go test -v \
-cover -coverprofile=coverage.txt -covermode=atomic \

@@ -50,9 +50,10 @@ type Bitcask struct {
config *config.Config
options []Option
path string
curr *data.Datafile
datafiles map[int]*data.Datafile
curr data.Datafile
datafiles map[int]data.Datafile
trie art.Tree
indexer index.Indexer
}
// Stats is a struct returned by Stats() on an open Bitcask instance
@@ -65,18 +66,14 @@ type Stats struct {
// Stats returns statistics about the database including the number of
// data files, keys and overall size on disk of the data
func (b *Bitcask) Stats() (stats Stats, err error) {
var size int64
size, err = internal.DirSize(b.path)
if err != nil {
if stats.Size, err = internal.DirSize(b.path); err != nil {
return
}
stats.Datafiles = len(b.datafiles)
b.mu.RLock()
stats.Datafiles = len(b.datafiles)
stats.Keys = b.trie.Size()
b.mu.RUnlock()
stats.Size = size
return
}
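
For context on the data-race fix: the old Stats() read len(b.datafiles) before taking b.mu, so a concurrent Put() rotating to a new datafile could mutate the map mid-read. The corrected method, condensed from the hunk above, holds the read lock across both the map and trie reads:

func (b *Bitcask) Stats() (stats Stats, err error) {
	if stats.Size, err = internal.DirSize(b.path); err != nil {
		return
	}

	// Both reads happen under the read lock, so a concurrent Put()
	// rotating datafiles cannot mutate the map underneath us.
	b.mu.RLock()
	stats.Datafiles = len(b.datafiles)
	stats.Keys = b.trie.Size()
	b.mu.RUnlock()

	return
}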
@@ -90,16 +87,7 @@ func (b *Bitcask) Close() error {
os.Remove(b.Flock.Path())
}()
f, err := os.OpenFile(filepath.Join(b.path, "index"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
return err
}
defer f.Close()
if err := index.WriteIndex(b.trie, f); err != nil {
return err
}
if err := f.Sync(); err != nil {
if err := b.indexer.Save(b.trie, filepath.Join(b.path, "index")); err != nil {
return err
}
@@ -120,7 +108,7 @@ func (b *Bitcask) Sync() error {
// Get retrieves the value of the given key. If the key is not found or an I/O
// error occurs a null byte slice is returned along with the error.
func (b *Bitcask) Get(key []byte) ([]byte, error) {
var df *data.Datafile
var df data.Datafile
b.mu.RLock()
value, found := b.trie.Search(key)
@@ -238,12 +226,6 @@ func (b *Bitcask) Keys() chan []byte {
for it := b.trie.Iterator(); it.HasNext(); {
node, _ := it.Next()
// Skip the root node
if len(node.Key()) == 0 {
continue
}
ch <- node.Key()
}
close(ch)
@@ -255,18 +237,18 @@ func (b *Bitcask) Keys() chan []byte {
// Fold iterates over all keys in the database calling the function `f` for
// each key. If the function returns an error, no further keys are processed
// and the error is returned.
func (b *Bitcask) Fold(f func(key []byte) error) error {
func (b *Bitcask) Fold(f func(key []byte) error) (err error) {
b.mu.RLock()
defer b.mu.RUnlock()
b.trie.ForEach(func(node art.Node) bool {
if err := f(node.Key()); err != nil {
if err = f(node.Key()); err != nil {
return false
}
return true
})
return nil
return
}
func (b *Bitcask) put(key, value []byte) (int64, int64, error) {
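
The .Fold() bug fixed above: the closure previously declared a fresh err with := and Fold ended with an unconditional return nil, so a failing callback stopped iteration but the caller never saw the error. Naming the return value lets the closure assign it directly; an annotated view of the change:

// Before (error swallowed):
//
//	b.trie.ForEach(func(node art.Node) bool {
//		if err := f(node.Key()); err != nil { // local err, dropped
//			return false
//		}
//		return true
//	})
//	return nil
//
// After (named return propagates the callback's error):
func (b *Bitcask) Fold(f func(key []byte) error) (err error) {
	b.mu.RLock()
	defer b.mu.RUnlock()

	b.trie.ForEach(func(node art.Node) bool {
		if err = f(node.Key()); err != nil {
			return false
		}
		return true
	})

	return
}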
@@ -298,14 +280,6 @@ func (b *Bitcask) put(key, value []byte) (int64, int64, error) {
return b.curr.Write(e)
}
func (b *Bitcask) writeConfig() error {
data, err := b.config.Encode()
if err != nil {
return err
}
return ioutil.WriteFile(filepath.Join(b.path, "config.json"), data, 0600)
}
func (b *Bitcask) reopen() error {
b.mu.Lock()
defer b.mu.Unlock()
@@ -320,7 +294,7 @@ func (b *Bitcask) reopen() error {
return err
}
datafiles := make(map[int]*data.Datafile, len(ids))
datafiles := make(map[int]data.Datafile, len(ids))
for _, id := range ids {
df, err := data.NewDatafile(b.path, id, true)
@@ -330,7 +304,7 @@ func (b *Bitcask) reopen() error {
datafiles[id] = df
}
t, found, err := index.ReadFromFile(b.path, b.config.MaxKeySize)
t, found, err := b.indexer.Load(filepath.Join(b.path, "index"), b.config.MaxKeySize)
if err != nil {
return err
}
@@ -469,8 +443,13 @@ func Open(path string, options ...Option) (*Bitcask, error) {
return nil, err
}
cfg, err = config.Decode(path)
if err != nil {
configPath := filepath.Join(path, "config.json")
if internal.Exists(configPath) {
cfg, err = config.Load(configPath)
if err != nil {
return nil, err
}
} else {
cfg = newDefaultConfig()
}
@@ -479,6 +458,7 @@ func Open(path string, options ...Option) (*Bitcask, error) {
config: cfg,
options: options,
path: path,
indexer: index.NewIndexer(),
}
for _, opt := range options {
@@ -496,7 +476,7 @@ func Open(path string, options ...Option) (*Bitcask, error) {
return nil, ErrDatabaseLocked
}
if err := bitcask.writeConfig(); err != nil {
if err := cfg.Save(configPath); err != nil {
return nil, err
}

@@ -2,6 +2,7 @@ package bitcask
import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"os"
@@ -13,6 +14,14 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/prologic/bitcask/internal"
"github.com/prologic/bitcask/internal/config"
"github.com/prologic/bitcask/internal/mocks"
)
var (
ErrMockError = errors.New("error: mock error")
)
type sortByteArrays [][]byte
@@ -195,6 +204,36 @@ func TestDeletedKeys(t *testing.T) {
})
}
func TestConfigErrors(t *testing.T) {
assert := assert.New(t)
t.Run("CorruptConfig", func(t *testing.T) {
testdir, err := ioutil.TempDir("", "bitcask")
assert.NoError(err)
defer os.RemoveAll(testdir)
db, err := Open(testdir)
assert.NoError(err)
assert.NoError(db.Close())
assert.NoError(ioutil.WriteFile(filepath.Join(testdir, "config.json"), []byte("foo bar baz"), 0600))
_, err = Open(testdir)
assert.Error(err)
})
t.Run("BadConfigPath", func(t *testing.T) {
testdir, err := ioutil.TempDir("", "bitcask")
assert.NoError(err)
defer os.RemoveAll(testdir)
assert.NoError(os.Mkdir(filepath.Join(testdir, "config.json"), 0700))
_, err = Open(testdir)
assert.Error(err)
})
}
func TestReIndex(t *testing.T) {
assert := assert.New(t)
@@ -450,6 +489,109 @@ func TestStats(t *testing.T) {
})
}
func TestStatsError(t *testing.T) {
var (
db *Bitcask
err error
)
assert := assert.New(t)
testdir, err := ioutil.TempDir("", "bitcask")
assert.NoError(err)
t.Run("Setup", func(t *testing.T) {
t.Run("Open", func(t *testing.T) {
db, err = Open(testdir)
assert.NoError(err)
})
t.Run("Put", func(t *testing.T) {
err := db.Put([]byte("foo"), []byte("bar"))
assert.NoError(err)
})
t.Run("Get", func(t *testing.T) {
val, err := db.Get([]byte("foo"))
assert.NoError(err)
assert.Equal([]byte("bar"), val)
})
t.Run("Stats", func(t *testing.T) {
stats, err := db.Stats()
assert.NoError(err)
assert.Equal(stats.Datafiles, 0)
assert.Equal(stats.Keys, 1)
})
t.Run("FabricatedDestruction", func(t *testing.T) {
// This would never happen in reality :D
// Or would it? :)
err = os.RemoveAll(testdir)
assert.NoError(err)
})
t.Run("Stats", func(t *testing.T) {
_, err := db.Stats()
assert.Error(err)
})
})
}
func TestMaxDatafileSize(t *testing.T) {
var (
db *Bitcask
err error
)
assert := assert.New(t)
testdir, err := ioutil.TempDir("", "bitcask")
assert.NoError(err)
defer os.RemoveAll(testdir)
t.Run("Setup", func(t *testing.T) {
t.Run("Open", func(t *testing.T) {
db, err = Open(testdir, WithMaxDatafileSize(32))
assert.NoError(err)
})
t.Run("Put", func(t *testing.T) {
err := db.Put([]byte("foo"), []byte("bar"))
assert.NoError(err)
})
})
t.Run("Put", func(t *testing.T) {
for i := 0; i < 10; i++ {
err := db.Put([]byte(fmt.Sprintf("key_%d", i)), []byte("bar"))
assert.NoError(err)
}
})
t.Run("Sync", func(t *testing.T) {
err = db.Sync()
assert.NoError(err)
})
t.Run("Get", func(t *testing.T) {
val, err := db.Get([]byte("foo"))
assert.NoError(err)
assert.Equal([]byte("bar"), val)
for i := 0; i < 10; i++ {
val, err = db.Get([]byte(fmt.Sprintf("key_%d", i)))
assert.NoError(err)
assert.Equal([]byte("bar"), val)
}
})
t.Run("Close", func(t *testing.T) {
err = db.Close()
assert.NoError(err)
})
}
func TestMerge(t *testing.T) {
var (
db *Bitcask
@@ -514,6 +656,286 @@ func TestMerge(t *testing.T) {
})
}
func TestGetErrors(t *testing.T) {
assert := assert.New(t)
t.Run("ReadError", func(t *testing.T) {
testdir, err := ioutil.TempDir("", "bitcask")
assert.NoError(err)
defer os.RemoveAll(testdir)
db, err := Open(testdir, WithMaxDatafileSize(32))
assert.NoError(err)
err = db.Put([]byte("foo"), []byte("bar"))
assert.NoError(err)
mockDatafile := new(mocks.Datafile)
mockDatafile.On("FileID").Return(0)
mockDatafile.On("ReadAt", int64(0), int64(22)).Return(
internal.Entry{},
ErrMockError,
)
db.curr = mockDatafile
_, err = db.Get([]byte("foo"))
assert.Error(err)
assert.Equal(ErrMockError, err)
})
t.Run("ChecksumError", func(t *testing.T) {
testdir, err := ioutil.TempDir("", "bitcask")
assert.NoError(err)
defer os.RemoveAll(testdir)
db, err := Open(testdir, WithMaxDatafileSize(32))
assert.NoError(err)
err = db.Put([]byte("foo"), []byte("bar"))
assert.NoError(err)
mockDatafile := new(mocks.Datafile)
mockDatafile.On("FileID").Return(0)
mockDatafile.On("ReadAt", int64(0), int64(22)).Return(
internal.Entry{
Checksum: 0x0,
Key: []byte("foo"),
Offset: 0,
Value: []byte("bar"),
},
nil,
)
db.curr = mockDatafile
_, err = db.Get([]byte("foo"))
assert.Error(err)
assert.Equal(ErrChecksumFailed, err)
})
}
func TestPutErrors(t *testing.T) {
assert := assert.New(t)
t.Run("WriteError", func(t *testing.T) {
testdir, err := ioutil.TempDir("", "bitcask")
assert.NoError(err)
db, err := Open(testdir)
assert.NoError(err)
mockDatafile := new(mocks.Datafile)
mockDatafile.On("Size").Return(int64(0))
mockDatafile.On(
"Write",
internal.Entry{
Checksum: 0x76ff8caa,
Key: []byte("foo"),
Offset: 0,
Value: []byte("bar"),
},
).Return(int64(0), int64(0), ErrMockError)
db.curr = mockDatafile
err = db.Put([]byte("foo"), []byte("bar"))
assert.Error(err)
assert.Equal(ErrMockError, err)
})
t.Run("SyncError", func(t *testing.T) {
testdir, err := ioutil.TempDir("", "bitcask")
assert.NoError(err)
db, err := Open(testdir, WithSync(true))
assert.NoError(err)
mockDatafile := new(mocks.Datafile)
mockDatafile.On("Size").Return(int64(0))
mockDatafile.On(
"Write",
internal.Entry{
Checksum: 0x78240498,
Key: []byte("bar"),
Offset: 0,
Value: []byte("baz"),
},
).Return(int64(0), int64(0), nil)
mockDatafile.On("Sync").Return(ErrMockError)
db.curr = mockDatafile
err = db.Put([]byte("bar"), []byte("baz"))
assert.Error(err)
assert.Equal(ErrMockError, err)
})
}
func TestOpenErrors(t *testing.T) {
assert := assert.New(t)
t.Run("BadPath", func(t *testing.T) {
testdir, err := ioutil.TempDir("", "bitcask")
assert.NoError(err)
defer os.RemoveAll(testdir)
assert.NoError(ioutil.WriteFile(filepath.Join(testdir, "foo"), []byte("foo"), 0600))
_, err = Open(filepath.Join(testdir, "foo", "tmp.db"))
assert.Error(err)
})
t.Run("BadOption", func(t *testing.T) {
testdir, err := ioutil.TempDir("", "bitcask")
assert.NoError(err)
defer os.RemoveAll(testdir)
withBogusOption := func() Option {
return func(cfg *config.Config) error {
return errors.New("mocked error")
}
}
_, err = Open(testdir, withBogusOption())
assert.Error(err)
})
}
func TestCloseErrors(t *testing.T) {
assert := assert.New(t)
testdir, err := ioutil.TempDir("", "bitcask")
assert.NoError(err)
defer os.RemoveAll(testdir)
t.Run("CloseIndexError", func(t *testing.T) {
db, err := Open(testdir, WithMaxDatafileSize(32))
assert.NoError(err)
mockIndexer := new(mocks.Indexer)
mockIndexer.On("Save", db.trie, filepath.Join(db.path, "index")).Return(ErrMockError)
db.indexer = mockIndexer
err = db.Close()
assert.Error(err)
assert.Equal(ErrMockError, err)
})
t.Run("CloseDatafilesError", func(t *testing.T) {
db, err := Open(testdir, WithMaxDatafileSize(32))
assert.NoError(err)
mockDatafile := new(mocks.Datafile)
mockDatafile.On("Close").Return(ErrMockError)
db.datafiles[0] = mockDatafile
err = db.Close()
assert.Error(err)
assert.Equal(ErrMockError, err)
})
t.Run("CloseActiveDatafileError", func(t *testing.T) {
db, err := Open(testdir, WithMaxDatafileSize(32))
assert.NoError(err)
mockDatafile := new(mocks.Datafile)
mockDatafile.On("Close").Return(ErrMockError)
db.curr = mockDatafile
err = db.Close()
assert.Error(err)
assert.Equal(ErrMockError, err)
})
}
func TestDeleteErrors(t *testing.T) {
assert := assert.New(t)
t.Run("WriteError", func(t *testing.T) {
testdir, err := ioutil.TempDir("", "bitcask")
assert.NoError(err)
defer os.RemoveAll(testdir)
db, err := Open(testdir, WithMaxDatafileSize(32))
assert.NoError(err)
err = db.Put([]byte("foo"), []byte("bar"))
assert.NoError(err)
mockDatafile := new(mocks.Datafile)
mockDatafile.On("Size").Return(int64(0))
mockDatafile.On(
"Write",
internal.Entry{
Checksum: 0x0,
Key: []byte("foo"),
Offset: 0,
Value: []byte{},
},
).Return(int64(0), int64(0), ErrMockError)
db.curr = mockDatafile
err = db.Delete([]byte("foo"))
assert.Error(err)
})
}
func TestMergeErrors(t *testing.T) {
assert := assert.New(t)
t.Run("RemoveDatabaseDirectory", func(t *testing.T) {
testdir, err := ioutil.TempDir("", "bitcask")
assert.NoError(err)
defer os.RemoveAll(testdir)
db, err := Open(testdir, WithMaxDatafileSize(32))
assert.NoError(err)
assert.NoError(os.RemoveAll(testdir))
err = db.Merge()
assert.Error(err)
})
t.Run("EmptyCloseError", func(t *testing.T) {
testdir, err := ioutil.TempDir("", "bitcask")
assert.NoError(err)
defer os.RemoveAll(testdir)
db, err := Open(testdir)
assert.NoError(err)
mockDatafile := new(mocks.Datafile)
mockDatafile.On("Close").Return(ErrMockError)
db.curr = mockDatafile
err = db.Merge()
assert.Error(err)
assert.Equal(ErrMockError, err)
})
t.Run("ReadError", func(t *testing.T) {
testdir, err := ioutil.TempDir("", "bitcask")
assert.NoError(err)
defer os.RemoveAll(testdir)
db, err := Open(testdir)
assert.NoError(err)
assert.NoError(db.Put([]byte("foo"), []byte("bar")))
mockDatafile := new(mocks.Datafile)
mockDatafile.On("FileID").Return(0)
mockDatafile.On("ReadAt", int64(0), int64(22)).Return(
internal.Entry{},
ErrMockError,
)
db.curr = mockDatafile
err = db.Merge()
assert.Error(err)
assert.Equal(ErrMockError, err)
})
}
func TestConcurrent(t *testing.T) {
var (
db *Bitcask
@@ -642,6 +1064,14 @@ func TestScan(t *testing.T) {
vals = SortByteArrays(vals)
assert.Equal(expected, vals)
})
t.Run("ScanErrors", func(t *testing.T) {
err = db.Scan([]byte("fo"), func(key []byte) error {
return ErrMockError
})
assert.Error(err)
assert.Equal(ErrMockError, err)
})
}
func TestLocking(t *testing.T) {

@@ -2,6 +2,7 @@ package main
import (
"os"
"path/filepath"
"github.com/prologic/bitcask"
"github.com/prologic/bitcask/internal/config"
@@ -36,11 +37,11 @@ func init() {
func recover(path string, dryRun bool) int {
maxKeySize := bitcask.DefaultMaxKeySize
if cfg, err := config.Decode(path); err == nil {
if cfg, err := config.Load(filepath.Join(path, "config.json")); err == nil {
maxKeySize = cfg.MaxKeySize
}
t, found, err := index.ReadFromFile(path, maxKeySize)
t, found, err := index.NewIndexer().Load(path, maxKeySize)
if err != nil && !index.IsIndexCorruption(err) {
log.WithError(err).Info("error while opening the index file")
}
@@ -60,24 +61,12 @@ func recover(path string, dryRun bool) int {
return 0
}
fi, err := os.OpenFile("index.recovered", os.O_WRONLY|os.O_CREATE, 0600)
if err != nil {
log.WithError(err).Info("error while creating recovered index file")
return 1
}
// Leverage that t has the partially read tree even on corrupted files
err = index.WriteIndex(t, fi)
err = index.NewIndexer().Save(t, "index.recovered")
if err != nil {
log.WithError(err).Info("error while writing the recovered index file")
fi.Close()
return 1
}
err = fi.Close()
if err != nil {
log.WithError(err).Info("the recovered file index coudn't be saved correctly")
}
log.Debug("the index was recovered in the index.recovered new file")
return 0

go.sum (1 change)

@@ -116,6 +116,7 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM
github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=

@@ -3,7 +3,7 @@ package config
import (
"encoding/json"
"io/ioutil"
"path/filepath"
"os"
)
// Config contains the bitcask configuration parameters
@@ -14,11 +14,11 @@ type Config struct {
Sync bool `json:"sync"`
}
// Decode decodes a serialized configuration
func Decode(path string) (*Config, error) {
// Load loads a configuration from the given path
func Load(path string) (*Config, error) {
var cfg Config
data, err := ioutil.ReadFile(filepath.Join(path, "config.json"))
data, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
@@ -30,7 +30,25 @@ func Decode(path string) (*Config, error) {
return &cfg, nil
}
// Encode encodes the configuration for storage
func (c *Config) Encode() ([]byte, error) {
return json.Marshal(c)
// Save saves the configuration to the provided path
func (c *Config) Save(path string) error {
f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0600)
if err != nil {
return err
}
data, err := json.Marshal(c)
if err != nil {
return err
}
if _, err = f.Write(data); err != nil {
return err
}
if err = f.Sync(); err != nil {
return err
}
return f.Close()
}
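
A round-trip sketch of the new Load/Save API replacing Decode/Encode: callers now pass the full config file path, and Save handles write, fsync, and close itself. Field values here are illustrative, and since these packages live under internal/ the snippet only compiles from within the bitcask module:

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"

	"github.com/prologic/bitcask/internal/config"
)

func main() {
	dir, err := ioutil.TempDir("", "bitcask")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)
	cfgPath := filepath.Join(dir, "config.json")

	cfg := &config.Config{MaxKeySize: 64, Sync: true}
	if err := cfg.Save(cfgPath); err != nil { // write + fsync + close
		log.Fatal(err)
	}

	loaded, err := config.Load(cfgPath)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(loaded.MaxKeySize, loaded.Sync) // 64 true
}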

@@ -22,7 +22,18 @@ var (
mxMemPool sync.RWMutex
)
type Datafile struct {
type Datafile interface {
FileID() int
Name() string
Close() error
Sync() error
Size() int64
Read() (internal.Entry, int64, error)
ReadAt(index, size int64) (internal.Entry, error)
Write(internal.Entry) (int64, int64, error)
}
type datafile struct {
sync.RWMutex
id int
@@ -34,7 +45,7 @@ type Datafile struct {
enc *Encoder
}
func NewDatafile(path string, id int, readonly bool) (*Datafile, error) {
func NewDatafile(path string, id int, readonly bool) (Datafile, error) {
var (
r *os.File
ra *mmap.ReaderAt
@@ -70,7 +81,7 @@ func NewDatafile(path string, id int, readonly bool) (*Datafile, error) {
dec := NewDecoder(r)
enc := NewEncoder(w)
return &Datafile{
return &datafile{
id: id,
r: r,
ra: ra,
@@ -81,21 +92,21 @@ func NewDatafile(path string, id int, readonly bool) (*Datafile, error) {
}, nil
}
func (df *Datafile) FileID() int {
func (df *datafile) FileID() int {
return df.id
}
func (df *Datafile) Name() string {
func (df *datafile) Name() string {
return df.r.Name()
}
func (df *Datafile) Close() error {
func (df *datafile) Close() error {
defer func() {
df.ra.Close()
df.r.Close()
}()
// Readonly Datafile -- Nothing further to close on the write side
// Readonly datafile -- Nothing further to close on the write side
if df.w == nil {
return nil
}
@@ -107,20 +118,20 @@ func (df *Datafile) Close() error {
return df.w.Close()
}
func (df *Datafile) Sync() error {
func (df *datafile) Sync() error {
if df.w == nil {
return nil
}
return df.w.Sync()
}
func (df *Datafile) Size() int64 {
func (df *datafile) Size() int64 {
df.RLock()
defer df.RUnlock()
return df.offset
}
func (df *Datafile) Read() (e internal.Entry, n int64, err error) {
func (df *datafile) Read() (e internal.Entry, n int64, err error) {
df.Lock()
defer df.Unlock()
@@ -132,7 +143,7 @@ func (df *Datafile) Read() (e internal.Entry, n int64, err error) {
return
}
func (df *Datafile) ReadAt(index, size int64) (e internal.Entry, err error) {
func (df *datafile) ReadAt(index, size int64) (e internal.Entry, err error) {
var n int
b := make([]byte, size)
@@ -156,7 +167,7 @@ func (df *Datafile) ReadAt(index, size int64) (e internal.Entry, err error) {
return
}
func (df *Datafile) Write(e internal.Entry) (int64, int64, error) {
func (df *datafile) Write(e internal.Entry) (int64, int64, error) {
if df.w == nil {
return -1, 0, ErrReadonly
}
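
A usage sketch of the extracted Datafile interface. NewDatafile keeps its signature but now returns the interface, which is what lets the tests substitute mocks.Datafile for db.curr. One assumption to flag: Write is taken to return the entry's offset and encoded size, matching how Get feeds item.Offset and item.Size into ReadAt; the checksum is left zero because put() normally computes it before writing:

package main

import (
	"io/ioutil"
	"log"
	"os"

	"github.com/prologic/bitcask/internal"
	"github.com/prologic/bitcask/internal/data"
)

func main() {
	dir, err := ioutil.TempDir("", "bitcask")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	// Datafile 0 in read-write mode (readonly=false).
	df, err := data.NewDatafile(dir, 0, false)
	if err != nil {
		log.Fatal(err)
	}
	defer df.Close()

	// Checksum omitted: Bitcask's put() fills it in and Get verifies it,
	// returning ErrChecksumFailed on mismatch.
	offset, n, err := df.Write(internal.Entry{
		Key:   []byte("foo"),
		Value: []byte("bar"),
	})
	if err != nil {
		log.Fatal(err)
	}

	e, err := df.ReadAt(offset, n)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%s => %s", e.Key, e.Value)
}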

@@ -108,8 +108,7 @@ func readIndex(r io.Reader, t art.Tree, maxKeySize int) error {
return nil
}
// WriteIndex persists a Tree into a io.Writer
func WriteIndex(t art.Tree, w io.Writer) (err error) {
func writeIndex(t art.Tree, w io.Writer) (err error) {
t.ForEach(func(node art.Node) bool {
err = writeBytes(node.Key(), w)
if err != nil {

@@ -2,26 +2,55 @@ package index
import (
"os"
"path"
art "github.com/plar/go-adaptive-radix-tree"
"github.com/prologic/bitcask/internal"
)
// ReadFromFile reads an index from a persisted file
func ReadFromFile(filePath string, maxKeySize int) (art.Tree, bool, error) {
type Indexer interface {
Load(path string, maxkeySize int) (art.Tree, bool, error)
Save(t art.Tree, path string) error
}
func NewIndexer() Indexer {
return &indexer{}
}
type indexer struct{}
func (i *indexer) Load(path string, maxKeySize int) (art.Tree, bool, error) {
t := art.New()
if !internal.Exists(path.Join(filePath, "index")) {
if !internal.Exists(path) {
return t, false, nil
}
f, err := os.Open(path.Join(filePath, "index"))
f, err := os.Open(path)
if err != nil {
return t, true, err
}
defer f.Close()
if err := readIndex(f, t, maxKeySize); err != nil {
return t, true, err
}
return t, true, nil
}
func (i *indexer) Save(t art.Tree, path string) error {
f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
return err
}
defer f.Close()
if err := writeIndex(t, f); err != nil {
return err
}
if err := f.Sync(); err != nil {
return err
}
return f.Close()
}
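
The Indexer interface consolidates the former free functions (ReadFromFile, WriteIndex) together with the open/sync/close plumbing. A minimal sketch mirroring the recovery tool's usage above; the rebuild branch is only a placeholder, and the package name is illustrative:

package sketch

import (
	"log"
	"path/filepath"

	"github.com/prologic/bitcask/internal/index"
)

func rebuildIndex(path string, maxKeySize int) error {
	idx := index.NewIndexer()

	// Load reports the tree, whether an index file existed, and any error.
	t, found, err := idx.Load(filepath.Join(path, "index"), maxKeySize)
	if err != nil {
		return err
	}
	if !found {
		log.Println("no index on disk; bitcask rebuilds it from the datafiles")
	}

	// Save writes, fsyncs, and closes the index file in one call.
	return idx.Save(t, filepath.Join(path, "index"))
}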

internal/mocks/datafile.go (new file, 158 lines)

@@ -0,0 +1,158 @@
// Code generated by mockery v1.0.0. DO NOT EDIT.
package mocks
import internal "github.com/prologic/bitcask/internal"
import mock "github.com/stretchr/testify/mock"
// Datafile is an autogenerated mock type for the Datafile type
type Datafile struct {
mock.Mock
}
// Close provides a mock function with given fields:
func (_m *Datafile) Close() error {
ret := _m.Called()
var r0 error
if rf, ok := ret.Get(0).(func() error); ok {
r0 = rf()
} else {
r0 = ret.Error(0)
}
return r0
}
// FileID provides a mock function with given fields:
func (_m *Datafile) FileID() int {
ret := _m.Called()
var r0 int
if rf, ok := ret.Get(0).(func() int); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(int)
}
return r0
}
// Name provides a mock function with given fields:
func (_m *Datafile) Name() string {
ret := _m.Called()
var r0 string
if rf, ok := ret.Get(0).(func() string); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(string)
}
return r0
}
// Read provides a mock function with given fields:
func (_m *Datafile) Read() (internal.Entry, int64, error) {
ret := _m.Called()
var r0 internal.Entry
if rf, ok := ret.Get(0).(func() internal.Entry); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(internal.Entry)
}
var r1 int64
if rf, ok := ret.Get(1).(func() int64); ok {
r1 = rf()
} else {
r1 = ret.Get(1).(int64)
}
var r2 error
if rf, ok := ret.Get(2).(func() error); ok {
r2 = rf()
} else {
r2 = ret.Error(2)
}
return r0, r1, r2
}
// ReadAt provides a mock function with given fields: index, size
func (_m *Datafile) ReadAt(index int64, size int64) (internal.Entry, error) {
ret := _m.Called(index, size)
var r0 internal.Entry
if rf, ok := ret.Get(0).(func(int64, int64) internal.Entry); ok {
r0 = rf(index, size)
} else {
r0 = ret.Get(0).(internal.Entry)
}
var r1 error
if rf, ok := ret.Get(1).(func(int64, int64) error); ok {
r1 = rf(index, size)
} else {
r1 = ret.Error(1)
}
return r0, r1
}
// Size provides a mock function with given fields:
func (_m *Datafile) Size() int64 {
ret := _m.Called()
var r0 int64
if rf, ok := ret.Get(0).(func() int64); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(int64)
}
return r0
}
// Sync provides a mock function with given fields:
func (_m *Datafile) Sync() error {
ret := _m.Called()
var r0 error
if rf, ok := ret.Get(0).(func() error); ok {
r0 = rf()
} else {
r0 = ret.Error(0)
}
return r0
}
// Write provides a mock function with given fields: _a0
func (_m *Datafile) Write(_a0 internal.Entry) (int64, int64, error) {
ret := _m.Called(_a0)
var r0 int64
if rf, ok := ret.Get(0).(func(internal.Entry) int64); ok {
r0 = rf(_a0)
} else {
r0 = ret.Get(0).(int64)
}
var r1 int64
if rf, ok := ret.Get(1).(func(internal.Entry) int64); ok {
r1 = rf(_a0)
} else {
r1 = ret.Get(1).(int64)
}
var r2 error
if rf, ok := ret.Get(2).(func(internal.Entry) error); ok {
r2 = rf(_a0)
} else {
r2 = ret.Error(2)
}
return r0, r1, r2
}

internal/mocks/indexer.go (new file, 56 lines)

@@ -0,0 +1,56 @@
// Code generated by mockery v1.0.0. DO NOT EDIT.
package mocks
import art "github.com/plar/go-adaptive-radix-tree"
import mock "github.com/stretchr/testify/mock"
// Indexer is an autogenerated mock type for the Indexer type
type Indexer struct {
mock.Mock
}
// Load provides a mock function with given fields: path, maxkeySize
func (_m *Indexer) Load(path string, maxkeySize int) (art.Tree, bool, error) {
ret := _m.Called(path, maxkeySize)
var r0 art.Tree
if rf, ok := ret.Get(0).(func(string, int) art.Tree); ok {
r0 = rf(path, maxkeySize)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(art.Tree)
}
}
var r1 bool
if rf, ok := ret.Get(1).(func(string, int) bool); ok {
r1 = rf(path, maxkeySize)
} else {
r1 = ret.Get(1).(bool)
}
var r2 error
if rf, ok := ret.Get(2).(func(string, int) error); ok {
r2 = rf(path, maxkeySize)
} else {
r2 = ret.Error(2)
}
return r0, r1, r2
}
// Save provides a mock function with given fields: t, path
func (_m *Indexer) Save(t art.Tree, path string) error {
ret := _m.Called(t, path)
var r0 error
if rf, ok := ret.Get(0).(func(art.Tree, string) error); ok {
r0 = rf(t, path)
} else {
r0 = ret.Error(0)
}
return r0
}