// compact.go

package barrel

import (
	"fmt"
	"os"
	"path/filepath"
	"time"

	"github.com/mr-karan/barreldb/internal/datafile"
)

// ExamineFileSize checks the size of the active datafile at a periodic
// interval and rotates the file to the stale list once its size exceeds
// the configured maximum.
func (b *Barrel) ExamineFileSize(evalInterval time.Duration) {
	var (
		evalTicker = time.NewTicker(evalInterval).C
	)

	for range evalTicker {
		if err := b.rotateDF(); err != nil {
			b.lo.Error("error rotating db file", "error", err)
		}
	}
}

// RunCompaction runs a cleanup process at a periodic interval that compacts
// keys and removes dead/expired ones. This saves disk space and merges old,
// inactive db files into a single file. It also generates a hints file,
// which is used to warm the keydir during a cold start.
func (b *Barrel) RunCompaction(evalInterval time.Duration) {
	var (
		evalTicker = time.NewTicker(evalInterval).C
	)

	for range evalTicker {
		b.Lock()

		if err := b.cleanupExpired(); err != nil {
			b.lo.Error("error removing expired keys", "error", err)
		}

		if err := b.merge(); err != nil {
			b.lo.Error("error merging old files", "error", err)
		}

		if err := b.generateHints(); err != nil {
			b.lo.Error("error generating hints file", "error", err)
		}

		b.Unlock()
	}
}

// SyncFile flushes the active db file to disk at a periodic interval. This
// bounds how much data can be lost on a crash when fsync on every write is
// disabled.
func (b *Barrel) SyncFile(evalInterval time.Duration) {
	var (
		evalTicker = time.NewTicker(evalInterval).C
	)

	for range evalTicker {
		if err := b.Sync(); err != nil {
			b.lo.Error("error syncing db file to disk", "error", err)
		}
	}
}
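
// The three loops above are meant to run as long-lived background goroutines
// started by the caller. A minimal sketch, assuming a constructor along the
// lines of barrel.Init with functional options (the exact constructor and
// option names here are illustrative, not taken from this file):
//
//	brl, err := barrel.Init(barrel.WithDir("data/"))
//	if err != nil {
//		log.Fatal(err)
//	}
//	go brl.ExamineFileSize(time.Minute)
//	go brl.RunCompaction(time.Hour)
//	go brl.SyncFile(30 * time.Second)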

// rotateDF checks if the active file size has crossed the threshold of the
// maximum allowed file size. If it has, it replaces the open file descriptors
// pointing to that file with a new file and adds the current file to the list
// of stale files.
func (b *Barrel) rotateDF() error {
	b.Lock()
	defer b.Unlock()

	size, err := b.df.Size()
	if err != nil {
		return err
	}

	// If the file is below the max size threshold, do nothing.
	b.lo.Debug("checking if db file has exceeded max_size", "current_size", size, "max_size", b.opts.maxActiveFileSize)
	if size < b.opts.maxActiveFileSize {
		return nil
	}

	oldID := b.df.ID()

	// Add this datafile to the list of stale files.
	b.stale[oldID] = b.df

	// Create a new datafile.
	df, err := datafile.New(b.opts.dir, oldID+1)
	if err != nil {
		return err
	}

	// Replace the active datafile with the new instance.
	b.df = df

	return nil
}
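
// To illustrate the rotation scheme: if the active datafile has ID 2,
// crossing maxActiveFileSize parks it in b.stale under key 2 and opens a new
// active datafile with ID 3. Assuming datafile.ACTIVE_DATAFILE is a
// printf-style filename template (it is used with fmt.Sprintf in merge
// below), the directory would then hold the stale files for IDs 0 through 2
// alongside the freshly created active file for ID 3.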

// generateHints encodes the contents of the in-memory hashtable
// as `gob` and writes the data to a hints file.
func (b *Barrel) generateHints() error {
	path := filepath.Join(b.opts.dir, HINTS_FILE)
	if err := b.keydir.Encode(path); err != nil {
		return err
	}

	return nil
}
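
// The hints file lets a cold start rebuild the in-memory keydir without
// scanning every datafile. A sketch of the consuming side, assuming the
// keydir exposes a gob Decode counterpart to Encode (not defined in this
// file):
//
//	path := filepath.Join(b.opts.dir, HINTS_FILE)
//	if _, err := os.Stat(path); err == nil {
//		if err := b.keydir.Decode(path); err != nil {
//			return err
//		}
//	}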

// cleanupExpired removes the expired keys.
func (b *Barrel) cleanupExpired() error {
	// Iterate over all keys and delete the ones that have expired.
	for k := range b.keydir {
		record, err := b.get(k)
		if err != nil {
			b.lo.Error("error fetching key", "key", k, "error", err)
			continue
		}

		if record.isExpired() {
			b.lo.Debug("deleting key since it's expired", "key", k)
			// Delete the key.
			if err := b.delete(k); err != nil {
				b.lo.Error("error deleting key", "key", k, "error", err)
				continue
			}
		}
	}

	return nil
}
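
// Note that expiry is lazy: a key with a TTL stays on disk until this sweep
// (or an explicit delete) observes record.isExpired(). This assumes the
// record carries an expiry timestamp set at write time, e.g. something like
// time.Now().Add(ttl) stored by a TTL-aware put; the exact record layout is
// defined elsewhere in the package.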

// merge compacts all stale datafiles into a single file. In this process,
// all the expired/deleted keys are cleaned up and the old files are removed
// from disk.
func (b *Barrel) merge() error {
	var (
		mergefsync bool
	)

	// There should be at least 2 old files to merge.
	if len(b.stale) < 2 {
		return nil
	}

	// Create a new datafile for storing the output of the merged files.
	// Use a temp directory to store the file and move it to the main directory once the merge is over.
	tmpMergeDir, err := os.MkdirTemp("", "merged")
	if err != nil {
		return err
	}
	defer os.RemoveAll(tmpMergeDir)

	mergeDF, err := datafile.New(tmpMergeDir, 0)
	if err != nil {
		return err
	}

	// Disable fsync for the merge process and manually fsync at the end of the merge.
	if b.opts.alwaysFSync {
		mergefsync = true
		b.opts.alwaysFSync = false
	}

	// Loop over all active keys in the hashmap and write the latest values to the merged database.
	// Since the keydir holds the current value of every key, old entries that are
	// expired/deleted/overwritten are dropped from the merged database.
	for k := range b.keydir {
		record, err := b.get(k)
		if err != nil {
			return err
		}
		if err := b.put(mergeDF, k, record.Value, nil); err != nil {
			return err
		}
	}

	// Now close all the existing datafile handles.
	for _, df := range b.stale {
		if err := df.Close(); err != nil {
			b.lo.Error("error closing df", "id", df.ID(), "error", err)
			continue
		}
	}

	// Reset the old map.
	b.stale = make(map[int]*datafile.DataFile)

	// Delete the existing .db files.
	err = filepath.Walk(b.opts.dir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.IsDir() {
			return nil
		}
		if filepath.Ext(path) == ".db" {
			if err := os.Remove(path); err != nil {
				return err
			}
		}
		return nil
	})
	if err != nil {
		return err
	}

	// Move the merged file to the main directory.
	if err := os.Rename(filepath.Join(tmpMergeDir, fmt.Sprintf(datafile.ACTIVE_DATAFILE, 0)),
		filepath.Join(b.opts.dir, fmt.Sprintf(datafile.ACTIVE_DATAFILE, 0))); err != nil {
		return err
	}

	// Set the merged DF as the active DF.
	b.df = mergeDF

	if mergefsync {
		b.opts.alwaysFSync = true
		if err := b.df.Sync(); err != nil {
			return err
		}
	}

	return nil
}
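
// For example: with stale files for IDs 0 and 1 plus an active file for ID 2,
// a merge pass writes every live key exactly once into a fresh datafile,
// deletes all old .db files in the directory (including the previously active
// one, whose live records were rewritten via the keydir loop above), and
// renames the merged file into place as the datafile with ID 0, which then
// becomes the active datafile.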