Merge pull request #2203 from hashicorp/file-backend-base64
Base64 encode the file names in the 'file' physical backend
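The scheme: leaf files keep the existing "_" prefix, but the final path segment is now base64 URL encoded so that keys containing characters the host OS disallows in file names (notably on Windows) can still be written to disk, and List decodes the names on the way back out. A minimal sketch of that round trip, using an illustrative key and base path that are not part of this diff:

package main

import (
	"encoding/base64"
	"fmt"
	"path/filepath"
)

func main() {
	// Illustrative only; the real logic lives in (*FileBackend).path below.
	backendPath := "/var/vault"
	key := "aws/creds:role" // ':' is not a legal file name character on Windows

	full := filepath.Join(backendPath, key)
	dir := filepath.Dir(full)
	name := filepath.Base(full)

	// Encode only the final segment and keep the "_" leaf prefix.
	onDisk := filepath.Join(dir, "_"+base64.URLEncoding.EncodeToString([]byte(name)))
	fmt.Println(onDisk)

	// List reverses the encoding to report the original key name.
	decoded, _ := base64.URLEncoding.DecodeString("Zm9v")
	fmt.Println(string(decoded)) // "foo"
}

Get and Delete below fall back to the old, non-encoded file name, so entries written before this change remain readable.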
This commit is contained in: commit bb1f28ce66

physical/file.go (101 changed lines)

@@ -1,6 +1,7 @@
 package physical
 
 import (
+	"encoding/base64"
 	"encoding/json"
 	"fmt"
 	"io"
@@ -11,6 +12,7 @@ import (
 	log "github.com/mgutz/logxi/v1"
 
+	"github.com/hashicorp/go-multierror"
 	"github.com/hashicorp/vault/helper/jsonutil"
 )
 
@@ -48,12 +50,16 @@ func (b *FileBackend) Delete(path string) error {
 	b.l.Lock()
 	defer b.l.Unlock()
 
-	basePath, key := b.path(path)
-	fullPath := filepath.Join(basePath, key)
+	_, fullPathPrefixedFileName, fullPathPrefixedEncodedFileName := b.path(path)
+	err := os.Remove(fullPathPrefixedEncodedFileName)
+	if err != nil && os.IsNotExist(err) {
+		// For backwards compatibility, try to delete the file without base64
+		// URL encoding the file name.
+		err = os.Remove(fullPathPrefixedFileName)
+	}
 
-	err := os.Remove(fullPath)
 	if err != nil && !os.IsNotExist(err) {
-		return fmt.Errorf("Failed to remove %q: %v", fullPath, err)
+		return fmt.Errorf("Failed to remove %q: %v", path, err)
 	}
 
 	err = b.cleanupLogicalPath(path)
@@ -95,19 +101,22 @@ func (b *FileBackend) cleanupLogicalPath(path string) error {
 	return nil
 }
 
-func (b *FileBackend) Get(k string) (*Entry, error) {
+func (b *FileBackend) Get(path string) (*Entry, error) {
 	b.l.Lock()
 	defer b.l.Unlock()
 
-	path, key := b.path(k)
-	path = filepath.Join(path, key)
+	_, fullPathPrefixedFileName, fullPathPrefixedEncodedFileName := b.path(path)
+	f, err := os.Open(fullPathPrefixedEncodedFileName)
+	if err != nil && os.IsNotExist(err) {
+		// For backwards compatibility, if non-encoded file name is a valid
+		// storage entry, read it out.
+		f, err = os.Open(fullPathPrefixedFileName)
+	}
 
-	f, err := os.Open(path)
 	if err != nil {
 		if os.IsNotExist(err) {
 			return nil, nil
 		}
 
 		return nil, err
 	}
 	defer f.Close()
@@ -121,27 +130,61 @@ func (b *FileBackend) Get(k string) (*Entry, error) {
 }
 
 func (b *FileBackend) Put(entry *Entry) error {
-	path, key := b.path(entry.Key)
+	var retErr error
+	if entry == nil {
+		retErr = multierror.Append(retErr, fmt.Errorf("nil entry"))
+		return retErr
+	}
+
+	basePath, fullPathPrefixedFileName, fullPathPrefixedEncodedFileName := b.path(entry.Key)
 
 	b.l.Lock()
 	defer b.l.Unlock()
 
+	// New storage entries will have their file names base64 URL encoded. If a
+	// file with a non-encoded file name exists, it indicates that this is an
+	// update operation. To avoid duplication of storage entries, delete the
+	// old entry in the defer function.
+	info, err := os.Stat(fullPathPrefixedFileName)
+	if err == nil && info != nil {
+		defer func() {
+			err := os.Remove(fullPathPrefixedFileName)
+			if err != nil && !os.IsNotExist(err) {
+				retErr = multierror.Append(retErr, fmt.Errorf("failed to remove old entry: %v", err))
+				return
+			}
+			err = b.cleanupLogicalPath(entry.Key)
+			if err != nil {
+				retErr = multierror.Append(retErr, fmt.Errorf("failed to cleanup after removing old entry: %v", err))
+				return
+			}
+		}()
+	}
+
 	// Make the parent tree
-	if err := os.MkdirAll(path, 0755); err != nil {
-		return err
+	if err := os.MkdirAll(basePath, 0755); err != nil {
+		retErr = multierror.Append(retErr, err)
+		return retErr
 	}
 
 	// JSON encode the entry and write it
 	f, err := os.OpenFile(
-		filepath.Join(path, key),
+		fullPathPrefixedEncodedFileName,
 		os.O_CREATE|os.O_TRUNC|os.O_WRONLY,
 		0600)
 	if err != nil {
-		return err
+		retErr = multierror.Append(retErr, err)
+		return retErr
 	}
 	defer f.Close()
 	enc := json.NewEncoder(f)
-	return enc.Encode(entry)
+
+	err = enc.Encode(entry)
+	if err != nil {
+		retErr = multierror.Append(retErr, err)
+		return retErr
+	}
+	return nil
 }
 
 func (b *FileBackend) List(prefix string) ([]string, error) {
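The deferred cleanup in Put above is what migrates older entries: a write always lands under the encoded file name, and if a file with the old, non-encoded name exists it is removed afterwards, followed by a cleanupLogicalPath call. A rough illustration for a hypothetical key "foo" (the file names are computed with the same scheme, not taken from a real run):

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// Hypothetical key; shows the two on-disk names involved in an update.
	name := "foo"
	legacy := "_" + name                                             // written by the pre-change backend
	encoded := "_" + base64.URLEncoding.EncodeToString([]byte(name)) // written by the new Put
	fmt.Println(legacy)  // _foo
	fmt.Println(encoded) // _Zm9v
	// After the new Put returns, only the encoded file should remain; the
	// deferred function removes the legacy file and calls cleanupLogicalPath.
}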
@@ -172,6 +215,12 @@ func (b *FileBackend) List(prefix string) ([]string, error) {
 	for i, name := range names {
 		if name[0] == '_' {
 			names[i] = name[1:]
+			// If the file name is encoded, decode it to keep the list output
+			// meaningful.
+			nameDecodedBytes, err := base64.URLEncoding.DecodeString(names[i])
+			if err == nil {
+				names[i] = string(nameDecodedBytes)
+			}
 		} else {
 			names[i] = name + "/"
 		}
@@ -180,9 +229,21 @@ func (b *FileBackend) List(prefix string) ([]string, error) {
 	return names, nil
 }
 
-func (b *FileBackend) path(k string) (string, string) {
-	path := filepath.Join(b.Path, k)
-	key := filepath.Base(path)
-	path = filepath.Dir(path)
-	return path, "_" + key
+func (b *FileBackend) path(path string) (string, string, string) {
+	fullPath := filepath.Join(b.Path, path)
+
+	basePath := filepath.Dir(fullPath)
+
+	fileName := filepath.Base(fullPath)
+
+	fullPathPrefixedFileName := filepath.Join(basePath, "_"+fileName)
+
+	// base64 URL encode the file name to make all of its characters compatible
+	// with the host OS (especially Windows). However, the basePath can contain
+	// disallowed characters. Encoding all the directory names and the file
+	// name would be overkill, and encoding the fullPath would flatten the
+	// storage, which *may* not be desired.
+	fullPathPrefixedEncodedFileName := filepath.Join(basePath, "_"+base64.URLEncoding.EncodeToString([]byte(fileName)))
+
+	return basePath, fullPathPrefixedFileName, fullPathPrefixedEncodedFileName
 }
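Because only the final path segment is encoded, directory names on disk stay as-is, which is the trade-off the comment above describes. A small worked example of the three values the new path() would return, using a made-up base path and key:

package main

import (
	"encoding/base64"
	"fmt"
	"path/filepath"
)

func main() {
	// Made-up values; only the scheme matches (*FileBackend).path above.
	backendPath := "/var/vault"
	key := "prod/aws/config"

	fullPath := filepath.Join(backendPath, key)
	basePath := filepath.Dir(fullPath)  // /var/vault/prod/aws
	fileName := filepath.Base(fullPath) // config

	fullPathPrefixedFileName := filepath.Join(basePath, "_"+fileName)
	fullPathPrefixedEncodedFileName := filepath.Join(basePath,
		"_"+base64.URLEncoding.EncodeToString([]byte(fileName)))

	fmt.Println(basePath)                        // /var/vault/prod/aws
	fmt.Println(fullPathPrefixedFileName)        // /var/vault/prod/aws/_config
	fmt.Println(fullPathPrefixedEncodedFileName) // /var/vault/prod/aws/_Y29uZmln
}

The basePath value is what Put passes to os.MkdirAll before writing the entry.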
physical/file_test.go

@@ -1,14 +1,136 @@
 package physical
 
 import (
+	"encoding/json"
 	"io/ioutil"
 	"os"
+	"path/filepath"
+	"reflect"
 	"testing"
 
 	"github.com/hashicorp/vault/helper/logformat"
 	log "github.com/mgutz/logxi/v1"
 )
 
+func TestFileBackend_Base64URLEncoding(t *testing.T) {
+	backendPath, err := ioutil.TempDir("", "vault")
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	defer os.RemoveAll(backendPath)
+
+	logger := logformat.NewVaultLogger(log.LevelTrace)
+
+	b, err := NewBackend("file", logger, map[string]string{
+		"path": backendPath,
+	})
+	if err != nil {
+		t.Fatalf("err: %s", err)
+	}
+
+	// List the entries. Length should be zero.
+	keys, err := b.List("")
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if len(keys) != 0 {
+		t.Fatalf("bad: len(keys): expected: 0, actual: %d", len(keys))
+	}
+
+	// Create a storage entry without base64 encoding the file name
+	rawFullPath := filepath.Join(backendPath, "_foo")
+	e := &Entry{Key: "foo", Value: []byte("test")}
+	f, err := os.OpenFile(
+		rawFullPath,
+		os.O_CREATE|os.O_TRUNC|os.O_WRONLY,
+		0600)
+	if err != nil {
+		t.Fatal(err)
+	}
+	json.NewEncoder(f).Encode(e)
+	f.Close()
+
+	// Get should work
+	out, err := b.Get("foo")
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if !reflect.DeepEqual(out, e) {
+		t.Fatalf("bad: %v expected: %v", out, e)
+	}
+
+	// List the entries. There should be one entry.
+	keys, err = b.List("")
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if len(keys) != 1 {
+		t.Fatalf("bad: len(keys): expected: 1, actual: %d", len(keys))
+	}
+
+	err = b.Put(e)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	// List the entries again. There should still be one entry.
+	keys, err = b.List("")
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if len(keys) != 1 {
+		t.Fatalf("bad: len(keys): expected: 1, actual: %d", len(keys))
+	}
+
+	// Get should work
+	out, err = b.Get("foo")
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if !reflect.DeepEqual(out, e) {
+		t.Fatalf("bad: %v expected: %v", out, e)
+	}
+
+	err = b.Delete("foo")
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+
+	out, err = b.Get("foo")
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if out != nil {
+		t.Fatalf("bad: entry: expected: nil, actual: %#v", e)
+	}
+
+	keys, err = b.List("")
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if len(keys) != 0 {
+		t.Fatalf("bad: len(keys): expected: 0, actual: %d", len(keys))
+	}
+
+	f, err = os.OpenFile(
+		rawFullPath,
+		os.O_CREATE|os.O_TRUNC|os.O_WRONLY,
+		0600)
+	if err != nil {
+		t.Fatal(err)
+	}
+	json.NewEncoder(f).Encode(e)
+	f.Close()
+
+	keys, err = b.List("")
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if len(keys) != 1 {
+		t.Fatalf("bad: len(keys): expected: 1, actual: %d", len(keys))
+	}
+}
+
 func TestFileBackend(t *testing.T) {
 	dir, err := ioutil.TempDir("", "vault")
 	if err != nil {