Convert to logxi
parent 865ca94032
commit 58b32e5432
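This commit converts Vault's logging from the standard library log package to logxi's leveled, structured logger (github.com/mgutz/logxi/v1). For orientation, the recurring call-site pattern is sketched below; this is an illustration in the spirit of the diff, not a hunk from it, and the sample values are hypothetical.

package main

import log "github.com/mgutz/logxi/v1"

func main() {
	logger := log.New("vault")

	// Before this commit, a call site embedded the level in the format string:
	//   b.Logger().Printf("[DEBUG] auth/ldap: BindDN for %s is %s", username, bindDN)

	// After: structured key/value pairs, with Debug/Trace calls wrapped in an
	// IsDebug()/IsTrace() guard so arguments are only built when the level is enabled.
	username, bindDN := "alice", "cn=alice,ou=users,dc=example,dc=com" // sample values
	if logger.IsDebug() {
		logger.Debug("auth/ldap: BindDN fetched", "username", username, "binddn", bindDN)
	}
}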
@@ -5,7 +5,6 @@ import (
"sync/atomic"
"time"

- "github.com/hashicorp/errwrap"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
@@ -59,7 +58,7 @@ func (b *backend) tidySecretID(s logical.Storage) error {
}

if secretIDEntry == nil {
- result = multierror.Append(result, errwrap.Wrapf("[ERR] {{err}}", fmt.Errorf("entry for SecretID %s is nil", secretIDHMAC)))
+ result = multierror.Append(result, fmt.Errorf("entry for SecretID %s is nil", secretIDHMAC))
lock.Unlock()
continue
}
@@ -218,18 +218,18 @@ func (b *backend) loadTrustedCerts(store logical.Storage) (pool *x509.CertPool,
trustedNonCAs = make([]*ParsedCert, 0)
names, err := store.List("cert/")
if err != nil {
- b.Logger().Printf("[ERR] cert: failed to list trusted certs: %v", err)
+ b.Logger().Error("cert: failed to list trusted certs", "error", err)
return
}
for _, name := range names {
entry, err := b.Cert(store, strings.TrimPrefix(name, "cert/"))
if err != nil {
- b.Logger().Printf("[ERR] cert: failed to load trusted certs '%s': %v", name, err)
+ b.Logger().Error("cert: failed to load trusted cert", "name", name, "error", err)
continue
}
parsed := parsePEM([]byte(entry.Certificate))
if len(parsed) == 0 {
- b.Logger().Printf("[ERR] cert: failed to parse certificate for '%s'", name)
+ b.Logger().Error("cert: failed to parse certificate", "name", name)
continue
}
if !parsed[0].IsCA {
@@ -48,7 +48,7 @@ func (b *backend) pathLogin(

ttl, _, err := b.SanitizeTTLStr(config.TTL.String(), config.MaxTTL.String())
if err != nil {
- return logical.ErrorResponse(fmt.Sprintf("[ERR]:%s", err)), nil
+ return logical.ErrorResponse(fmt.Sprintf("error sanitizing TTLs: %s", err)), nil
}

return &logical.Response{
@@ -105,7 +105,9 @@ func (b *backend) Login(req *logical.Request, username string, password string)
return nil, logical.ErrorResponse(err.Error()), nil
}

- b.Logger().Printf("[DEBUG] auth/ldap: BindDN for %s is %s", username, bindDN)
+ if b.Logger().IsDebug() {
+ b.Logger().Debug("auth/ldap: BindDN fetched", "username", username, "binddn", bindDN)
+ }

// Try to bind as the login user. This is where the actual authentication takes place.
if err = c.Bind(bindDN, password); err != nil {
@@ -121,7 +123,9 @@ func (b *backend) Login(req *logical.Request, username string, password string)
if err != nil {
return nil, logical.ErrorResponse(err.Error()), nil
}
- b.Logger().Printf("[DEBUG] auth/ldap: Server returned %d groups: %v", len(ldapGroups), ldapGroups)
+ if b.Logger().IsDebug() {
+ b.Logger().Debug("auth/ldap: Groups fetched from server", "num_server_groups", len(ldapGroups), "server_groups", ldapGroups)
+ }

ldapResponse := &logical.Response{
Data: map[string]interface{}{},
@@ -137,7 +141,9 @@ func (b *backend) Login(req *logical.Request, username string, password string)
// Import the custom added groups from ldap backend
user, err := b.User(req.Storage, username)
if err == nil && user != nil && user.Groups != nil {
- b.Logger().Printf("[DEBUG] auth/ldap: adding %d local groups: %v\n", len(user.Groups), user.Groups)
+ if b.Logger().IsDebug() {
+ b.Logger().Debug("auth/ldap: adding local groups", "num_local_groups", len(user.Groups), "local_groups", user.Groups)
+ }
allGroups = append(allGroups, user.Groups...)
}
// Merge local and LDAP groups
@@ -207,7 +213,9 @@ func (b *backend) getBindDN(cfg *ConfigEntry, c *ldap.Conn, username string) (st
}

filter := fmt.Sprintf("(%s=%s)", cfg.UserAttr, ldap.EscapeFilter(username))
- b.Logger().Printf("[DEBUG] auth/ldap: Discovering user, BaseDN=%s, Filter=%s", cfg.UserDN, filter)
+ if b.Logger().IsDebug() {
+ b.Logger().Debug("auth/ldap: Discovering user", "userdn", cfg.UserDN, "filter", filter)
+ }
result, err := c.Search(&ldap.SearchRequest{
BaseDN: cfg.UserDN,
Scope: 2, // subtree
@@ -239,7 +247,9 @@ func (b *backend) getUserDN(cfg *ConfigEntry, c *ldap.Conn, bindDN string) (stri
if cfg.UPNDomain != "" {
// Find the distinguished name for the user if userPrincipalName used for login
filter := fmt.Sprintf("(userPrincipalName=%s)", ldap.EscapeFilter(bindDN))
- b.Logger().Printf("[DEBUG] auth/ldap: Searching UPN, BaseDN=%s, Filter=%s", cfg.UserDN, filter)
+ if b.Logger().IsDebug() {
+ b.Logger().Debug("auth/ldap: Searching UPN", "userdn", cfg.UserDN, "filter", filter)
+ }
result, err := c.Search(&ldap.SearchRequest{
BaseDN: cfg.UserDN,
Scope: 2, // subtree
@@ -281,18 +291,20 @@ func (b *backend) getLdapGroups(cfg *ConfigEntry, c *ldap.Conn, userDN string, u
ldapMap := make(map[string]bool)

if cfg.GroupFilter == "" {
- b.Logger().Printf("[WARN] auth/ldap: GroupFilter is empty, will not query server")
+ b.Logger().Warn("auth/ldap: GroupFilter is empty, will not query server")
return make([]string, 0), nil
}

if cfg.GroupDN == "" {
- b.Logger().Printf("[WARN] auth/ldap: GroupDN is empty, will not query server")
+ b.Logger().Warn("auth/ldap: GroupDN is empty, will not query server")
return make([]string, 0), nil
}

// If groupfilter was defined, resolve it as a Go template and use the query for
// returning the user's groups
- b.Logger().Printf("[DEBUG] auth/ldap: Compiling group filter %s", cfg.GroupFilter)
+ if b.Logger().IsDebug() {
+ b.Logger().Debug("auth/ldap: Compiling group filter", "group_filter", cfg.GroupFilter)
+ }

// Parse the configuration as a template.
// Example template "(&(objectClass=group)(member:1.2.840.113556.1.4.1941:={{.UserDN}}))"
@@ -313,7 +325,9 @@ func (b *backend) getLdapGroups(cfg *ConfigEntry, c *ldap.Conn, userDN string, u
var renderedQuery bytes.Buffer
t.Execute(&renderedQuery, context)

- b.Logger().Printf("[DEBUG] auth/ldap: Searching GroupDN=%s, query=%s", cfg.GroupDN, renderedQuery.String())
+ if b.Logger().IsDebug() {
+ b.Logger().Debug("auth/ldap: Searching", "groupdn", cfg.GroupDN, "rendered_query", renderedQuery.String())
+ }

result, err := c.Search(&ldap.SearchRequest{
BaseDN: cfg.GroupDN,
@@ -7,12 +7,12 @@ import (
"strings"
"sync"
"testing"
+ "time"

"github.com/hashicorp/vault/logical"
logicaltest "github.com/hashicorp/vault/logical/testing"
"github.com/mitchellh/mapstructure"
"github.com/ory-am/dockertest"
- "time"
)

var (
@@ -191,7 +191,9 @@ reply:
logical.HTTPRawBody: certificate,
}}
if retErr != nil {
- b.Logger().Printf("Possible error, but cannot return in raw response: %s. Note that an empty CA probably means none was configured, and an empty CRL is quite possibly correct", retErr)
+ if b.Logger().IsWarn() {
+ b.Logger().Warn("Possible error, but cannot return in raw response. Note that an empty CA probably means none was configured, and an empty CRL is possibly correct", "error", retErr)
+ }
}
retErr = nil
response.Data[logical.HTTPStatusCode] = 200
@@ -3,10 +3,11 @@ package postgresql
import (
"database/sql"
"fmt"
- "log"
"strings"
"sync"

+ log "github.com/mgutz/logxi/v1"

"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/logical/framework"
)
@@ -45,13 +46,14 @@ type backend struct {
db *sql.DB
lock sync.Mutex

- logger *log.Logger
+ logger log.Logger
}

// DB returns the database connection.
func (b *backend) DB(s logical.Storage) (*sql.DB, error) {
- b.logger.Println("[TRACE] postgres/db: enter")
- defer b.logger.Println("[TRACE] postgres/db: exit")
+ b.logger.Trace("postgres/db: enter")
+ defer b.logger.Trace("postgres/db: exit")

b.lock.Lock()
defer b.lock.Unlock()

@@ -111,8 +113,8 @@ func (b *backend) DB(s logical.Storage) (*sql.DB, error) {

// ResetDB forces a connection next time DB() is called.
func (b *backend) ResetDB() {
- b.logger.Println("[TRACE] postgres/resetdb: enter")
- defer b.logger.Println("[TRACE] postgres/resetdb: exit")
+ b.logger.Trace("postgres/resetdb: enter")
+ defer b.logger.Trace("postgres/resetdb: exit")

b.lock.Lock()
defer b.lock.Unlock()
@@ -33,12 +33,13 @@ func pathRoleCreate(b *backend) *framework.Path {

func (b *backend) pathRoleCreateRead(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
- b.logger.Println("[TRACE] postgres/pathRoleCreateRead: enter")
- defer b.logger.Println("[TRACE] postgres/pathRoleCreateRead: exit")
+ b.logger.Trace("postgres/pathRoleCreateRead: enter")
+ defer b.logger.Trace("postgres/pathRoleCreateRead: exit")

name := data.Get("name").(string)

// Get the role
- b.logger.Println("[TRACE] postgres/pathRoleCreateRead: getting role")
+ b.logger.Trace("postgres/pathRoleCreateRead: getting role")
role, err := b.Role(req.Storage, name)
if err != nil {
return nil, err
@@ -48,7 +49,7 @@ func (b *backend) pathRoleCreateRead(
}

// Determine if we have a lease
- b.logger.Println("[TRACE] postgres/pathRoleCreateRead: getting lease")
+ b.logger.Trace("postgres/pathRoleCreateRead: getting lease")
lease, err := b.Lease(req.Storage)
if err != nil {
return nil, err
@@ -84,20 +85,20 @@ func (b *backend) pathRoleCreateRead(
Format("2006-01-02 15:04:05-0700")

// Get our handle
- b.logger.Println("[TRACE] postgres/pathRoleCreateRead: getting database handle")
+ b.logger.Trace("postgres/pathRoleCreateRead: getting database handle")
db, err := b.DB(req.Storage)
if err != nil {
return nil, err
}

// Start a transaction
- b.logger.Println("[TRACE] postgres/pathRoleCreateRead: starting transaction")
+ b.logger.Trace("postgres/pathRoleCreateRead: starting transaction")
tx, err := db.Begin()
if err != nil {
return nil, err
}
defer func() {
- b.logger.Println("[TRACE] postgres/pathRoleCreateRead: rolling back transaction")
+ b.logger.Trace("postgres/pathRoleCreateRead: rolling back transaction")
tx.Rollback()
}()

@@ -108,7 +109,7 @@ func (b *backend) pathRoleCreateRead(
continue
}

- b.logger.Println("[TRACE] postgres/pathRoleCreateRead: preparing statement")
+ b.logger.Trace("postgres/pathRoleCreateRead: preparing statement")
stmt, err := tx.Prepare(Query(query, map[string]string{
"name": username,
"password": password,
@@ -118,7 +119,7 @@ func (b *backend) pathRoleCreateRead(
return nil, err
}
defer stmt.Close()
- b.logger.Println("[TRACE] postgres/pathRoleCreateRead: executing statement")
+ b.logger.Trace("postgres/pathRoleCreateRead: executing statement")
if _, err := stmt.Exec(); err != nil {
return nil, err
}
@@ -126,14 +127,14 @@ func (b *backend) pathRoleCreateRead(

// Commit the transaction

- b.logger.Println("[TRACE] postgres/pathRoleCreateRead: committing transaction")
+ b.logger.Trace("postgres/pathRoleCreateRead: committing transaction")
if err := tx.Commit(); err != nil {
return nil, err
}

// Return the secret

- b.logger.Println("[TRACE] postgres/pathRoleCreateRead: generating secret")
+ b.logger.Trace("postgres/pathRoleCreateRead: generating secret")
resp := b.Secret(SecretCredsType).Response(map[string]interface{}{
"username": username,
"password": password,
@@ -1 +0,0 @@
- package rabbitmq
@@ -7,11 +7,12 @@ import (
"fmt"
"io"
"io/ioutil"
- "log"
"net"
"os"
"path/filepath"

+ log "github.com/mgutz/logxi/v1"

"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/agent"
)
@@ -38,6 +39,9 @@ type SSHCommConfig struct {

// DisableAgent, if true, will not forward the SSH agent.
DisableAgent bool

+ // Logger for output
+ Logger log.Logger
}

// Creates a new communicator implementation over SSH. This takes
@@ -92,8 +96,9 @@ func (c *comm) NewSession() (session *ssh.Session, err error) {
}

if err != nil {
- log.Printf("ssh session open error: '%s', attempting reconnect", err)
+ c.config.Logger.Error("ssh session open error, attempting reconnect", "error", err)
if err := c.reconnect(); err != nil {
+ c.config.Logger.Error("reconnect attempt failed", "error", err)
return nil, err
}

@@ -119,13 +124,13 @@ func (c *comm) reconnect() error {
//
// http://golang.org/doc/faq#nil_error
c.conn = nil
- log.Printf("reconnection error: %s", err)
+ c.config.Logger.Error("reconnection error", "error", err)
return err
}

sshConn, sshChan, req, err := ssh.NewClientConn(c.conn, c.address, c.config.SSHConfig)
if err != nil {
- log.Printf("handshake error: %s", err)
+ c.config.Logger.Error("handshake error", "error", err)
c.Close()
return err
}
@@ -153,7 +158,7 @@ func (c *comm) connectToAgent() {
}
agentConn, err := net.Dial("unix", socketLocation)
if err != nil {
- log.Printf("[ERROR] could not connect to local agent socket: %s", socketLocation)
+ c.config.Logger.Error("could not connect to local agent socket", "socket_path", socketLocation)
return
}
defer agentConn.Close()
@@ -161,7 +166,7 @@
// create agent and add in auth
forwardingAgent := agent.NewClient(agentConn)
if forwardingAgent == nil {
- log.Printf("[ERROR] Could not create agent client")
+ c.config.Logger.Error("could not create agent client")
return
}

@@ -180,7 +185,7 @@

err = agent.RequestAgentForwarding(session)
if err != nil {
- log.Printf("[ERROR] RequestAgentForwarding: %#v", err)
+ c.config.Logger.Error("error requesting agent forwarding", "error", err)
return
}
return
@@ -243,7 +248,7 @@ func (c *comm) scpSession(scpCommand string, f func(io.Writer, *bufio.Reader) er
if exitErr, ok := err.(*ssh.ExitError); ok {
// Otherwise, we have an ExitErorr, meaning we can just read
// the exit status
- log.Printf("non-zero exit status: %d", exitErr.ExitStatus())
+ c.config.Logger.Error("got non-zero exit status", "exit_status", exitErr.ExitStatus())

// If we exited with status 127, it means SCP isn't available.
// Return a more descriptive error for that.
@@ -14,6 +14,7 @@ import (

"github.com/hashicorp/vault/logical"

+ log "github.com/mgutz/logxi/v1"
"golang.org/x/crypto/ssh"
)

@@ -52,7 +53,7 @@ func (b *backend) installPublicKeyInTarget(adminUser, username, ip string, port
return err
}

- comm, err := createSSHComm(adminUser, ip, port, hostkey)
+ comm, err := createSSHComm(b.Logger(), adminUser, ip, port, hostkey)
if err != nil {
return err
}
@@ -203,7 +204,7 @@ func cidrListContainsIP(ip, cidrList string) (bool, error) {
return false, nil
}

- func createSSHComm(username, ip string, port int, hostkey string) (*comm, error) {
+ func createSSHComm(logger log.Logger, username, ip string, port int, hostkey string) (*comm, error) {
signer, err := ssh.ParsePrivateKey([]byte(hostkey))
if err != nil {
return nil, err
@@ -234,6 +235,7 @@ func createSSHComm(username, ip string, port int, hostkey string) (*comm, error)
Connection: connfunc,
Pty: false,
DisableAgent: true,
+ Logger: logger,
}

return SSHCommNew(fmt.Sprintf("%s:%d", ip, port), config)
@@ -3,7 +3,6 @@ package cli
import (
"bytes"
"fmt"
- "log"
"sort"
"strings"

@@ -72,9 +71,7 @@ func listCommands(commands map[string]cli.CommandFactory, maxKeyLen int) string

command, err := commandFunc()
if err != nil {
- log.Printf("[ERR] cli: Command '%s' failed to load: %s",
- key, err)
- continue
+ panic(fmt.Sprintf("command '%s' failed to load: %s", key, err))
}

key = fmt.Sprintf("%s%s", key, strings.Repeat(" ", maxKeyLen-len(key)))
@@ -4,7 +4,6 @@ import (
"encoding/base64"
"encoding/hex"
"fmt"
- "log"
"net"
"net/http"
"net/url"
@@ -18,17 +17,20 @@ import (
"syscall"
"time"

+ colorable "github.com/mattn/go-colorable"
+ log "github.com/mgutz/logxi/v1"

"google.golang.org/grpc/grpclog"

"github.com/armon/go-metrics"
"github.com/armon/go-metrics/circonus"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/go-multierror"
- "github.com/hashicorp/logutils"
"github.com/hashicorp/vault/audit"
"github.com/hashicorp/vault/command/server"
"github.com/hashicorp/vault/helper/flag-slice"
"github.com/hashicorp/vault/helper/gated-writer"
+ "github.com/hashicorp/vault/helper/logformat"
"github.com/hashicorp/vault/helper/mlock"
vaulthttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/logical"
@@ -51,7 +53,7 @@ type ServerCommand struct {

meta.Meta

- logger *log.Logger
+ logger log.Logger

ReloadFuncs map[string][]server.ReloadFunc
}
@@ -73,6 +75,38 @@ func (c *ServerCommand) Run(args []string) int {
return 1
}

+ // Create a logger. We wrap it in a gated writer so that it doesn't
+ // start logging too early.
+ logGate := &gatedwriter.Writer{Writer: colorable.NewColorable(os.Stderr)}
+ var level int
+ switch logLevel {
+ case "trace":
+ level = log.LevelTrace
+ case "debug":
+ level = log.LevelDebug
+ case "info":
+ level = log.LevelInfo
+ case "notice":
+ level = log.LevelNotice
+ case "warn":
+ level = log.LevelWarn
+ case "err":
+ level = log.LevelError
+ default:
+ c.Ui.Output(fmt.Sprintf("Unknown log level %s", logLevel))
+ return 1
+ }
+ switch strings.ToLower(os.Getenv("LOGXI_FORMAT")) {
+ case "vault", "vault_json", "vault-json", "vaultjson", "":
+ c.logger = logformat.NewVaultLoggerWithWriter(logGate, level)
+ default:
+ c.logger = log.NewLogger(logGate, "vault")
+ c.logger.SetLevel(level)
+ }
+ grpclog.SetLogger(&grpclogFaker{
+ logger: c.logger,
+ })

if os.Getenv("VAULT_DEV_ROOT_TOKEN_ID") != "" && devRootTokenID == "" {
devRootTokenID = os.Getenv("VAULT_DEV_ROOT_TOKEN_ID")
}
@@ -112,7 +146,7 @@ func (c *ServerCommand) Run(args []string) int {
}
}
for _, path := range configPath {
- current, err := server.LoadConfig(path)
+ current, err := server.LoadConfig(path, c.logger)
if err != nil {
c.Ui.Error(fmt.Sprintf(
"Error loading configuration from %s: %s", path, err))
@@ -147,17 +181,6 @@ func (c *ServerCommand) Run(args []string) int {
c.Ui.Output("  Vault on an mlockall(2) enabled system is much more secure.\n")
}

- // Create a logger. We wrap it in a gated writer so that it doesn't
- // start logging too early.
- logGate := &gatedwriter.Writer{Writer: os.Stderr}
- c.logger = log.New(&logutils.LevelFilter{
- Levels: []logutils.LogLevel{
- "TRACE", "DEBUG", "INFO", "WARN", "ERR"},
- MinLevel: logutils.LogLevel(strings.ToUpper(logLevel)),
- Writer: logGate,
- }, "", log.LstdFlags)
- grpclog.SetLogger(c.logger)

if err := c.setupTelemetry(config); err != nil {
c.Ui.Error(fmt.Sprintf("Error initializing telemetry: %s", err))
return 1
@@ -408,7 +431,9 @@ func (c *ServerCommand) Run(args []string) int {

}
if !disableClustering {
- c.logger.Printf("[TRACE] cluster listeners will be started on %v", clusterAddrs)
+ if c.logger.IsTrace() {
+ c.logger.Trace("cluster listener addresses synthesized", "cluster_addresses", clusterAddrs)
+ }
}

// Make sure we close all listeners from this point on
@@ -598,6 +623,7 @@ func (c *ServerCommand) enableDev(core *vault.Core, rootTokenID string) (*vault.

if rootTokenID != "" {
req := &logical.Request{
+ ID: "dev-gen-root",
Operation: logical.UpdateOperation,
ClientToken: init.RootToken,
Path: "auth/token/create",
@@ -621,6 +647,7 @@ func (c *ServerCommand) enableDev(core *vault.Core, rootTokenID string) (*vault.

init.RootToken = resp.Auth.ClientToken

+ req.ID = "dev-revoke-init-root"
req.Path = "auth/token/revoke-self"
req.Data = nil
resp, err = core.HandleRequest(req)
@@ -795,7 +822,7 @@ func (c *ServerCommand) Reload(configPath []string) error {
// Read the new config
var config *server.Config
for _, path := range configPath {
- current, err := server.LoadConfig(path)
+ current, err := server.LoadConfig(path, c.logger)
if err != nil {
retErr := fmt.Errorf("Error loading configuration from %s: %s", path, err)
c.Ui.Error(retErr.Error())
@@ -910,3 +937,34 @@ func MakeSighupCh() chan struct{} {
}()
return resultCh
}

+ type grpclogFaker struct {
+ logger log.Logger
+ }

+ func (g *grpclogFaker) Fatal(args ...interface{}) {
+ g.logger.Error(fmt.Sprint(args...))
+ os.Exit(1)
+ }

+ func (g *grpclogFaker) Fatalf(format string, args ...interface{}) {
+ g.logger.Error(fmt.Sprintf(format, args...))
+ os.Exit(1)
+ }

+ func (g *grpclogFaker) Fatalln(args ...interface{}) {
+ g.logger.Error(fmt.Sprintln(args...))
+ os.Exit(1)
+ }

+ func (g *grpclogFaker) Print(args ...interface{}) {
+ g.logger.Warn(fmt.Sprint(args...))
+ }

+ func (g *grpclogFaker) Printf(format string, args ...interface{}) {
+ g.logger.Warn(fmt.Sprintf(format, args...))
+ }

+ func (g *grpclogFaker) Println(args ...interface{}) {
+ g.logger.Warn(fmt.Sprintln(args...))
+ }
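A note on the grpclogFaker added above: it adapts the structured logxi logger to the six-method Logger interface that google.golang.org/grpc/grpclog expected at the time, mapping the Fatal* methods to Error plus os.Exit(1) and the Print* methods to Warn. A compile-time assertion (not part of the commit) that the adapter satisfies that interface would look like:

// Hypothetical check, assuming the legacy grpclog.Logger interface.
var _ grpclog.Logger = (*grpclogFaker)(nil)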
@@ -4,13 +4,14 @@ import (
"fmt"
"io"
"io/ioutil"
- "log"
"os"
"path/filepath"
"strconv"
"strings"
"time"

+ log "github.com/mgutz/logxi/v1"

"github.com/hashicorp/go-multierror"
"github.com/hashicorp/hcl"
"github.com/hashicorp/hcl/hcl/ast"
@@ -231,30 +232,30 @@ func (c *Config) Merge(c2 *Config) *Config {

// LoadConfig loads the configuration at the given path, regardless if
// its a file or directory.
- func LoadConfig(path string) (*Config, error) {
+ func LoadConfig(path string, logger log.Logger) (*Config, error) {
fi, err := os.Stat(path)
if err != nil {
return nil, err
}

if fi.IsDir() {
- return LoadConfigDir(path)
+ return LoadConfigDir(path, logger)
} else {
- return LoadConfigFile(path)
+ return LoadConfigFile(path, logger)
}
}

// LoadConfigFile loads the configuration from the given file.
- func LoadConfigFile(path string) (*Config, error) {
+ func LoadConfigFile(path string, logger log.Logger) (*Config, error) {
// Read the file
d, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
- return ParseConfig(string(d))
+ return ParseConfig(string(d), logger)
}

- func ParseConfig(d string) (*Config, error) {
+ func ParseConfig(d string, logger log.Logger) (*Config, error) {
// Parse!
obj, err := hcl.Parse(d)
if err != nil {
@@ -294,41 +295,11 @@ func ParseConfig(d string) (*Config, error) {
"default_lease_ttl",
"max_lease_ttl",
"cluster_name",
-
- // TODO: Remove in 0.6.0
- // Deprecated keys
- "statsd_addr",
- "statsite_addr",
}
if err := checkHCLKeys(list, valid); err != nil {
return nil, err
}

- // TODO: Remove in 0.6.0
- // Preflight checks for deprecated keys
- sda := list.Filter("statsd_addr")
- ssa := list.Filter("statsite_addr")
- if len(sda.Items) > 0 || len(ssa.Items) > 0 {
- log.Println("[WARN] The top-level keys 'statsd_addr' and 'statsite_addr' " +
- "have been moved into a 'telemetry' block instead. Please update your " +
- "Vault configuration as this deprecation will be removed in the next " +
- "major release. Values specified in a 'telemetry' block will take " +
- "precendence.")
-
- t := struct {
- StatsdAddr string `hcl:"statsd_addr"`
- StatsiteAddr string `hcl:"statsite_addr"`
- }{}
- if err := hcl.DecodeObject(&t, list); err != nil {
- return nil, err
- }
-
- result.Telemetry = &Telemetry{
- StatsdAddr: t.StatsdAddr,
- StatsiteAddr: t.StatsiteAddr,
- }
- }

if o := list.Filter("backend"); len(o.Items) > 0 {
if err := parseBackends(&result, o); err != nil {
return nil, fmt.Errorf("error parsing 'backend': %s", err)
@@ -358,7 +329,7 @@ func ParseConfig(d string) (*Config, error) {

// LoadConfigDir loads all the configurations in the given directory
// in alphabetical order.
- func LoadConfigDir(dir string) (*Config, error) {
+ func LoadConfigDir(dir string, logger log.Logger) (*Config, error) {
f, err := os.Open(dir)
if err != nil {
return nil, err
@@ -409,7 +380,7 @@ func LoadConfigDir(dir string) (*Config, error) {

var result *Config
for _, f := range files {
- config, err := LoadConfigFile(f)
+ config, err := LoadConfigFile(f, logger)
if err != nil {
return nil, fmt.Errorf("Error loading %s: %s", f, err)
}
@@ -615,9 +586,6 @@ func parseTelemetry(result *Config, list *ast.ObjectList) error {

// Check for invalid keys
valid := []string{
- "statsite_address",
- "statsd_address",
- "disable_hostname",
"circonus_api_token",
"circonus_api_app",
"circonus_api_url",
@@ -629,6 +597,9 @@ func parseTelemetry(result *Config, list *ast.ObjectList) error {
"circonus_check_search_tag",
"circonus_broker_id",
"circonus_broker_select_tag",
+ "disable_hostname",
+ "statsd_address",
+ "statsite_address",
}
if err := checkHCLKeys(item.Val, valid); err != nil {
return multierror.Prefix(err, "telemetry:")
@@ -5,10 +5,15 @@ import (
"strings"
"testing"
"time"

+ "github.com/hashicorp/vault/helper/logformat"
+ log "github.com/mgutz/logxi/v1"
)

func TestLoadConfigFile(t *testing.T) {
- config, err := LoadConfigFile("./test-fixtures/config.hcl")
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ config, err := LoadConfigFile("./test-fixtures/config.hcl", logger)
if err != nil {
t.Fatalf("err: %s", err)
}
@@ -70,7 +75,9 @@ func TestLoadConfigFile(t *testing.T) {
}

func TestLoadConfigFile_json(t *testing.T) {
- config, err := LoadConfigFile("./test-fixtures/config.hcl.json")
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ config, err := LoadConfigFile("./test-fixtures/config.hcl.json", logger)
if err != nil {
t.Fatalf("err: %s", err)
}
@@ -131,7 +138,9 @@ func TestLoadConfigFile_json(t *testing.T) {
}

func TestLoadConfigFile_json2(t *testing.T) {
- config, err := LoadConfigFile("./test-fixtures/config2.hcl.json")
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ config, err := LoadConfigFile("./test-fixtures/config2.hcl.json", logger)
if err != nil {
t.Fatalf("err: %s", err)
}
@@ -189,7 +198,9 @@ func TestLoadConfigFile_json2(t *testing.T) {
}

func TestLoadConfigDir(t *testing.T) {
- config, err := LoadConfigDir("./test-fixtures/config-dir")
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
+ config, err := LoadConfigDir("./test-fixtures/config-dir", logger)
if err != nil {
t.Fatalf("err: %s", err)
}
@@ -230,11 +241,13 @@ func TestLoadConfigDir(t *testing.T) {
}

func TestParseConfig_badTopLevel(t *testing.T) {
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
_, err := ParseConfig(strings.TrimSpace(`
backend {}
bad = "one"
nope = "yes"
- `))
+ `), logger)

if err == nil {
t.Fatal("expected error")
@@ -250,13 +263,15 @@ nope = "yes"
}

func TestParseConfig_badListener(t *testing.T) {
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
_, err := ParseConfig(strings.TrimSpace(`
listener "tcp" {
address = "1.2.3.3"
bad = "one"
nope = "yes"
}
- `))
+ `), logger)

if err == nil {
t.Fatal("expected error")
@@ -272,13 +287,15 @@ listener "tcp" {
}

func TestParseConfig_badTelemetry(t *testing.T) {
+ logger := logformat.NewVaultLogger(log.LevelTrace)
+
_, err := ParseConfig(strings.TrimSpace(`
telemetry {
statsd_address = "1.2.3.3"
bad = "one"
nope = "yes"
}
- `))
+ `), logger)

if err == nil {
t.Fatal("expected error")
@@ -1,7 +1,5 @@
disable_cache = true
disable_mlock = true
- statsd_addr = "bar"
- statsite_addr = "foo"

listener "atlas" {
token = "foobar"
@@ -25,6 +23,11 @@ ha_backend "consul" {
disable_clustering = "false"
}

+ telemetry {
+ statsd_address = "bar"
+ statsite_address = "foo"
+ }

max_lease_ttl = "10h"
default_lease_ttl = "10h"
cluster_name = "testcluster"
@@ -0,0 +1,144 @@
package logformat

import (
"encoding/json"
"fmt"
"io"
"os"
"sync"
"time"

log "github.com/mgutz/logxi/v1"
)

const (
styledefault = iota
stylejson
)

func NewVaultLogger(level int) log.Logger {
logger := log.New("vault")
logger.(*log.DefaultLogger).SetLevel(level)
logger.(*log.DefaultLogger).SetFormatter(createVaultFormatter())
return logger
}

func NewVaultLoggerWithWriter(w io.Writer, level int) log.Logger {
logger := log.NewLogger(w, "vault")
logger.(*log.DefaultLogger).SetLevel(level)
logger.(*log.DefaultLogger).SetFormatter(createVaultFormatter())
return logger
}

func createVaultFormatter() log.Formatter {
ret := &vaultFormatter{}
switch os.Getenv("LOGXI_FORMAT") {
case "vault_json", "vault-json", "vaultjson":
ret.style = stylejson
default:
ret.style = styledefault
}
return ret
}

type vaultFormatter struct {
sync.Mutex
style int
}

func (v *vaultFormatter) Format(writer io.Writer, level int, msg string, args []interface{}) {
v.Lock()
defer v.Unlock()
switch v.style {
case stylejson:
v.formatJSON(writer, level, msg, args)
default:
v.formatDefault(writer, level, msg, args)
}
}

func (v *vaultFormatter) formatDefault(writer io.Writer, level int, msg string, args []interface{}) {
defer writer.Write([]byte("\n"))

writer.Write([]byte(time.Now().Local().Format("2006/01/02 15:04:05.000000")))

switch level {
case log.LevelError:
writer.Write([]byte(" [ERR] "))
case log.LevelWarn:
writer.Write([]byte(" [WRN] "))
case log.LevelInfo:
writer.Write([]byte(" [INF] "))
case log.LevelNotice:
writer.Write([]byte(" [NOT] "))
case log.LevelDebug:
writer.Write([]byte(" [DBG] "))
case log.LevelTrace:
writer.Write([]byte(" [TRC] "))
default:
writer.Write([]byte(" [ALL] "))
}

writer.Write([]byte(msg))

if args != nil && len(args) > 0 {
if len(args)%2 != 0 {
args = append(args, "[unknown!]")
}

for i := 0; i < len(args); i = i + 2 {
writer.Write([]byte(fmt.Sprintf(" %s=%v", args[i], args[i+1])))
}
}
}

type logMsg struct {
Stamp string `json:"t"`
Level string `json:"l"`
Message string `json:"m"`
Args map[string]interface{} `json:"a,omitempty"`
}

func (v *vaultFormatter) formatJSON(writer io.Writer, level int, msg string, args []interface{}) {
l := &logMsg{
Message: msg,
Stamp: time.Now().Format("2006-01-02T15:04:05.000000Z07:00"),
}

switch level {
case log.LevelError:
l.Level = "E"
case log.LevelWarn:
l.Level = "W"
case log.LevelInfo:
l.Level = "I"
case log.LevelNotice:
l.Level = "N"
case log.LevelDebug:
l.Level = "D"
case log.LevelTrace:
l.Level = "T"
default:
l.Level = "A"
}

if args != nil && len(args) > 0 {
l.Args = make(map[string]interface{}, len(args)/2)

if len(args)%2 != 0 {
args = append(args, "[unknown!]")
}

for i := 0; i < len(args); i = i + 2 {
if _, ok := args[i].(string); !ok {
// As this is the logging function not much we can do here
// without injecting into logs...
continue
}
l.Args[args[i].(string)] = args[i+1]
}
}

enc := json.NewEncoder(writer)
enc.Encode(l)
}
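The new helper/logformat package above is the single place that builds Vault's logxi logger: NewVaultLogger and NewVaultLoggerWithWriter attach a vaultFormatter whose output style is selected by the LOGXI_FORMAT environment variable (the vault_json/vault-json/vaultjson values switch to single-line JSON records). A minimal usage sketch, not part of the diff:

package main

import (
	"os"

	"github.com/hashicorp/vault/helper/logformat"
	log "github.com/mgutz/logxi/v1"
)

func main() {
	// Default formatter to stderr at trace level; export LOGXI_FORMAT=vault_json
	// before this call to get the JSON formatter instead.
	logger := logformat.NewVaultLoggerWithWriter(os.Stderr, log.LevelTrace)

	logger.Info("core: starting", "version", "dev")
	if logger.IsDebug() {
		logger.Debug("core: debug detail", "count", 3)
	}
}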
@@ -238,7 +238,7 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, rpc, parallel bool, num uin
// Check for panics, otherwise notify we're done
defer func() {
if err := recover(); err != nil {
- core.Logger().Printf("[ERR] got a panic: %v", err)
+ core.Logger().Error("got a panic: %v", err)
t.Fail()
}
atomic.AddUint64(&totalOps, myTotalOps)
@@ -324,7 +324,7 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, rpc, parallel bool, num uin
waitCond.L.Unlock()
waitCond.Broadcast()

- core.Logger().Printf("[TRACE] Starting %d", id)
+ core.Logger().Trace("Starting goroutine", "id", id)

startTime := time.Now()
for {
@@ -104,7 +104,7 @@ func handleRequestForwarding(core *vault.Core, handler http.Handler) http.Handle

if r.Header.Get(NoRequestForwardingHeaderName) != "" {
// Forwarding explicitly disabled, fall back to previous behavior
- core.Logger().Printf("[TRACE] http/handleRequestForwarding: forwarding disabled by client request")
+ core.Logger().Trace("http/handleRequestForwarding: forwarding disabled by client request")
handler.ServeHTTP(w, r)
return
}
@@ -139,9 +139,9 @@ func handleRequestForwarding(core *vault.Core, handler http.Handler) http.Handle
statusCode, retBytes, err := core.ForwardRequest(r)
if err != nil {
if err == vault.ErrCannotForward {
- core.Logger().Printf("[TRACE] http/handleRequestForwarding: cannot forward (possibly disabled on active node), falling back")
+ core.Logger().Trace("http/handleRequestForwarding: cannot forward (possibly disabled on active node), falling back")
} else {
- core.Logger().Printf("[ERR] http/handleRequestForwarding: error forwarding request: %v", err)
+ core.Logger().Error("http/handleRequestForwarding: error forwarding request", "error", err)
}

// Fall back to redirection
@@ -4,21 +4,18 @@ import (
"bytes"
"encoding/json"
"io"
- "log"
- "os"
"reflect"
"strconv"
"testing"
"time"

+ log "github.com/mgutz/logxi/v1"

+ "github.com/hashicorp/vault/helper/logformat"
"github.com/hashicorp/vault/physical"
"github.com/hashicorp/vault/vault"
)

- var (
- logger = log.New(os.Stderr, "", log.LstdFlags)
- )

func TestLogical(t *testing.T) {
core, _, token := vault.TestCoreUnsealed(t)
ln, addr := TestServer(t, core)
@@ -82,6 +79,8 @@ func TestLogical_StandbyRedirect(t *testing.T) {
defer ln2.Close()

// Create an HA Vault
+ logger := logformat.NewVaultLogger(log.LevelTrace)

inmha := physical.NewInmemHA(logger)
conf := &vault.CoreConfig{
Physical: inmha,
@@ -3,15 +3,17 @@ package framework
import (
"fmt"
"io/ioutil"
- "log"
"regexp"
"sort"
"strings"
"sync"
"time"

+ log "github.com/mgutz/logxi/v1"

"github.com/hashicorp/go-multierror"
"github.com/hashicorp/vault/helper/errutil"
+ "github.com/hashicorp/vault/helper/logformat"
"github.com/hashicorp/vault/logical"
)

@@ -71,7 +73,7 @@ type Backend struct {
// See the built-in AuthRenew helpers in lease.go for common callbacks.
AuthRenew OperationFunc

- logger *log.Logger
+ logger log.Logger
system logical.SystemView
once sync.Once
pathsRe []*regexp.Regexp
@@ -224,12 +226,12 @@ func (b *Backend) Cleanup() {

// Logger can be used to get the logger. If no logger has been set,
// the logs will be discarded.
- func (b *Backend) Logger() *log.Logger {
+ func (b *Backend) Logger() log.Logger {
if b.logger != nil {
return b.logger
}

- return log.New(ioutil.Discard, "", 0)
+ return logformat.NewVaultLoggerWithWriter(ioutil.Discard, log.LevelOff)
}

func (b *Backend) System() logical.SystemView {
@@ -1,6 +1,6 @@
package logical

- import "log"
+ import log "github.com/mgutz/logxi/v1"

// Backend interface must be implemented to be "mountable" at
// a given path. Requests flow through a router which has various mount
@@ -44,7 +44,7 @@ type BackendConfig struct {
StorageView Storage

// The backend should use this logger. The log should not contain any secrets.
- Logger *log.Logger
+ Logger log.Logger

// System provides a view into a subset of safe system information that
// is useful for backends, such as the default/max lease TTLs
@@ -1,11 +1,12 @@
package logical

import (
- "log"
- "os"
"reflect"
"testing"
"time"

+ "github.com/hashicorp/vault/helper/logformat"
+ log "github.com/mgutz/logxi/v1"
)

// TestRequest is a helper to create a purely in-memory Request struct.
@@ -73,8 +74,11 @@ func TestSystemView() *StaticSystemView {
}

func TestBackendConfig() *BackendConfig {
- return &BackendConfig{
- Logger: log.New(os.Stdout, "", log.LstdFlags),
+ bc := &BackendConfig{
+ Logger: logformat.NewVaultLogger(log.LevelTrace),
System: TestSystemView(),
}
+ bc.Logger.SetLevel(log.LevelTrace)

+ return bc
}
@@ -3,24 +3,22 @@ package testing
import (
"crypto/tls"
"fmt"
- "log"
- "os"
"reflect"
"sort"
"testing"

+ log "github.com/mgutz/logxi/v1"

"github.com/hashicorp/errwrap"
"github.com/hashicorp/vault/api"
+ "github.com/hashicorp/vault/helper/logformat"
"github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/physical"
"github.com/hashicorp/vault/vault"
)

- var (
- logger = log.New(os.Stderr, "", log.LstdFlags)
- )

// TestEnvVar must be set to a non-empty value for acceptance tests to run.
const TestEnvVar = "VAULT_ACC"

@@ -136,6 +134,8 @@ func Test(tt TestT, c TestCase) {
}

// Create an in-memory Vault core
+ logger := logformat.NewVaultLogger(log.LevelTrace)

core, err := vault.NewCore(&vault.CoreConfig{
Physical: physical.NewInmem(logger),
LogicalBackends: map[string]logical.Factory{
@@ -200,7 +200,9 @@ func Test(tt TestT, c TestCase) {
// Make requests
var revoke []*logical.Request
for i, s := range c.Steps {
- log.Printf("[WARN] Executing test step %d", i+1)
+ if log.IsWarn() {
+ log.Warn("Executing test step", "step_number", i+1)
+ }

// Create the request
req := &logical.Request{
@@ -282,7 +284,9 @@ func Test(tt TestT, c TestCase) {
// Revoke any secrets we might have.
var failedRevokes []*logical.Secret
for _, req := range revoke {
- log.Printf("[WARN] Revoking secret: %#v", req)
+ if log.IsWarn() {
+ log.Warn("Revoking secret", "secret", fmt.Sprintf("%#v", req))
+ }
req.ClientToken = client.Token()
resp, err := core.HandleRequest(req)
if err == nil && resp.IsError() {
@@ -290,14 +294,14 @@ func Test(tt TestT, c TestCase) {
}
if err != nil {
failedRevokes = append(failedRevokes, req.Secret)
- tt.Error(fmt.Sprintf("[ERR] Revoke error: %s", err))
+ tt.Error(fmt.Sprintf("Revoke error: %s", err))
}
}

// Perform any rollbacks. This should no-op if there aren't any.
// We set the "immediate" flag here that any backend can pick up on
// to do all rollbacks immediately even if the WAL entries are new.
- log.Printf("[WARN] Requesting RollbackOperation")
+ log.Warn("Requesting RollbackOperation")
req := logical.RollbackRequest(prefix + "/")
req.Data["immediate"] = true
req.ClientToken = client.Token()
@@ -4,13 +4,14 @@ import (
"encoding/base64"
"fmt"
"io/ioutil"
- "log"
"os"
"sort"
"strconv"
"strings"
"time"

+ log "github.com/mgutz/logxi/v1"

"github.com/Azure/azure-sdk-for-go/storage"
"github.com/armon/go-metrics"
"github.com/hashicorp/errwrap"
@@ -24,14 +25,14 @@ var MaxBlobSize = 1024 * 1024 * 4
type AzureBackend struct {
container string
client storage.BlobStorageClient
- logger *log.Logger
+ logger log.Logger
permitPool *PermitPool
}

// newAzureBackend constructs an Azure backend using a pre-existing
// bucket. Credentials can be provided to the backend, sourced
// from the environment, AWS credential files or by IAM role.
- func newAzureBackend(conf map[string]string, logger *log.Logger) (Backend, error) {
+ func newAzureBackend(conf map[string]string, logger log.Logger) (Backend, error) {

container := os.Getenv("AZURE_BLOB_CONTAINER")
if container == "" {
@@ -72,7 +73,9 @@ func newAzureBackend(conf map[string]string, logger *log.Logger) (Backend, error
if err != nil {
return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
}
- logger.Printf("[DEBUG]: azure: max_parallel set to %d", maxParInt)
+ if logger.IsDebug() {
+ logger.Debug("azure: max_parallel set", "max_parallel", maxParInt)
+ }
}

a := &AzureBackend{
@@ -2,11 +2,13 @@ package physical

import (
"fmt"
- "log"
"os"
"testing"
"time"

+ "github.com/hashicorp/vault/helper/logformat"
+ log "github.com/mgutz/logxi/v1"

"github.com/Azure/azure-sdk-for-go/storage"
)

@@ -24,7 +26,8 @@ func TestAzureBackend(t *testing.T) {

cleanupClient, _ := storage.NewBasicClient(accountName, accountKey)

- logger := log.New(os.Stderr, "", log.LstdFlags)
+ logger := logformat.NewVaultLogger(log.LevelTrace)

backend, err := NewBackend("azure", logger, map[string]string{
"container": container,
"accountName": accountName,
@@ -1,13 +1,15 @@
package physical

import (
- "log"
- "os"
"testing"

+ "github.com/hashicorp/vault/helper/logformat"
+ log "github.com/mgutz/logxi/v1"
)

func TestCache(t *testing.T) {
- logger := log.New(os.Stderr, "", log.LstdFlags)
+ logger := logformat.NewVaultLogger(log.LevelTrace)

inm := NewInmem(logger)
cache := NewCache(inm, 0)
testBackend(t, cache)
@@ -15,7 +17,8 @@ func TestCache(t *testing.T) {
}

func TestCache_Purge(t *testing.T) {
- logger := log.New(os.Stderr, "", log.LstdFlags)
+ logger := logformat.NewVaultLogger(log.LevelTrace)

inm := NewInmem(logger)
cache := NewCache(inm, 0)

@@ -3,7 +3,6 @@ package physical
import (
"fmt"
"io/ioutil"
- "log"
"net"
"net/url"
"strconv"
@@ -12,6 +11,8 @@ import (
"sync/atomic"
"time"

+ log "github.com/mgutz/logxi/v1"

"crypto/tls"
"crypto/x509"

@@ -55,7 +56,7 @@ type notifyEvent struct{}
// it allows Vault to run on multiple machines in a highly-available manner.
type ConsulBackend struct {
path string
- logger *log.Logger
+ logger log.Logger
client *api.Client
kv *api.KV
permitPool *PermitPool
@@ -73,21 +74,23 @@ type ConsulBackend struct {

// newConsulBackend constructs a Consul backend using the given API client
// and the prefix in the KV store.
- func newConsulBackend(conf map[string]string, logger *log.Logger) (Backend, error) {
+ func newConsulBackend(conf map[string]string, logger log.Logger) (Backend, error) {
// Get the path in Consul
path, ok := conf["path"]
if !ok {
path = "vault/"
}
- logger.Printf("[DEBUG]: physical/consul: config path set to %v", path)
+ if logger.IsDebug() {
+ logger.Debug("physical/consul: config path set", "path", path)
+ }

// Ensure path is suffixed but not prefixed
if !strings.HasSuffix(path, "/") {
- logger.Printf("[WARN]: physical/consul: appending trailing forward slash to path")
+ logger.Warn("physical/consul: appending trailing forward slash to path")
path += "/"
}
if strings.HasPrefix(path, "/") {
- logger.Printf("[WARN]: physical/consul: trimming path of its forward slash")
+ logger.Warn("physical/consul: trimming path of its forward slash")
path = strings.TrimPrefix(path, "/")
}

@@ -101,19 +104,25 @@ func newConsulBackend(conf map[string]string, logger *log.Logger) (Backend, erro
}
disableRegistration = b
}
- logger.Printf("[DEBUG]: physical/consul: config disable_registration set to %v", disableRegistration)
+ if logger.IsDebug() {
+ logger.Debug("physical/consul: config disable_registration set", "disable_registration", disableRegistration)
+ }

// Get the service name to advertise in Consul
service, ok := conf["service"]
if !ok {
service = DefaultServiceName
}
- logger.Printf("[DEBUG]: physical/consul: config service set to %s", service)
+ if logger.IsDebug() {
+ logger.Debug("physical/consul: config service set", "service", service)
+ }

// Get the additional tags to attach to the registered service name
tags := conf["service_tags"]

- logger.Printf("[DEBUG]: physical/consul: config service_tags set to %s", tags)
+ if logger.IsDebug() {
+ logger.Debug("physical/consul: config service_tags set", "service_tags", tags)
+ }

checkTimeout := defaultCheckTimeout
checkTimeoutStr, ok := conf["check_timeout"]
@@ -129,7 +138,9 @@ func newConsulBackend(conf map[string]string, logger *log.Logger) (Backend, erro
}

checkTimeout = d
- logger.Printf("[DEBUG]: physical/consul: config check_timeout set to %v", d)
+ if logger.IsDebug() {
+ logger.Debug("physical/consul: config check_timeout set", "check_timeout", d)
+ }
}

// Configure the client
@@ -137,15 +148,19 @@ func newConsulBackend(conf map[string]string, logger *log.Logger) (Backend, erro

if addr, ok := conf["address"]; ok {
consulConf.Address = addr
- logger.Printf("[DEBUG]: physical/consul: config address set to %s", addr)
+ if logger.IsDebug() {
+ logger.Debug("physical/consul: config address set", "address", addr)
+ }
}
if scheme, ok := conf["scheme"]; ok {
consulConf.Scheme = scheme
- logger.Printf("[DEBUG]: physical/consul: config scheme set to %s", scheme)
+ if logger.IsDebug() {
+ logger.Debug("physical/consul: config scheme set", "scheme", scheme)
+ }
}
if token, ok := conf["token"]; ok {
consulConf.Token = token
- logger.Printf("[DEBUG]: physical/consul: config token set")
+ logger.Debug("physical/consul: config token set")
}

if consulConf.Scheme == "https" {
@@ -158,7 +173,7 @@ func newConsulBackend(conf map[string]string, logger *log.Logger) (Backend, erro
transport.MaxIdleConnsPerHost = 4
transport.TLSClientConfig = tlsClientConfig
consulConf.HttpClient.Transport = transport
- logger.Printf("[DEBUG]: physical/consul: configured TLS")
+ logger.Debug("physical/consul: configured TLS")
}

client, err := api.NewClient(consulConf)
@@ -173,7 +188,9 @@ func newConsulBackend(conf map[string]string, logger *log.Logger) (Backend, erro
if err != nil {
return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
}
- logger.Printf("[DEBUG]: physical/consul: max_parallel set to %d", maxParInt)
+ if logger.IsDebug() {
+ logger.Debug("physical/consul: max_parallel set", "max_parallel", maxParInt)
+ }
}

// Setup the backend
@@ -394,7 +411,7 @@ func (c *ConsulBackend) NotifyActiveStateChange() error {
default:
// NOTE: If this occurs Vault's active status could be out of
// sync with Consul until reconcileTimer expires.
- c.logger.Printf("[WARN]: physical/consul: Concurrent state change notify dropped")
+ c.logger.Warn("physical/consul: Concurrent state change notify dropped")
}

return nil
@@ -406,7 +423,7 @@ func (c *ConsulBackend) NotifySealedStateChange() error {
default:
// NOTE: If this occurs Vault's sealed status could be out of
// sync with Consul until checkTimer expires.
- c.logger.Printf("[WARN]: physical/consul: Concurrent sealed state change notify dropped")
+ c.logger.Warn("physical/consul: Concurrent sealed state change notify dropped")
}

return nil
@@ -478,7 +495,9 @@ func (c *ConsulBackend) runEventDemuxer(waitGroup *sync.WaitGroup, shutdownCh Sh
for !shutdown {
serviceID, err := c.reconcileConsul(registeredServiceID, activeFunc, sealedFunc)
if err != nil {
- c.logger.Printf("[WARN]: physical/consul: reconcile unable to talk with Consul backend: %v", err)
+ if c.logger.IsWarn() {
+ c.logger.Warn("physical/consul: reconcile unable to talk with Consul backend", "error", err)
+ }
time.Sleep(consulRetryInterval)
continue
}
@@ -502,7 +521,9 @@ func (c *ConsulBackend) runEventDemuxer(waitGroup *sync.WaitGroup, shutdownCh Sh
for !shutdown {
sealed := sealedFunc()
if err := c.runCheck(sealed); err != nil {
- c.logger.Printf("[WARN]: physical/consul: check unable to talk with Consul backend: %v", err)
+ if c.logger.IsWarn() {
+ c.logger.Warn("physical/consul: check unable to talk with Consul backend", "error", err)
+ }
time.Sleep(consulRetryInterval)
continue
}
@@ -511,7 +532,7 @@ func (c *ConsulBackend) runEventDemuxer(waitGroup *sync.WaitGroup, shutdownCh Sh
}()
}
case <-shutdownCh:
- c.logger.Printf("[INFO]: physical/consul: Shutting down consul backend")
+ c.logger.Info("physical/consul: Shutting down consul backend")
shutdown = true
}
}
@@ -519,7 +540,9 @@ func (c *ConsulBackend) runEventDemuxer(waitGroup *sync.WaitGroup, shutdownCh Sh
c.serviceLock.RLock()
defer c.serviceLock.RUnlock()
if err := c.client.Agent().ServiceDeregister(registeredServiceID); err != nil {
- c.logger.Printf("[WARN]: physical/consul: service deregistration failed: %v", err)
+ if c.logger.IsWarn() {
+ c.logger.Warn("physical/consul: service deregistration failed", "error", err)
+ }
}
}

@@ -2,7 +2,6 @@ package physical

import (
"fmt"
- "log"
"math/rand"
"os"
"reflect"
@@ -10,7 +9,10 @@ import (
"testing"
"time"

+ log "github.com/mgutz/logxi/v1"

"github.com/hashicorp/consul/api"
+ "github.com/hashicorp/vault/helper/logformat"
"github.com/hashicorp/vault/helper/strutil"
"github.com/ory-am/dockertest"
)
@@ -33,7 +35,8 @@ func testConsulBackend(t *testing.T) *ConsulBackend {
}

func testConsulBackendConfig(t *testing.T, conf *consulConf) *ConsulBackend {
- logger := log.New(os.Stderr, "", log.LstdFlags)
+ logger := logformat.NewVaultLogger(log.LevelTrace)

be, err := newConsulBackend(*conf, logger)
if err != nil {
t.Fatalf("Expected Consul to initialize: %v", err)
@@ -89,7 +92,8 @@ func TestConsul_ServiceTags(t *testing.T) {
"max_parallel": "4",
"disable_registration": "false",
}
- logger := log.New(os.Stderr, "", log.LstdFlags)
+ logger := logformat.NewVaultLogger(log.LevelTrace)

be, err := newConsulBackend(consulConfig, logger)
if err != nil {
t.Fatal(err)
@@ -172,7 +176,8 @@ func TestConsul_newConsulBackend(t *testing.T) {
}

for _, test := range tests {
- logger := log.New(os.Stderr, "", log.LstdFlags)
+ logger := logformat.NewVaultLogger(log.LevelTrace)

be, err := newConsulBackend(test.consulConfig, logger)
if test.fail {
if err == nil {
@@ -396,7 +401,8 @@ func TestConsulBackend(t *testing.T) {
client.KV().DeleteTree(randPath, nil)
}()

- logger := log.New(os.Stderr, "", log.LstdFlags)
+ logger := logformat.NewVaultLogger(log.LevelTrace)

b, err := NewBackend("consul", logger, map[string]string{
"address": conf.Address,
"path": randPath,
@@ -436,7 +442,8 @@ func TestConsulHABackend(t *testing.T) {
client.KV().DeleteTree(randPath, nil)
}()

- logger := log.New(os.Stderr, "", log.LstdFlags)
+ logger := logformat.NewVaultLogger(log.LevelTrace)

b, err := NewBackend("consul", logger, map[string]string{
"address": conf.Address,
"path": randPath,
@@ -2,7 +2,6 @@ package physical

import (
"fmt"
"log"
"math"
"os"
"path/filepath"

@@ -12,6 +11,8 @@ import (
"sync"
"time"

log "github.com/mgutz/logxi/v1"

"github.com/armon/go-metrics"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"

@@ -65,7 +66,7 @@ type DynamoDBBackend struct {
table string
client *dynamodb.DynamoDB
recovery bool
logger *log.Logger
logger log.Logger
haEnabled bool
permitPool *PermitPool
}

@@ -90,7 +91,7 @@ type DynamoDBLock struct {

// newDynamoDBBackend constructs a DynamoDB backend. If the
// configured DynamoDB table does not exist, it creates it.
func newDynamoDBBackend(conf map[string]string, logger *log.Logger) (Backend, error) {
func newDynamoDBBackend(conf map[string]string, logger log.Logger) (Backend, error) {
table := os.Getenv("AWS_DYNAMODB_TABLE")
if table == "" {
table = conf["table"]

@@ -193,7 +194,9 @@ func newDynamoDBBackend(conf map[string]string, logger *log.Logger) (Backend, er
if err != nil {
return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
}
logger.Printf("[DEBUG]: physical/consul: max_parallel set to %d", maxParInt)
if logger.IsDebug() {
logger.Debug("physical/consul: max_parallel set", "max_parallel", maxParInt)
}
}

return &DynamoDBBackend{
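For context, the max_parallel hunk above boils down to: parse the configured value, wrap parse failures with errwrap, and only emit the debug line when debug logging is enabled. The helper below is a hypothetical, trimmed-down sketch, not code from this commit; the default of 128 is assumed to mirror DefaultParallelOperations, and the "physical/dynamodb" prefix is used for illustration even though the hunk keeps the original "physical/consul" wording.

package physical

import (
	"strconv"

	"github.com/hashicorp/errwrap"
	log "github.com/mgutz/logxi/v1"
)

// parseMaxParallel is a hypothetical helper mirroring the hunk above.
func parseMaxParallel(conf map[string]string, logger log.Logger) (int, error) {
	maxParInt := 128 // assumed default, mirroring DefaultParallelOperations
	if v, ok := conf["max_parallel"]; ok {
		parsed, err := strconv.Atoi(v)
		if err != nil {
			return 0, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
		}
		maxParInt = parsed
		// Guard the debug log so the key/value pair is only built when needed.
		if logger.IsDebug() {
			logger.Debug("physical/dynamodb: max_parallel set", "max_parallel", maxParInt)
		}
	}
	return maxParInt, nil
}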
@@ -2,12 +2,14 @@ package physical

import (
"fmt"
"log"
"math/rand"
"os"
"testing"
"time"

"github.com/hashicorp/vault/helper/logformat"
log "github.com/mgutz/logxi/v1"

"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"

@@ -48,7 +50,8 @@ func TestDynamoDBBackend(t *testing.T) {
})
}()

logger := log.New(os.Stderr, "", log.LstdFlags)
logger := logformat.NewVaultLogger(log.LevelTrace)

b, err := NewBackend("dynamodb", logger, map[string]string{
"access_key": creds.AccessKeyID,
"secret_key": creds.SecretAccessKey,

@@ -97,7 +100,7 @@ func TestDynamoDBHABackend(t *testing.T) {
})
}()

logger := log.New(os.Stderr, "", log.LstdFlags)
logger := logformat.NewVaultLogger(log.LevelTrace)
b, err := NewBackend("dynamodb", logger, map[string]string{
"access_key": creds.AccessKeyID,
"secret_key": creds.SecretAccessKey,
@@ -4,7 +4,6 @@ import (
"encoding/base64"
"errors"
"fmt"
"log"
"net/url"
"os"
"path/filepath"

@@ -12,6 +11,8 @@ import (
"sync"
"time"

log "github.com/mgutz/logxi/v1"

"github.com/armon/go-metrics"
"github.com/coreos/etcd/client"
"github.com/coreos/etcd/pkg/transport"

@@ -68,11 +69,11 @@ type EtcdBackend struct {
path string
kAPI client.KeysAPI
permitPool *PermitPool
logger *log.Logger
logger log.Logger
}

// newEtcdBackend constructs a etcd backend using a given machine address.
func newEtcdBackend(conf map[string]string, logger *log.Logger) (Backend, error) {
func newEtcdBackend(conf map[string]string, logger log.Logger) (Backend, error) {
// Get the etcd path form the configuration.
path, ok := conf["path"]
if !ok {
@@ -2,11 +2,13 @@ package physical

import (
"fmt"
"log"
"os"
"testing"
"time"

"github.com/hashicorp/vault/helper/logformat"
log "github.com/mgutz/logxi/v1"

"github.com/coreos/etcd/client"
"golang.org/x/net/context"
)

@@ -48,7 +50,8 @@ func TestEtcdBackend(t *testing.T) {

// Generate new etcd backend. The etcd address is read from ETCD_ADDR. No
// need to provide it explicitly.
logger := log.New(os.Stderr, "", log.LstdFlags)
logger := logformat.NewVaultLogger(log.LevelTrace)

b, err := NewBackend("etcd", logger, map[string]string{
"path": randPath,
})
@@ -3,10 +3,11 @@ package physical
import (
"encoding/json"
"fmt"
"log"
"os"
"path/filepath"
"sync"

log "github.com/mgutz/logxi/v1"

"github.com/hashicorp/vault/helper/jsonutil"
)

@@ -21,11 +22,11 @@ import (
type FileBackend struct {
Path string
l sync.Mutex
logger *log.Logger
logger log.Logger
}

// newFileBackend constructs a Filebackend using the given directory
func newFileBackend(conf map[string]string, logger *log.Logger) (Backend, error) {
func newFileBackend(conf map[string]string, logger log.Logger) (Backend, error) {
path, ok := conf["path"]
if !ok {
return nil, fmt.Errorf("'path' must be set")
@@ -2,9 +2,11 @@ package physical

import (
"io/ioutil"
"log"
"os"
"testing"

"github.com/hashicorp/vault/helper/logformat"
log "github.com/mgutz/logxi/v1"
)

func TestFileBackend(t *testing.T) {

@@ -14,7 +16,8 @@ func TestFileBackend(t *testing.T) {
}
defer os.RemoveAll(dir)

logger := log.New(os.Stderr, "", log.LstdFlags)
logger := logformat.NewVaultLogger(log.LevelTrace)

b, err := NewBackend("file", logger, map[string]string{
"path": dir,
})
@@ -1,10 +1,11 @@
package physical

import (
"log"
"strings"
"sync"

log "github.com/mgutz/logxi/v1"

"github.com/armon/go-radix"
)

@@ -15,11 +16,11 @@ type InmemBackend struct {
root *radix.Tree
l sync.RWMutex
permitPool *PermitPool
logger *log.Logger
logger log.Logger
}

// NewInmem constructs a new in-memory backend
func NewInmem(logger *log.Logger) *InmemBackend {
func NewInmem(logger log.Logger) *InmemBackend {
in := &InmemBackend{
root: radix.New(),
permitPool: NewPermitPool(DefaultParallelOperations),
@@ -2,8 +2,9 @@ package physical

import (
"fmt"
"log"
"sync"

log "github.com/mgutz/logxi/v1"
)

type InmemHABackend struct {

@@ -11,11 +12,11 @@ type InmemHABackend struct {
locks map[string]string
l sync.Mutex
cond *sync.Cond
logger *log.Logger
logger log.Logger
}

// NewInmemHA constructs a new in-memory HA backend. This is only for testing.
func NewInmemHA(logger *log.Logger) *InmemHABackend {
func NewInmemHA(logger log.Logger) *InmemHABackend {
in := &InmemHABackend{
InmemBackend: *NewInmem(logger),
locks: make(map[string]string),
@@ -1,13 +1,15 @@
package physical

import (
"log"
"os"
"testing"

"github.com/hashicorp/vault/helper/logformat"
log "github.com/mgutz/logxi/v1"
)

func TestInmemHA(t *testing.T) {
logger := log.New(os.Stderr, "", log.LstdFlags)
logger := logformat.NewVaultLogger(log.LevelTrace)

inm := NewInmemHA(logger)
testHABackend(t, inm, inm)
}
@@ -1,13 +1,15 @@
package physical

import (
"log"
"os"
"testing"

"github.com/hashicorp/vault/helper/logformat"
log "github.com/mgutz/logxi/v1"
)

func TestInmem(t *testing.T) {
logger := log.New(os.Stderr, "", log.LstdFlags)
logger := logformat.NewVaultLogger(log.LevelTrace)

inm := NewInmem(logger)
testBackend(t, inm)
testBackend_ListPrefix(t, inm)
@@ -6,12 +6,13 @@ import (
"database/sql"
"fmt"
"io/ioutil"
"log"
"net/url"
"sort"
"strings"
"time"

log "github.com/mgutz/logxi/v1"

"github.com/armon/go-metrics"
mysql "github.com/go-sql-driver/mysql"
)

@@ -26,12 +27,12 @@ type MySQLBackend struct {
dbTable string
client *sql.DB
statements map[string]*sql.Stmt
logger *log.Logger
logger log.Logger
}

// newMySQLBackend constructs a MySQL backend using the given API client and
// server address and credential for accessing mysql database.
func newMySQLBackend(conf map[string]string, logger *log.Logger) (Backend, error) {
func newMySQLBackend(conf map[string]string, logger log.Logger) (Backend, error) {
// Get the MySQL credentials to perform read/write operations.
username, ok := conf["username"]
if !ok || username == "" {
@@ -1,10 +1,12 @@
package physical

import (
"log"
"os"
"testing"

"github.com/hashicorp/vault/helper/logformat"
log "github.com/mgutz/logxi/v1"

_ "github.com/go-sql-driver/mysql"
)

@@ -28,7 +30,8 @@ func TestMySQLBackend(t *testing.T) {
password := os.Getenv("MYSQL_PASSWORD")

// Run vault tests
logger := log.New(os.Stderr, "", log.LstdFlags)
logger := logformat.NewVaultLogger(log.LevelTrace)

b, err := NewBackend("mysql", logger, map[string]string{
"address": address,
"database": database,
@@ -2,8 +2,9 @@ package physical

import (
"fmt"
"log"
"sync"

log "github.com/mgutz/logxi/v1"
)

const DefaultParallelOperations = 128

@@ -96,11 +97,11 @@ type Entry struct {
}

// Factory is the factory function to create a physical backend.
type Factory func(config map[string]string, logger *log.Logger) (Backend, error)
type Factory func(config map[string]string, logger log.Logger) (Backend, error)

// NewBackend returns a new backend with the given type and configuration.
// The backend is looked up in the builtinBackends variable.
func NewBackend(t string, logger *log.Logger, conf map[string]string) (Backend, error) {
func NewBackend(t string, logger log.Logger, conf map[string]string) (Backend, error) {
f, ok := builtinBackends[t]
if !ok {
return nil, fmt.Errorf("unknown physical backend type: %s", t)

@@ -111,10 +112,10 @@ func NewBackend(t string, logger *log.Logger, conf map[string]string) (Backend,
// BuiltinBackends is the list of built-in physical backends that can
// be used with NewBackend.
var builtinBackends = map[string]Factory{
"inmem": func(_ map[string]string, logger *log.Logger) (Backend, error) {
"inmem": func(_ map[string]string, logger log.Logger) (Backend, error) {
return NewInmem(logger), nil
},
"inmem_ha": func(_ map[string]string, logger *log.Logger) (Backend, error) {
"inmem_ha": func(_ map[string]string, logger log.Logger) (Backend, error) {
return NewInmemHA(logger), nil
},
"consul": newConsulBackend,
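Because Factory and NewBackend now accept the logxi log.Logger interface instead of a *log.Logger, callers construct a logger and pass it in by value. A minimal usage sketch under the assumption that the import paths follow this tree's layout; the choice of the "inmem" backend is purely illustrative:

package main

import (
	"fmt"

	"github.com/hashicorp/vault/helper/logformat"
	"github.com/hashicorp/vault/physical"
	log "github.com/mgutz/logxi/v1"
)

func main() {
	// Callers now hand NewBackend a logxi log.Logger (an interface value).
	logger := logformat.NewVaultLogger(log.LevelTrace)

	// "inmem" is one of the built-in factories registered above; it ignores
	// its configuration map, so nil is fine here.
	backend, err := physical.NewBackend("inmem", logger, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("created backend: %T\n", backend)
}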
@@ -1,16 +1,18 @@
package physical

import (
"log"
"os"
"reflect"
"sort"
"testing"
"time"

"github.com/hashicorp/vault/helper/logformat"
log "github.com/mgutz/logxi/v1"
)

func testNewBackend(t *testing.T) {
logger := log.New(os.Stderr, "", log.LstdFlags)
logger := logformat.NewVaultLogger(log.LevelTrace)

_, err := NewBackend("foobar", logger, nil)
if err == nil {
t.Fatalf("expected error")
@ -3,10 +3,11 @@ package physical
|
|||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
log "github.com/mgutz/logxi/v1"
|
||||
|
||||
"github.com/armon/go-metrics"
|
||||
"github.com/lib/pq"
|
||||
)
|
||||
|
@ -20,12 +21,12 @@ type PostgreSQLBackend struct {
|
|||
get_query string
|
||||
delete_query string
|
||||
list_query string
|
||||
logger *log.Logger
|
||||
logger log.Logger
|
||||
}
|
||||
|
||||
// newPostgreSQLBackend constructs a PostgreSQL backend using the given
|
||||
// API client, server address, credentials, and database.
|
||||
func newPostgreSQLBackend(conf map[string]string, logger *log.Logger) (Backend, error) {
|
||||
func newPostgreSQLBackend(conf map[string]string, logger log.Logger) (Backend, error) {
|
||||
// Get the PostgreSQL credentials to perform read/write operations.
|
||||
connURL, ok := conf["connection_url"]
|
||||
if !ok || connURL == "" {
|
||||
|
|
|
@ -1,10 +1,12 @@
|
|||
package physical
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/vault/helper/logformat"
|
||||
log "github.com/mgutz/logxi/v1"
|
||||
|
||||
_ "github.com/lib/pq"
|
||||
)
|
||||
|
||||
|
@ -20,7 +22,8 @@ func TestPostgreSQLBackend(t *testing.T) {
|
|||
}
|
||||
|
||||
// Run vault tests
|
||||
logger := log.New(os.Stderr, "", log.LstdFlags)
|
||||
logger := logformat.NewVaultLogger(log.LevelTrace)
|
||||
|
||||
b, err := NewBackend("postgresql", logger, map[string]string{
|
||||
"connection_url": connURL,
|
||||
"table": table,
|
||||
|
|
|
@ -4,12 +4,13 @@ import (
|
|||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
log "github.com/mgutz/logxi/v1"
|
||||
|
||||
"github.com/armon/go-metrics"
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||
|
@ -23,13 +24,13 @@ import (
|
|||
type S3Backend struct {
|
||||
bucket string
|
||||
client *s3.S3
|
||||
logger *log.Logger
|
||||
logger log.Logger
|
||||
}
|
||||
|
||||
// newS3Backend constructs a S3 backend using a pre-existing
|
||||
// bucket. Credentials can be provided to the backend, sourced
|
||||
// from the environment, AWS credential files or by IAM role.
|
||||
func newS3Backend(conf map[string]string, logger *log.Logger) (Backend, error) {
|
||||
func newS3Backend(conf map[string]string, logger log.Logger) (Backend, error) {
|
||||
|
||||
bucket := os.Getenv("AWS_S3_BUCKET")
|
||||
if bucket == "" {
|
||||
|
|
|
@ -2,12 +2,14 @@ package physical
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"math/rand"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/vault/helper/logformat"
|
||||
log "github.com/mgutz/logxi/v1"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
|
@ -73,7 +75,8 @@ func TestS3Backend(t *testing.T) {
|
|||
}
|
||||
}()
|
||||
|
||||
logger := log.New(os.Stderr, "", log.LstdFlags)
|
||||
logger := logformat.NewVaultLogger(log.LevelTrace)
|
||||
|
||||
b, err := NewBackend("s3", logger, map[string]string{
|
||||
"access_key": creds.AccessKeyID,
|
||||
"secret_key": creds.SecretAccessKey,
|
||||
|
|
|
@ -2,13 +2,14 @@ package physical
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
log "github.com/mgutz/logxi/v1"
|
||||
|
||||
"github.com/armon/go-metrics"
|
||||
"github.com/hashicorp/errwrap"
|
||||
"github.com/hashicorp/go-cleanhttp"
|
||||
|
@ -20,14 +21,14 @@ import (
|
|||
type SwiftBackend struct {
|
||||
container string
|
||||
client *swift.Connection
|
||||
logger *log.Logger
|
||||
logger log.Logger
|
||||
permitPool *PermitPool
|
||||
}
|
||||
|
||||
// newSwiftBackend constructs a Swift backend using a pre-existing
|
||||
// container. Credentials can be provided to the backend, sourced
|
||||
// from the environment.
|
||||
func newSwiftBackend(conf map[string]string, logger *log.Logger) (Backend, error) {
|
||||
func newSwiftBackend(conf map[string]string, logger log.Logger) (Backend, error) {
|
||||
|
||||
username := os.Getenv("OS_USERNAME")
|
||||
if username == "" {
|
||||
|
@ -87,7 +88,9 @@ func newSwiftBackend(conf map[string]string, logger *log.Logger) (Backend, error
|
|||
if err != nil {
|
||||
return nil, errwrap.Wrapf("failed parsing max_parallel parameter: {{err}}", err)
|
||||
}
|
||||
logger.Printf("[DEBUG]: swift: max_parallel set to %d", maxParInt)
|
||||
if logger.IsDebug() {
|
||||
logger.Debug("swift: max_parallel set", "max_parallel", maxParInt)
|
||||
}
|
||||
}
|
||||
|
||||
s := &SwiftBackend{
|
||||
|
|
|
@ -2,12 +2,14 @@ package physical
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
log "github.com/mgutz/logxi/v1"
|
||||
|
||||
"github.com/hashicorp/go-cleanhttp"
|
||||
"github.com/hashicorp/vault/helper/logformat"
|
||||
"github.com/ncw/swift"
|
||||
)
|
||||
|
||||
|
@ -58,7 +60,8 @@ func TestSwiftBackend(t *testing.T) {
|
|||
}
|
||||
}()
|
||||
|
||||
logger := log.New(os.Stderr, "", log.LstdFlags)
|
||||
logger := logformat.NewVaultLogger(log.LevelTrace)
|
||||
|
||||
b, err := NewBackend("swift", logger, map[string]string{
|
||||
"username": username,
|
||||
"password": password,
|
||||
|
|
|
@ -2,13 +2,14 @@ package physical
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
log "github.com/mgutz/logxi/v1"
|
||||
|
||||
"github.com/armon/go-metrics"
|
||||
"github.com/samuel/go-zookeeper/zk"
|
||||
)
|
||||
|
@ -28,12 +29,12 @@ type ZookeeperBackend struct {
|
|||
path string
|
||||
client *zk.Conn
|
||||
acl []zk.ACL
|
||||
logger *log.Logger
|
||||
logger log.Logger
|
||||
}
|
||||
|
||||
// newZookeeperBackend constructs a Zookeeper backend using the given API client
|
||||
// and the prefix in the KV store.
|
||||
func newZookeeperBackend(conf map[string]string, logger *log.Logger) (Backend, error) {
|
||||
func newZookeeperBackend(conf map[string]string, logger log.Logger) (Backend, error) {
|
||||
// Get the path in Zookeeper
|
||||
path, ok := conf["path"]
|
||||
if !ok {
|
||||
|
|
|
@ -2,11 +2,13 @@ package physical
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/vault/helper/logformat"
|
||||
log "github.com/mgutz/logxi/v1"
|
||||
|
||||
"github.com/samuel/go-zookeeper/zk"
|
||||
)
|
||||
|
||||
|
@ -38,7 +40,8 @@ func TestZookeeperBackend(t *testing.T) {
|
|||
client.Close()
|
||||
}()
|
||||
|
||||
logger := log.New(os.Stderr, "", log.LstdFlags)
|
||||
logger := logformat.NewVaultLogger(log.LevelTrace)
|
||||
|
||||
b, err := NewBackend("zookeeper", logger, map[string]string{
|
||||
"address": addr + "," + addr,
|
||||
"path": randPath,
|
||||
|
@ -77,7 +80,8 @@ func TestZookeeperHABackend(t *testing.T) {
|
|||
client.Close()
|
||||
}()
|
||||
|
||||
logger := log.New(os.Stderr, "", log.LstdFlags)
|
||||
logger := logformat.NewVaultLogger(log.LevelTrace)
|
||||
|
||||
b, err := NewBackend("zookeeper", logger, map[string]string{
|
||||
"address": addr + "," + addr,
|
||||
"path": randPath,
|
||||
|
|
|
@ -5,11 +5,12 @@ import (
|
|||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
log "github.com/mgutz/logxi/v1"
|
||||
|
||||
"github.com/armon/go-metrics"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"github.com/hashicorp/go-uuid"
|
||||
|
@ -91,8 +92,9 @@ func (c *Core) enableAudit(entry *MountEntry) error {
|
|||
|
||||
// Register the backend
|
||||
c.auditBroker.Register(entry.Path, backend, view)
|
||||
c.logger.Printf("[INFO] core: enabled audit backend '%s' type: %s",
|
||||
entry.Path, entry.Type)
|
||||
if c.logger.IsInfo() {
|
||||
c.logger.Info("core: enabled audit backend", "path", entry.Path, "type", entry.Type)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -124,7 +126,9 @@ func (c *Core) disableAudit(path string) error {
|
|||
|
||||
// Unmount the backend
|
||||
c.auditBroker.Deregister(path)
|
||||
c.logger.Printf("[INFO] core: disabled audit backend '%s'", path)
|
||||
if c.logger.IsInfo() {
|
||||
c.logger.Info("core: disabled audit backend", "path", path)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -135,7 +139,7 @@ func (c *Core) loadAudits() error {
|
|||
// Load the existing audit table
|
||||
raw, err := c.barrier.Get(coreAuditConfigPath)
|
||||
if err != nil {
|
||||
c.logger.Printf("[ERR] core: failed to read audit table: %v", err)
|
||||
c.logger.Error("core: failed to read audit table", "error", err)
|
||||
return errLoadAuditFailed
|
||||
}
|
||||
|
||||
|
@ -144,7 +148,7 @@ func (c *Core) loadAudits() error {
|
|||
|
||||
if raw != nil {
|
||||
if err := jsonutil.DecodeJSON(raw.Value, auditTable); err != nil {
|
||||
c.logger.Printf("[ERR] core: failed to decode audit table: %v", err)
|
||||
c.logger.Error("core: failed to decode audit table", "error", err)
|
||||
return errLoadAuditFailed
|
||||
}
|
||||
c.audit = auditTable
|
||||
|
@ -186,20 +190,13 @@ func (c *Core) loadAudits() error {
|
|||
// persistAudit is used to persist the audit table after modification
|
||||
func (c *Core) persistAudit(table *MountTable) error {
|
||||
if table.Type != auditTableType {
|
||||
c.logger.Printf(
|
||||
"[ERR] core: given table to persist has type %s but need type %s",
|
||||
table.Type,
|
||||
auditTableType)
|
||||
c.logger.Error("core: given table to persist has wrong type", "actual_type", table.Type, "expected_type", auditTableType)
|
||||
return fmt.Errorf("invalid table type given, not persisting")
|
||||
}
|
||||
|
||||
for _, entry := range table.Entries {
|
||||
if entry.Table != table.Type {
|
||||
c.logger.Printf(
|
||||
"[ERR] core: entry in audit table with path %s has table value %s but is in table %s, refusing to persist",
|
||||
entry.Path,
|
||||
entry.Table,
|
||||
table.Type)
|
||||
c.logger.Error("core: given entry to persist in audit table has wrong table value", "path", entry.Path, "entry_table_type", entry.Table, "actual_type", table.Type)
|
||||
return fmt.Errorf("invalid audit entry found, not persisting")
|
||||
}
|
||||
}
|
||||
|
@ -207,7 +204,7 @@ func (c *Core) persistAudit(table *MountTable) error {
|
|||
// Marshal the table
|
||||
raw, err := json.Marshal(table)
|
||||
if err != nil {
|
||||
c.logger.Printf("[ERR] core: failed to encode audit table: %v", err)
|
||||
c.logger.Error("core: failed to encode audit table", "error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -219,7 +216,7 @@ func (c *Core) persistAudit(table *MountTable) error {
|
|||
|
||||
// Write to the physical backend
|
||||
if err := c.barrier.Put(entry); err != nil {
|
||||
c.logger.Printf("[ERR] core: failed to persist audit table: %v", err)
|
||||
c.logger.Error("core: failed to persist audit table", "error", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
@ -240,9 +237,7 @@ func (c *Core) setupAudits() error {
|
|||
// Initialize the backend
|
||||
audit, err := c.newAuditBackend(entry.Type, view, entry.Options)
|
||||
if err != nil {
|
||||
c.logger.Printf(
|
||||
"[ERR] core: failed to create audit entry %s: %v",
|
||||
entry.Path, err)
|
||||
c.logger.Error("core: failed to create audit entry", "path", entry.Path, "error", err)
|
||||
return errLoadAuditFailed
|
||||
}
|
||||
|
||||
|
@ -275,7 +270,7 @@ func (c *Core) newAuditBackend(t string, view logical.Storage, conf map[string]s
|
|||
HMACType: "hmac-sha256",
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("[ERR] core: unable to generate salt: %v", err)
|
||||
return nil, fmt.Errorf("core: unable to generate salt: %v", err)
|
||||
}
|
||||
return f(&audit.BackendConfig{
|
||||
Salt: salter,
|
||||
|
@ -301,11 +296,11 @@ type backendEntry struct {
|
|||
type AuditBroker struct {
|
||||
l sync.RWMutex
|
||||
backends map[string]backendEntry
|
||||
logger *log.Logger
|
||||
logger log.Logger
|
||||
}
|
||||
|
||||
// NewAuditBroker creates a new audit broker
|
||||
func NewAuditBroker(log *log.Logger) *AuditBroker {
|
||||
func NewAuditBroker(log log.Logger) *AuditBroker {
|
||||
b := &AuditBroker{
|
||||
backends: make(map[string]backendEntry),
|
||||
logger: log,
|
||||
|
@ -358,14 +353,14 @@ func (a *AuditBroker) LogRequest(auth *logical.Auth, req *logical.Request, outer
|
|||
defer a.l.RUnlock()
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
a.logger.Printf("[ERR] audit: panic logging: req path: %s", req.Path)
|
||||
a.logger.Error("audit: panic during logging", "request_path", req.Path, "error", r)
|
||||
retErr = multierror.Append(retErr, fmt.Errorf("panic generating audit log"))
|
||||
}
|
||||
}()
|
||||
|
||||
// All logged requests must have an identifier
|
||||
//if req.ID == "" {
|
||||
// a.logger.Printf("[ERR] audit: missing identifier in request object: %s", req.Path)
|
||||
// a.logger.Error("audit: missing identifier in request object", "request_path", req.Path)
|
||||
// retErr = multierror.Append(retErr, fmt.Errorf("missing identifier in request object: %s", req.Path))
|
||||
// return
|
||||
//}
|
||||
|
@ -377,7 +372,7 @@ func (a *AuditBroker) LogRequest(auth *logical.Auth, req *logical.Request, outer
|
|||
err := be.backend.LogRequest(auth, req, outerErr)
|
||||
metrics.MeasureSince([]string{"audit", name, "log_request"}, start)
|
||||
if err != nil {
|
||||
a.logger.Printf("[ERR] audit: backend '%s' failed to log request: %v", name, err)
|
||||
a.logger.Error("audit: backend failed to log request", "backend", name, "error", err)
|
||||
} else {
|
||||
anyLogged = true
|
||||
}
|
||||
|
@ -398,7 +393,7 @@ func (a *AuditBroker) LogResponse(auth *logical.Auth, req *logical.Request,
|
|||
defer a.l.RUnlock()
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
a.logger.Printf("[ERR] audit: panic logging: req path: %s: %v", req.Path, r)
|
||||
a.logger.Error("audit: panic during logging", "request_path", req.Path, "error", r)
|
||||
reterr = fmt.Errorf("panic generating audit log")
|
||||
}
|
||||
}()
|
||||
|
@ -410,7 +405,7 @@ func (a *AuditBroker) LogResponse(auth *logical.Auth, req *logical.Request,
|
|||
err := be.backend.LogResponse(auth, req, resp, err)
|
||||
metrics.MeasureSince([]string{"audit", name, "log_response"}, start)
|
||||
if err != nil {
|
||||
a.logger.Printf("[ERR] audit: backend '%s' failed to log response: %v", name, err)
|
||||
a.logger.Error("audit: backend failed to log response", "backend", name, "error", err)
|
||||
} else {
|
||||
anyLogged = true
|
||||
}
|
||||
|
|
|
@ -2,8 +2,6 @@ package vault
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
@ -13,7 +11,9 @@ import (
|
|||
"github.com/hashicorp/errwrap"
|
||||
"github.com/hashicorp/go-uuid"
|
||||
"github.com/hashicorp/vault/audit"
|
||||
"github.com/hashicorp/vault/helper/logformat"
|
||||
"github.com/hashicorp/vault/logical"
|
||||
log "github.com/mgutz/logxi/v1"
|
||||
)
|
||||
|
||||
type NoopAudit struct {
|
||||
|
@ -206,7 +206,7 @@ func verifyDefaultAuditTable(t *testing.T, table *MountTable) {
|
|||
}
|
||||
|
||||
func TestAuditBroker_LogRequest(t *testing.T) {
|
||||
l := log.New(os.Stderr, "", log.LstdFlags)
|
||||
l := logformat.NewVaultLogger(log.LevelTrace)
|
||||
b := NewAuditBroker(l)
|
||||
a1 := &NoopAudit{}
|
||||
a2 := &NoopAudit{}
|
||||
|
@ -266,7 +266,7 @@ func TestAuditBroker_LogRequest(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAuditBroker_LogResponse(t *testing.T) {
|
||||
l := log.New(os.Stderr, "", log.LstdFlags)
|
||||
l := logformat.NewVaultLogger(log.LevelTrace)
|
||||
b := NewAuditBroker(l)
|
||||
a1 := &NoopAudit{}
|
||||
a2 := &NoopAudit{}
|
||||
|
|
|
@ -94,8 +94,9 @@ func (c *Core) enableCredential(entry *MountEntry) error {
|
|||
if err := c.router.Mount(backend, path, entry, view); err != nil {
|
||||
return err
|
||||
}
|
||||
c.logger.Printf("[INFO] core: enabled credential backend '%s' type: %s",
|
||||
entry.Path, entry.Type)
|
||||
if c.logger.IsInfo() {
|
||||
c.logger.Info("core: enabled credential backend", "path", entry.Path, "type", entry.Type)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -152,7 +153,9 @@ func (c *Core) disableCredential(path string) error {
|
|||
if err := c.removeCredEntry(path); err != nil {
|
||||
return err
|
||||
}
|
||||
c.logger.Printf("[INFO] core: disabled credential backend '%s'", path)
|
||||
if c.logger.IsInfo() {
|
||||
c.logger.Info("core: disabled credential backend", "path", path)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -198,7 +201,7 @@ func (c *Core) loadCredentials() error {
|
|||
// Load the existing mount table
|
||||
raw, err := c.barrier.Get(coreAuthConfigPath)
|
||||
if err != nil {
|
||||
c.logger.Printf("[ERR] core: failed to read auth table: %v", err)
|
||||
c.logger.Error("core: failed to read auth table", "error", err)
|
||||
return errLoadAuthFailed
|
||||
}
|
||||
|
||||
|
@ -207,7 +210,7 @@ func (c *Core) loadCredentials() error {
|
|||
|
||||
if raw != nil {
|
||||
if err := jsonutil.DecodeJSON(raw.Value, authTable); err != nil {
|
||||
c.logger.Printf("[ERR] core: failed to decode auth table: %v", err)
|
||||
c.logger.Error("core: failed to decode auth table", "error", err)
|
||||
return errLoadAuthFailed
|
||||
}
|
||||
c.auth = authTable
|
||||
|
@ -248,7 +251,7 @@ func (c *Core) loadCredentials() error {
|
|||
// Create and persist the default auth table
|
||||
c.auth = defaultAuthTable()
|
||||
if err := c.persistAuth(c.auth); err != nil {
|
||||
c.logger.Printf("[ERR] core: failed to persist auth table: %v", err)
|
||||
c.logger.Error("core: failed to persist auth table", "error", err)
|
||||
return errLoadAuthFailed
|
||||
}
|
||||
return nil
|
||||
|
@ -257,20 +260,13 @@ func (c *Core) loadCredentials() error {
|
|||
// persistAuth is used to persist the auth table after modification
|
||||
func (c *Core) persistAuth(table *MountTable) error {
|
||||
if table.Type != credentialTableType {
|
||||
c.logger.Printf(
|
||||
"[ERR] core: given table to persist has type %s but need type %s",
|
||||
table.Type,
|
||||
credentialTableType)
|
||||
c.logger.Error("core: given table to persist has wrong type", "actual_type", table.Type, "expected_type", credentialTableType)
|
||||
return fmt.Errorf("invalid table type given, not persisting")
|
||||
}
|
||||
|
||||
for _, entry := range table.Entries {
|
||||
if entry.Table != table.Type {
|
||||
c.logger.Printf(
|
||||
"[ERR] core: entry in auth table with path %s has table value %s but is in table %s, refusing to persist",
|
||||
entry.Path,
|
||||
entry.Table,
|
||||
table.Type)
|
||||
c.logger.Error("core: given entry to persist in auth table has wrong table value", "path", entry.Path, "entry_table_type", entry.Table, "actual_type", table.Type)
|
||||
return fmt.Errorf("invalid auth entry found, not persisting")
|
||||
}
|
||||
}
|
||||
|
@ -278,7 +274,7 @@ func (c *Core) persistAuth(table *MountTable) error {
|
|||
// Marshal the table
|
||||
raw, err := json.Marshal(table)
|
||||
if err != nil {
|
||||
c.logger.Printf("[ERR] core: failed to encode auth table: %v", err)
|
||||
c.logger.Error("core: failed to encode auth table", "error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -290,7 +286,7 @@ func (c *Core) persistAuth(table *MountTable) error {
|
|||
|
||||
// Write to the physical backend
|
||||
if err := c.barrier.Put(entry); err != nil {
|
||||
c.logger.Printf("[ERR] core: failed to persist auth table: %v", err)
|
||||
c.logger.Error("core: failed to persist auth table", "error", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
@ -320,9 +316,7 @@ func (c *Core) setupCredentials() error {
|
|||
// Initialize the backend
|
||||
backend, err = c.newCredentialBackend(entry.Type, c.mountEntrySysView(entry), view, nil)
|
||||
if err != nil {
|
||||
c.logger.Printf(
|
||||
"[ERR] core: failed to create credential entry %s: %v",
|
||||
entry.Path, err)
|
||||
c.logger.Error("core: failed to create credential entry", "path", entry.Path, "error", err)
|
||||
return errLoadAuthFailed
|
||||
}
|
||||
|
||||
|
@ -330,7 +324,7 @@ func (c *Core) setupCredentials() error {
|
|||
path := credentialRoutePrefix + entry.Path
|
||||
err = c.router.Mount(backend, path, entry, view)
|
||||
if err != nil {
|
||||
c.logger.Printf("[ERR] core: failed to mount auth entry %s: %v", entry.Path, err)
|
||||
c.logger.Error("core: failed to mount auth entry", "path", entry.Path, "error", err)
|
||||
return errLoadAuthFailed
|
||||
}
|
||||
|
||||
|
|
|
@ -3,19 +3,20 @@ package vault
|
|||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"log"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/vault/helper/logformat"
|
||||
"github.com/hashicorp/vault/physical"
|
||||
log "github.com/mgutz/logxi/v1"
|
||||
)
|
||||
|
||||
var (
|
||||
logger = log.New(os.Stderr, "", log.LstdFlags)
|
||||
logger = logformat.NewVaultLogger(log.LevelTrace)
|
||||
)
|
||||
|
||||
// mockBarrier returns a physical backend, security barrier, and master key
|
||||
func mockBarrier(t *testing.T) (physical.Backend, SecurityBarrier, []byte) {
|
||||
|
||||
inm := physical.NewInmem(logger)
|
||||
b, err := NewAESGCMBarrier(inm)
|
||||
if err != nil {
|
||||
|
@ -30,6 +31,7 @@ func mockBarrier(t *testing.T) (physical.Backend, SecurityBarrier, []byte) {
|
|||
}
|
||||
|
||||
func TestAESGCMBarrier_Basic(t *testing.T) {
|
||||
|
||||
inm := physical.NewInmem(logger)
|
||||
b, err := NewAESGCMBarrier(inm)
|
||||
if err != nil {
|
||||
|
@ -39,6 +41,7 @@ func TestAESGCMBarrier_Basic(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAESGCMBarrier_Rotate(t *testing.T) {
|
||||
|
||||
inm := physical.NewInmem(logger)
|
||||
b, err := NewAESGCMBarrier(inm)
|
||||
if err != nil {
|
||||
|
@ -48,6 +51,7 @@ func TestAESGCMBarrier_Rotate(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAESGCMBarrier_Upgrade(t *testing.T) {
|
||||
|
||||
inm := physical.NewInmem(logger)
|
||||
b1, err := NewAESGCMBarrier(inm)
|
||||
if err != nil {
|
||||
|
@ -61,6 +65,7 @@ func TestAESGCMBarrier_Upgrade(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAESGCMBarrier_Upgrade_Rekey(t *testing.T) {
|
||||
|
||||
inm := physical.NewInmem(logger)
|
||||
b1, err := NewAESGCMBarrier(inm)
|
||||
if err != nil {
|
||||
|
@ -74,6 +79,7 @@ func TestAESGCMBarrier_Upgrade_Rekey(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAESGCMBarrier_Rekey(t *testing.T) {
|
||||
|
||||
inm := physical.NewInmem(logger)
|
||||
b, err := NewAESGCMBarrier(inm)
|
||||
if err != nil {
|
||||
|
@ -85,6 +91,7 @@ func TestAESGCMBarrier_Rekey(t *testing.T) {
|
|||
// Test an upgrade from the old (0.1) barrier/init to the new
|
||||
// core/keyring style
|
||||
func TestAESGCMBarrier_BackwardsCompatible(t *testing.T) {
|
||||
|
||||
inm := physical.NewInmem(logger)
|
||||
b, err := NewAESGCMBarrier(inm)
|
||||
if err != nil {
|
||||
|
@ -164,6 +171,7 @@ func TestAESGCMBarrier_BackwardsCompatible(t *testing.T) {
|
|||
|
||||
// Verify data sent through is encrypted
|
||||
func TestAESGCMBarrier_Confidential(t *testing.T) {
|
||||
|
||||
inm := physical.NewInmem(logger)
|
||||
b, err := NewAESGCMBarrier(inm)
|
||||
if err != nil {
|
||||
|
@ -201,6 +209,7 @@ func TestAESGCMBarrier_Confidential(t *testing.T) {
|
|||
|
||||
// Verify data sent through cannot be tampered with
|
||||
func TestAESGCMBarrier_Integrity(t *testing.T) {
|
||||
|
||||
inm := physical.NewInmem(logger)
|
||||
b, err := NewAESGCMBarrier(inm)
|
||||
if err != nil {
|
||||
|
@ -236,6 +245,7 @@ func TestAESGCMBarrier_Integrity(t *testing.T) {
|
|||
|
||||
// Verify data sent through cannot be moved
|
||||
func TestAESGCMBarrier_MoveIntegrityV1(t *testing.T) {
|
||||
|
||||
inm := physical.NewInmem(logger)
|
||||
b, err := NewAESGCMBarrier(inm)
|
||||
if err != nil {
|
||||
|
@ -274,6 +284,7 @@ func TestAESGCMBarrier_MoveIntegrityV1(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAESGCMBarrier_MoveIntegrityV2(t *testing.T) {
|
||||
|
||||
inm := physical.NewInmem(logger)
|
||||
b, err := NewAESGCMBarrier(inm)
|
||||
if err != nil {
|
||||
|
@ -312,6 +323,7 @@ func TestAESGCMBarrier_MoveIntegrityV2(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestAESGCMBarrier_UpgradeV1toV2(t *testing.T) {
|
||||
|
||||
inm := physical.NewInmem(logger)
|
||||
b, err := NewAESGCMBarrier(inm)
|
||||
if err != nil {
|
||||
|
@ -364,6 +376,7 @@ func TestAESGCMBarrier_UpgradeV1toV2(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestEncrypt_Unique(t *testing.T) {
|
||||
|
||||
inm := physical.NewInmem(logger)
|
||||
b, err := NewAESGCMBarrier(inm)
|
||||
if err != nil {
|
||||
|
@ -391,6 +404,7 @@ func TestEncrypt_Unique(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestInitialize_KeyLength(t *testing.T) {
|
||||
|
||||
inm := physical.NewInmem(logger)
|
||||
b, err := NewAESGCMBarrier(inm)
|
||||
if err != nil {
|
||||
|
|
|
@ -10,13 +10,14 @@ import (
|
|||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/big"
|
||||
mathrand "math/rand"
|
||||
"net"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
log "github.com/mgutz/logxi/v1"
|
||||
|
||||
"golang.org/x/net/http2"
|
||||
|
||||
"github.com/hashicorp/go-uuid"
|
||||
|
@ -95,19 +96,19 @@ func (c *Core) loadClusterTLS(adv activeAdvertisement) error {
|
|||
return nil
|
||||
|
||||
case adv.ClusterKeyParams == nil:
|
||||
c.logger.Printf("[ERR] core/loadClusterTLS: no key params found")
|
||||
c.logger.Error("core/loadClusterTLS: no key params found")
|
||||
return fmt.Errorf("no local cluster key params found")
|
||||
|
||||
case adv.ClusterKeyParams.X == nil, adv.ClusterKeyParams.Y == nil, adv.ClusterKeyParams.D == nil:
|
||||
c.logger.Printf("[ERR] core/loadClusterTLS: failed to parse local cluster key due to missing params")
|
||||
c.logger.Error("core/loadClusterTLS: failed to parse local cluster key due to missing params")
|
||||
return fmt.Errorf("failed to parse local cluster key")
|
||||
|
||||
case adv.ClusterKeyParams.Type != corePrivateKeyTypeP521:
|
||||
c.logger.Printf("[ERR] core/loadClusterTLS: unknown local cluster key type %v", adv.ClusterKeyParams.Type)
|
||||
c.logger.Error("core/loadClusterTLS: unknown local cluster key type", "key_type", adv.ClusterKeyParams.Type)
|
||||
return fmt.Errorf("failed to find valid local cluster key type")
|
||||
|
||||
case adv.ClusterCert == nil || len(adv.ClusterCert) == 0:
|
||||
c.logger.Printf("[ERR] core/loadClusterTLS: no local cluster cert found")
|
||||
c.logger.Error("core/loadClusterTLS: no local cluster cert found")
|
||||
return fmt.Errorf("no local cluster cert found")
|
||||
|
||||
}
|
||||
|
@ -129,7 +130,7 @@ func (c *Core) loadClusterTLS(adv activeAdvertisement) error {
|
|||
|
||||
cert, err := x509.ParseCertificate(c.localClusterCert)
|
||||
if err != nil {
|
||||
c.logger.Printf("[ERR] core/loadClusterTLS: failed parsing local cluster certificate: %v", err)
|
||||
c.logger.Error("core/loadClusterTLS: failed parsing local cluster certificate", "error", err)
|
||||
return fmt.Errorf("error parsing local cluster certificate: %v", err)
|
||||
}
|
||||
|
||||
|
@ -145,7 +146,7 @@ func (c *Core) setupCluster() error {
|
|||
// Check if storage index is already present or not
|
||||
cluster, err := c.Cluster()
|
||||
if err != nil {
|
||||
c.logger.Printf("[ERR] core: failed to get cluster details: %v", err)
|
||||
c.logger.Error("core: failed to get cluster details", "error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -158,10 +159,10 @@ func (c *Core) setupCluster() error {
|
|||
if cluster.Name == "" {
|
||||
// If cluster name is not supplied, generate one
|
||||
if c.clusterName == "" {
|
||||
c.logger.Printf("[TRACE] core: cluster name not found/set, generating new")
|
||||
c.logger.Trace("core: cluster name not found/set, generating new")
|
||||
clusterNameBytes, err := uuid.GenerateRandomBytes(4)
|
||||
if err != nil {
|
||||
c.logger.Printf("[ERR] core: failed to generate cluster name: %v", err)
|
||||
c.logger.Error("core: failed to generate cluster name", "error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -169,19 +170,23 @@ func (c *Core) setupCluster() error {
|
|||
}
|
||||
|
||||
cluster.Name = c.clusterName
|
||||
c.logger.Printf("[DEBUG] core: cluster name set to %s", cluster.Name)
|
||||
if c.logger.IsDebug() {
|
||||
c.logger.Debug("core: cluster name set", "name", cluster.Name)
|
||||
}
|
||||
modified = true
|
||||
}
|
||||
|
||||
if cluster.ID == "" {
|
||||
c.logger.Printf("[TRACE] core: cluster ID not found, generating new")
|
||||
c.logger.Trace("core: cluster ID not found, generating new")
|
||||
// Generate a clusterID
|
||||
cluster.ID, err = uuid.GenerateUUID()
|
||||
if err != nil {
|
||||
c.logger.Printf("[ERR] core: failed to generate cluster identifier: %v", err)
|
||||
c.logger.Error("core: failed to generate cluster identifier", "error", err)
|
||||
return err
|
||||
}
|
||||
c.logger.Printf("[DEBUG] core: cluster ID set to %s", cluster.ID)
|
||||
if c.logger.IsDebug() {
|
||||
c.logger.Debug("core: cluster ID set", "id", cluster.ID)
|
||||
}
|
||||
modified = true
|
||||
}
|
||||
|
||||
|
@ -193,10 +198,10 @@ func (c *Core) setupCluster() error {
|
|||
|
||||
// Create a private key
|
||||
{
|
||||
c.logger.Printf("[TRACE] core: generating cluster private key")
|
||||
c.logger.Trace("core: generating cluster private key")
|
||||
key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
|
||||
if err != nil {
|
||||
c.logger.Printf("[ERR] core: failed to generate local cluster key: %v", err)
|
||||
c.logger.Error("core: failed to generate local cluster key", "error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -205,7 +210,7 @@ func (c *Core) setupCluster() error {
|
|||
|
||||
// Create a certificate
|
||||
{
|
||||
c.logger.Printf("[TRACE] core: generating local cluster certificate")
|
||||
c.logger.Trace("core: generating local cluster certificate")
|
||||
|
||||
host, err := uuid.GenerateUUID()
|
||||
if err != nil {
|
||||
|
@ -232,13 +237,13 @@ func (c *Core) setupCluster() error {
|
|||
|
||||
certBytes, err := x509.CreateCertificate(rand.Reader, template, template, c.localClusterPrivateKey.Public(), c.localClusterPrivateKey)
|
||||
if err != nil {
|
||||
c.logger.Printf("[ERR] core: error generating self-signed cert: %v", err)
|
||||
c.logger.Error("core: error generating self-signed cert", "error", err)
|
||||
return fmt.Errorf("unable to generate local cluster certificate: %v", err)
|
||||
}
|
||||
|
||||
_, err = x509.ParseCertificate(certBytes)
|
||||
if err != nil {
|
||||
c.logger.Printf("[ERR] core: error parsing self-signed cert: %v", err)
|
||||
c.logger.Error("core: error parsing self-signed cert", "error", err)
|
||||
return fmt.Errorf("error parsing generated certificate: %v", err)
|
||||
}
|
||||
|
||||
|
@ -250,7 +255,7 @@ func (c *Core) setupCluster() error {
|
|||
// Encode the cluster information into as a JSON string
|
||||
rawCluster, err := json.Marshal(cluster)
|
||||
if err != nil {
|
||||
c.logger.Printf("[ERR] core: failed to encode cluster details: %v", err)
|
||||
c.logger.Error("core: failed to encode cluster details", "error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -260,7 +265,7 @@ func (c *Core) setupCluster() error {
|
|||
Value: rawCluster,
|
||||
})
|
||||
if err != nil {
|
||||
c.logger.Printf("[ERR] core: failed to store cluster details: %v", err)
|
||||
c.logger.Error("core: failed to store cluster details", "error", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -279,21 +284,21 @@ func (c *Core) SetClusterSetupFuncs(handler func() (http.Handler, http.Handler))
|
|||
// be built in the same mechanism or started independently.
|
||||
func (c *Core) startClusterListener() error {
|
||||
if c.clusterHandlerSetupFunc == nil {
|
||||
c.logger.Printf("[ERR] core/startClusterListener: cluster handler setup function has not been set")
|
||||
c.logger.Error("core/startClusterListener: cluster handler setup function has not been set")
|
||||
return fmt.Errorf("cluster handler setup function has not been set")
|
||||
}
|
||||
|
||||
if c.clusterAddr == "" {
|
||||
c.logger.Printf("[INFO] core/startClusterListener: clustering disabled, not starting listeners")
|
||||
c.logger.Info("core/startClusterListener: clustering disabled, not starting listeners")
|
||||
return nil
|
||||
}
|
||||
|
||||
if c.clusterListenerAddrs == nil || len(c.clusterListenerAddrs) == 0 {
|
||||
c.logger.Printf("[WARN] core/startClusterListener: clustering not disabled but no addresses to listen on")
|
||||
c.logger.Warn("core/startClusterListener: clustering not disabled but no addresses to listen on")
|
||||
return fmt.Errorf("cluster addresses not found")
|
||||
}
|
||||
|
||||
c.logger.Printf("[TRACE] core/startClusterListener: starting listeners")
|
||||
c.logger.Trace("core/startClusterListener: starting listeners")
|
||||
|
||||
err := c.startForwarding()
|
||||
if err != nil {
|
||||
|
@ -307,11 +312,11 @@ func (c *Core) startClusterListener() error {
|
|||
// assumed that the state lock is held while this is run.
|
||||
func (c *Core) stopClusterListener() {
|
||||
if c.clusterAddr == "" {
|
||||
c.logger.Printf("[TRACE] core/stopClusterListener: clustering disabled, nothing to do")
|
||||
c.logger.Trace("core/stopClusterListener: clustering disabled, nothing to do")
|
||||
return
|
||||
}
|
||||
|
||||
c.logger.Printf("[INFO] core/stopClusterListener: stopping listeners")
|
||||
c.logger.Info("core/stopClusterListener: stopping listeners")
|
||||
|
||||
// Tell the goroutine managing the listeners to perform the shutdown
|
||||
// process
|
||||
|
@ -320,9 +325,9 @@ func (c *Core) stopClusterListener() {
|
|||
// The reason for this loop-de-loop is that we may be unsealing again
|
||||
// quickly, and if the listeners are not yet closed, we will get socket
|
||||
// bind errors. This ensures proper ordering.
|
||||
c.logger.Printf("[TRACE] core/stopClusterListener: waiting for success notification")
|
||||
c.logger.Trace("core/stopClusterListener: waiting for success notification")
|
||||
<-c.clusterListenerShutdownSuccessCh
|
||||
c.logger.Printf("[INFO] core/stopClusterListener: success")
|
||||
c.logger.Info("core/stopClusterListener: success")
|
||||
}
|
||||
|
||||
// ClusterTLSConfig generates a TLS configuration based on the local cluster
|
||||
|
@ -375,7 +380,7 @@ func (c *Core) SetClusterListenerAddrs(addrs []*net.TCPAddr) {
|
|||
// WrapHandlerForClustering takes in Vault's HTTP handler and returns a setup
|
||||
// function that returns both the original handler and one wrapped with cluster
|
||||
// methods
|
||||
func WrapHandlerForClustering(handler http.Handler, logger *log.Logger) func() (http.Handler, http.Handler) {
|
||||
func WrapHandlerForClustering(handler http.Handler, logger log.Logger) func() (http.Handler, http.Handler) {
|
||||
return func() (http.Handler, http.Handler) {
|
||||
// This mux handles cluster functions (right now, only forwarded requests)
|
||||
mux := http.NewServeMux()
|
||||
|
@ -383,7 +388,7 @@ func WrapHandlerForClustering(handler http.Handler, logger *log.Logger) func() (
|
|||
freq, err := forwarding.ParseForwardedHTTPRequest(req)
|
||||
if err != nil {
|
||||
if logger != nil {
|
||||
logger.Printf("[ERR] http/forwarded-request-server: error parsing forwarded request: %v", err)
|
||||
logger.Error("http/forwarded-request-server: error parsing forwarded request", "error", err)
|
||||
}
|
||||
|
||||
w.Header().Add("Content-Type", "application/json")
|
||||
|
@ -415,7 +420,7 @@ func WrapHandlerForClustering(handler http.Handler, logger *log.Logger) func() (
|
|||
|
||||
// WrapListenersForClustering takes in Vault's cluster addresses and returns a
|
||||
// setup function that creates the new listeners
|
||||
func WrapListenersForClustering(addrs []string, logger *log.Logger) func() ([]net.Listener, error) {
|
||||
func WrapListenersForClustering(addrs []string, logger log.Logger) func() ([]net.Listener, error) {
|
||||
return func() ([]net.Listener, error) {
|
||||
ret := make([]net.Listener, 0, len(addrs))
|
||||
// Loop over the existing listeners and start listeners on appropriate ports
|
||||
|
|
|
@ -4,15 +4,15 @@ import (
|
|||
"bytes"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/vault/helper/logformat"
|
||||
"github.com/hashicorp/vault/logical"
|
||||
"github.com/hashicorp/vault/physical"
|
||||
log "github.com/mgutz/logxi/v1"
|
||||
)
|
||||
|
||||
func TestClusterFetching(t *testing.T) {
|
||||
|
@ -34,7 +34,8 @@ func TestClusterFetching(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestClusterHAFetching(t *testing.T) {
|
||||
logger = log.New(os.Stderr, "", log.LstdFlags)
|
||||
logger := logformat.NewVaultLogger(log.LevelTrace)
|
||||
|
||||
redirect := "http://127.0.0.1:8200"
|
||||
|
||||
c, err := NewCore(&CoreConfig{
|
||||
|
|
134 vault/core.go
@ -7,14 +7,14 @@ import (
|
|||
"crypto/x509"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
log "github.com/mgutz/logxi/v1"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
|
||||
|
@ -25,6 +25,7 @@ import (
|
|||
"github.com/hashicorp/vault/audit"
|
||||
"github.com/hashicorp/vault/helper/errutil"
|
||||
"github.com/hashicorp/vault/helper/jsonutil"
|
||||
"github.com/hashicorp/vault/helper/logformat"
|
||||
"github.com/hashicorp/vault/helper/mlock"
|
||||
"github.com/hashicorp/vault/logical"
|
||||
"github.com/hashicorp/vault/physical"
|
||||
|
@ -235,7 +236,7 @@ type Core struct {
|
|||
defaultLeaseTTL time.Duration
|
||||
maxLeaseTTL time.Duration
|
||||
|
||||
logger *log.Logger
|
||||
logger log.Logger
|
||||
|
||||
// cachingDisabled indicates whether caches are disabled
|
||||
cachingDisabled bool
|
||||
|
@ -298,7 +299,7 @@ type CoreConfig struct {
|
|||
|
||||
Seal Seal `json:"seal" structs:"seal" mapstructure:"seal"`
|
||||
|
||||
Logger *log.Logger `json:"logger" structs:"logger" mapstructure:"logger"`
|
||||
Logger log.Logger `json:"logger" structs:"logger" mapstructure:"logger"`
|
||||
|
||||
// Disables the LRU cache on the physical backend
|
||||
DisableCache bool `json:"disable_cache" structs:"disable_cache" mapstructure:"disable_cache"`
|
||||
|
@ -386,7 +387,7 @@ func NewCore(conf *CoreConfig) (*Core, error) {
|
|||
|
||||
// Make a default logger if not provided
|
||||
if conf.Logger == nil {
|
||||
conf.Logger = log.New(os.Stderr, "", log.LstdFlags)
|
||||
conf.Logger = logformat.NewVaultLogger(log.LevelTrace)
|
||||
}
|
||||
|
||||
// Setup the core
|
||||
|
@ -479,14 +480,14 @@ func (c *Core) fetchACLandTokenEntry(req *logical.Request) (*ACL, *TokenEntry, e
|
|||
}
|
||||
|
||||
if c.tokenStore == nil {
|
||||
c.logger.Printf("[ERR] core: token store is unavailable")
|
||||
c.logger.Error("core: token store is unavailable")
|
||||
return nil, nil, ErrInternalError
|
||||
}
|
||||
|
||||
// Resolve the token policy
|
||||
te, err := c.tokenStore.Lookup(req.ClientToken)
|
||||
if err != nil {
|
||||
c.logger.Printf("[ERR] core: failed to lookup token: %v", err)
|
||||
c.logger.Error("core: failed to lookup token", "error", err)
|
||||
return nil, nil, ErrInternalError
|
||||
}
|
||||
|
||||
|
@ -498,7 +499,7 @@ func (c *Core) fetchACLandTokenEntry(req *logical.Request) (*ACL, *TokenEntry, e
|
|||
// Construct the corresponding ACL object
|
||||
acl, err := c.policyStore.ACL(te.Policies...)
|
||||
if err != nil {
|
||||
c.logger.Printf("[ERR] core: failed to construct ACL: %v", err)
|
||||
c.logger.Error("core: failed to construct ACL", "error", err)
|
||||
return nil, nil, ErrInternalError
|
||||
}
|
||||
|
||||
|
@ -530,7 +531,7 @@ func (c *Core) checkToken(req *logical.Request) (*logical.Auth, *TokenEntry, err
|
|||
case nil:
|
||||
// Continue on
|
||||
default:
|
||||
c.logger.Printf("[ERR] core: failed to run existence check: %v", err)
|
||||
c.logger.Error("core: failed to run existence check", "error", err)
|
||||
if _, ok := err.(errutil.UserError); ok {
|
||||
return nil, nil, err
|
||||
} else {
|
||||
|
@ -747,8 +748,9 @@ func (c *Core) Unseal(key []byte) (bool, error) {
|
|||
|
||||
// Check if we don't have enough keys to unlock
|
||||
if len(c.unlockParts) < config.SecretThreshold {
|
||||
c.logger.Printf("[DEBUG] core: cannot unseal, have %d of %d keys",
|
||||
len(c.unlockParts), config.SecretThreshold)
|
||||
if c.logger.IsDebug() {
|
||||
c.logger.Debug("core: cannot unseal, not enough keys", "keys", len(c.unlockParts), "threshold", config.SecretThreshold)
}
return false, nil
}

@@ -770,22 +772,24 @@ func (c *Core) Unseal(key []byte) (bool, error) {
if err := c.barrier.Unseal(masterKey); err != nil {
return false, err
}
c.logger.Printf("[INFO] core: vault is unsealed")
if c.logger.IsInfo() {
c.logger.Info("core: vault is unsealed")
}

// Do post-unseal setup if HA is not enabled
if c.ha == nil {
// We still need to set up cluster info even if it's not part of a
// cluster right now
if err := c.setupCluster(); err != nil {
c.logger.Printf("[ERR] core: cluster setup failed: %v", err)
c.logger.Error("core: cluster setup failed", "error", err)
c.barrier.Seal()
c.logger.Printf("[WARN] core: vault is sealed")
c.logger.Warn("core: vault is sealed")
return false, err
}
if err := c.postUnseal(); err != nil {
c.logger.Printf("[ERR] core: post-unseal setup failed: %v", err)
c.logger.Error("core: post-unseal setup failed", "error", err)
c.barrier.Seal()
c.logger.Printf("[WARN] core: vault is sealed")
c.logger.Warn("core: vault is sealed")
return false, err
}
c.standby = false
@@ -803,7 +807,9 @@ func (c *Core) Unseal(key []byte) (bool, error) {
sd, ok := c.ha.(physical.ServiceDiscovery)
if ok {
if err := sd.NotifySealedStateChange(); err != nil {
c.logger.Printf("[WARN] core: failed to notify unsealed status: %v", err)
if c.logger.IsWarn() {
c.logger.Warn("core: failed to notify unsealed status", "error", err)
}
}
}
}
@@ -866,7 +872,7 @@ func (c *Core) sealInitCommon(req *logical.Request) (retErr error) {
// just returning with an error and recommending a vault restart, which
// essentially does the same thing.
if c.standby {
c.logger.Printf("[ERR] core: vault cannot seal when in standby mode; please restart instead")
c.logger.Error("core: vault cannot seal when in standby mode; please restart instead")
retErr = multierror.Append(retErr, errors.New("vault cannot seal when in standby mode; please restart instead"))
return retErr
}
@@ -883,8 +889,7 @@ func (c *Core) sealInitCommon(req *logical.Request) (retErr error) {
}

if err := c.auditBroker.LogRequest(auth, req, nil); err != nil {
c.logger.Printf("[ERR] core: failed to audit request with path %s: %v",
req.Path, err)
c.logger.Error("core: failed to audit request", "request_path", req.Path, "error", err)
retErr = multierror.Append(retErr, errors.New("failed to audit request, cannot continue"))
return retErr
}
@@ -894,7 +899,7 @@ func (c *Core) sealInitCommon(req *logical.Request) (retErr error) {
if te != nil {
te, err = c.tokenStore.UseToken(te)
if err != nil {
c.logger.Printf("[ERR] core: failed to use token: %v", err)
c.logger.Error("core: failed to use token", "error", err)
retErr = multierror.Append(retErr, ErrInternalError)
return retErr
}
@@ -908,7 +913,7 @@ func (c *Core) sealInitCommon(req *logical.Request) (retErr error) {
defer func(id string) {
err = c.tokenStore.Revoke(id)
if err != nil {
c.logger.Printf("[ERR] core: token needed revocation after seal but failed to revoke: %v", err)
c.logger.Error("core: token needed revocation after seal but failed to revoke", "error", err)
retErr = multierror.Append(retErr, ErrInternalError)
}
}(te.ID)
@@ -970,8 +975,7 @@ func (c *Core) StepDown(req *logical.Request) (retErr error) {
}

if err := c.auditBroker.LogRequest(auth, req, nil); err != nil {
c.logger.Printf("[ERR] core: failed to audit request with path %s: %v",
req.Path, err)
c.logger.Error("core: failed to audit request", "request_path", req.Path, "error", err)
retErr = multierror.Append(retErr, errors.New("failed to audit request, cannot continue"))
return retErr
}
@@ -980,7 +984,7 @@ func (c *Core) StepDown(req *logical.Request) (retErr error) {
if te != nil {
te, err = c.tokenStore.UseToken(te)
if err != nil {
c.logger.Printf("[ERR] core: failed to use token: %v", err)
c.logger.Error("core: failed to use token", "error", err)
retErr = multierror.Append(retErr, ErrInternalError)
return retErr
}
@@ -994,7 +998,7 @@ func (c *Core) StepDown(req *logical.Request) (retErr error) {
defer func(id string) {
err = c.tokenStore.Revoke(id)
if err != nil {
c.logger.Printf("[ERR] core: token needed revocation after step-down but failed to revoke: %v", err)
c.logger.Error("core: token needed revocation after step-down but failed to revoke", "error", err)
retErr = multierror.Append(retErr, ErrInternalError)
}
}(te.ID)
@@ -1017,7 +1021,7 @@ func (c *Core) StepDown(req *logical.Request) (retErr error) {
select {
case c.manualStepDownCh <- struct{}{}:
default:
c.logger.Printf("[WARN] core: manual step-down operation already queued")
c.logger.Warn("core: manual step-down operation already queued")
}

return retErr
@@ -1032,7 +1036,7 @@ func (c *Core) sealInternal() error {
// Do pre-seal teardown if HA is not enabled
if c.ha == nil {
if err := c.preSeal(); err != nil {
c.logger.Printf("[ERR] core: pre-seal teardown failed: %v", err)
c.logger.Error("core: pre-seal teardown failed", "error", err)
return fmt.Errorf("internal error")
}
} else {
@@ -1048,13 +1052,15 @@ func (c *Core) sealInternal() error {
if err := c.barrier.Seal(); err != nil {
return err
}
c.logger.Printf("[INFO] core: vault is sealed")
c.logger.Info("core: vault is sealed")

if c.ha != nil {
sd, ok := c.ha.(physical.ServiceDiscovery)
if ok {
if err := sd.NotifySealedStateChange(); err != nil {
c.logger.Printf("[WARN] core: failed to notify sealed status: %v", err)
if c.logger.IsWarn() {
c.logger.Warn("core: failed to notify sealed status", "error", err)
}
}
}
}
@@ -1073,7 +1079,7 @@ func (c *Core) postUnseal() (retErr error) {
c.preSeal()
}
}()
c.logger.Printf("[INFO] core: post-unseal setup starting")
c.logger.Info("core: post-unseal setup starting")
if cache, ok := c.physical.(*physical.Cache); ok {
cache.Purge()
}
@@ -1126,7 +1132,7 @@ func (c *Core) postUnseal() (retErr error) {
}
c.metricsCh = make(chan struct{})
go c.emitMetrics(c.metricsCh)
c.logger.Printf("[INFO] core: post-unseal setup complete")
c.logger.Info("core: post-unseal setup complete")
return nil
}

@@ -1134,7 +1140,7 @@ func (c *Core) postUnseal() (retErr error) {
// for any state teardown required.
func (c *Core) preSeal() error {
defer metrics.MeasureSince([]string{"core", "pre_seal"}, time.Now())
c.logger.Printf("[INFO] core: pre-seal teardown starting")
c.logger.Info("core: pre-seal teardown starting")

// Clear any rekey progress
c.barrierRekeyConfig = nil
@@ -1152,27 +1158,27 @@ func (c *Core) preSeal() error {
}

if err := c.teardownAudits(); err != nil {
result = multierror.Append(result, errwrap.Wrapf("[ERR] error tearing down audits: {{err}}", err))
result = multierror.Append(result, errwrap.Wrapf("error tearing down audits: {{err}}", err))
}
if err := c.stopExpiration(); err != nil {
result = multierror.Append(result, errwrap.Wrapf("[ERR] error stopping expiration: {{err}}", err))
result = multierror.Append(result, errwrap.Wrapf("error stopping expiration: {{err}}", err))
}
if err := c.teardownCredentials(); err != nil {
result = multierror.Append(result, errwrap.Wrapf("[ERR] error tearing down credentials: {{err}}", err))
result = multierror.Append(result, errwrap.Wrapf("error tearing down credentials: {{err}}", err))
}
if err := c.teardownPolicyStore(); err != nil {
result = multierror.Append(result, errwrap.Wrapf("[ERR] error tearing down policy store: {{err}}", err))
result = multierror.Append(result, errwrap.Wrapf("error tearing down policy store: {{err}}", err))
}
if err := c.stopRollback(); err != nil {
result = multierror.Append(result, errwrap.Wrapf("[ERR] error stopping rollback: {{err}}", err))
result = multierror.Append(result, errwrap.Wrapf("error stopping rollback: {{err}}", err))
}
if err := c.unloadMounts(); err != nil {
result = multierror.Append(result, errwrap.Wrapf("[ERR] error unloading mounts: {{err}}", err))
result = multierror.Append(result, errwrap.Wrapf("error unloading mounts: {{err}}", err))
}
if cache, ok := c.physical.(*physical.Cache); ok {
cache.Purge()
}
c.logger.Printf("[INFO] core: pre-seal teardown complete")
c.logger.Info("core: pre-seal teardown complete")
return result
}

@@ -1182,7 +1188,7 @@ func (c *Core) preSeal() error {
func (c *Core) runStandby(doneCh, stopCh, manualStepDownCh chan struct{}) {
defer close(doneCh)
defer close(manualStepDownCh)
c.logger.Printf("[INFO] core: entering standby mode")
c.logger.Info("core: entering standby mode")

// Monitor for key rotation
keyRotateDone := make(chan struct{})
@@ -1204,12 +1210,12 @@ func (c *Core) runStandby(doneCh, stopCh, manualStepDownCh chan struct{}) {
// Create a lock
uuid, err := uuid.GenerateUUID()
if err != nil {
c.logger.Printf("[ERR] core: failed to generate uuid: %v", err)
c.logger.Error("core: failed to generate uuid", "error", err)
return
}
lock, err := c.ha.LockWith(coreLockPath, uuid)
if err != nil {
c.logger.Printf("[ERR] core: failed to create lock: %v", err)
c.logger.Error("core: failed to create lock", "error", err)
return
}

@@ -1220,7 +1226,7 @@ func (c *Core) runStandby(doneCh, stopCh, manualStepDownCh chan struct{}) {
if leaderLostCh == nil {
return
}
c.logger.Printf("[INFO] core: acquired lock, enabling active operation")
c.logger.Info("core: acquired lock, enabling active operation")

// This is used later to log a metrics event; this can be helpful to
// detect flapping
@@ -1231,7 +1237,7 @@ func (c *Core) runStandby(doneCh, stopCh, manualStepDownCh chan struct{}) {
c.stateLock.Lock()
if err := c.setupCluster(); err != nil {
c.stateLock.Unlock()
c.logger.Printf("[ERR] core: cluster setup failed: %v", err)
c.logger.Error("core: cluster setup failed", "error", err)
lock.Unlock()
metrics.MeasureSince([]string{"core", "leadership_setup_failed"}, activeTime)
continue
@@ -1240,7 +1246,7 @@ func (c *Core) runStandby(doneCh, stopCh, manualStepDownCh chan struct{}) {
// Advertise as leader
if err := c.advertiseLeader(uuid, leaderLostCh); err != nil {
c.stateLock.Unlock()
c.logger.Printf("[ERR] core: leader advertisement setup failed: %v", err)
c.logger.Error("core: leader advertisement setup failed", "error", err)
lock.Unlock()
metrics.MeasureSince([]string{"core", "leadership_setup_failed"}, activeTime)
continue
@@ -1255,7 +1261,7 @@ func (c *Core) runStandby(doneCh, stopCh, manualStepDownCh chan struct{}) {

// Handle a failure to unseal
if err != nil {
c.logger.Printf("[ERR] core: post-unseal setup failed: %v", err)
c.logger.Error("core: post-unseal setup failed", "error", err)
lock.Unlock()
metrics.MeasureSince([]string{"core", "leadership_setup_failed"}, activeTime)
continue
@@ -1265,11 +1271,11 @@ func (c *Core) runStandby(doneCh, stopCh, manualStepDownCh chan struct{}) {
var manualStepDown bool
select {
case <-leaderLostCh:
c.logger.Printf("[WARN] core: leadership lost, stopping active operation")
c.logger.Warn("core: leadership lost, stopping active operation")
case <-stopCh:
c.logger.Printf("[WARN] core: stopping active operation")
c.logger.Warn("core: stopping active operation")
case <-manualStepDownCh:
c.logger.Printf("[WARN] core: stepping down from active operation to standby")
c.logger.Warn("core: stepping down from active operation to standby")
manualStepDown = true
}

@@ -1277,7 +1283,7 @@ func (c *Core) runStandby(doneCh, stopCh, manualStepDownCh chan struct{}) {

// Clear ourself as leader
if err := c.clearLeader(uuid); err != nil {
c.logger.Printf("[ERR] core: clearing leader advertisement failed: %v", err)
c.logger.Error("core: clearing leader advertisement failed", "error", err)
}

// Attempt the pre-seal process
@@ -1291,7 +1297,7 @@ func (c *Core) runStandby(doneCh, stopCh, manualStepDownCh chan struct{}) {

// Check for a failure to prepare to seal
if preSealErr != nil {
c.logger.Printf("[ERR] core: pre-seal teardown failed: %v", err)
c.logger.Error("core: pre-seal teardown failed", "error", err)
}

// If we've merely stepped down, we could instantly grab the lock
@@ -1317,7 +1323,7 @@ func (c *Core) periodicCheckKeyUpgrade(doneCh, stopCh chan struct{}) {
}

if err := c.checkKeyUpgrades(); err != nil {
c.logger.Printf("[ERR] core: key rotation periodic upgrade check failed: %v", err)
c.logger.Error("core: key rotation periodic upgrade check failed", "error", err)
}
case <-stopCh:
return
@@ -1339,7 +1345,9 @@ func (c *Core) checkKeyUpgrades() error {
if !didUpgrade {
break
}
c.logger.Printf("[INFO] core: upgraded to key term %d", newTerm)
if c.logger.IsInfo() {
c.logger.Info("core: upgraded to new key term", "term", newTerm)
}
}
return nil
}
@@ -1363,7 +1371,7 @@ func (c *Core) scheduleUpgradeCleanup() error {
for _, upgrade := range upgrades {
path := fmt.Sprintf("%s%s", keyringUpgradePrefix, upgrade)
if err := c.barrier.Delete(path); err != nil {
c.logger.Printf("[ERR] core: failed to cleanup upgrade: %s", path)
c.logger.Error("core: failed to cleanup upgrade", "path", path, "error", err)
}
}
})
@@ -1380,7 +1388,7 @@ func (c *Core) acquireLock(lock physical.Lock, stopCh <-chan struct{}) <-chan st
}

// Retry the acquisition
c.logger.Printf("[ERR] core: failed to acquire lock: %v", err)
c.logger.Error("core: failed to acquire lock", "error", err)
select {
case <-time.After(lockRetryInterval):
case <-stopCh:
@@ -1398,7 +1406,7 @@ func (c *Core) advertiseLeader(uuid string, leaderLostCh <-chan struct{}) error
case *ecdsa.PrivateKey:
key = c.localClusterPrivateKey.(*ecdsa.PrivateKey)
default:
c.logger.Printf("[ERR] core: unknown cluster private key type %T", c.localClusterPrivateKey)
c.logger.Error("core: unknown cluster private key type", "key_type", fmt.Sprintf("%T", c.localClusterPrivateKey))
return fmt.Errorf("unknown cluster private key type %T", c.localClusterPrivateKey)
}

@@ -1431,7 +1439,9 @@ func (c *Core) advertiseLeader(uuid string, leaderLostCh <-chan struct{}) error
sd, ok := c.ha.(physical.ServiceDiscovery)
if ok {
if err := sd.NotifyActiveStateChange(); err != nil {
c.logger.Printf("[WARN] core: failed to notify active status: %v", err)
if c.logger.IsWarn() {
c.logger.Warn("core: failed to notify active status", "error", err)
}
}
}
return nil
@@ -1440,7 +1450,7 @@ func (c *Core) advertiseLeader(uuid string, leaderLostCh <-chan struct{}) error
func (c *Core) cleanLeaderPrefix(uuid string, leaderLostCh <-chan struct{}) {
keys, err := c.barrier.List(coreLeaderPrefix)
if err != nil {
c.logger.Printf("[ERR] core: failed to list entries in core/leader: %v", err)
c.logger.Error("core: failed to list entries in core/leader", "error", err)
return
}
for len(keys) > 0 {
@@ -1465,7 +1475,9 @@ func (c *Core) clearLeader(uuid string) error {
sd, ok := c.ha.(physical.ServiceDiscovery)
if ok {
if err := sd.NotifyActiveStateChange(); err != nil {
c.logger.Printf("[WARN] core: failed to notify standby status: %v", err)
if c.logger.IsWarn() {
c.logger.Warn("core: failed to notify standby status", "error", err)
}
}
}

@@ -1494,7 +1506,7 @@ func (c *Core) SealAccess() *SealAccess {
return sa
}

func (c *Core) Logger() *log.Logger {
func (c *Core) Logger() log.Logger {
return c.logger
}

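For reference, the logging conversion applied throughout the hunks above follows one pattern. The sketch below only illustrates the mgutz/logxi v1 API as it is used in this commit; it is not part of the diff, and the example logger name, error value, and the have/want counters are placeholders.

package main

import (
	"errors"

	log "github.com/mgutz/logxi/v1"
)

func main() {
	logger := log.New("core")
	err := errors.New("example failure")

	// Before: level tag embedded in an unstructured format string, e.g.
	// logger.Printf("[ERR] core: cluster setup failed: %v", err)
	// After: a leveled call with structured key/value pairs.
	logger.Error("core: cluster setup failed", "error", err)

	// Debug/info lines with non-trivial arguments are wrapped in an Is*
	// guard so the arguments are not built when the level is disabled.
	have, want := 1, 3
	if logger.IsDebug() {
		logger.Debug("core: cannot unseal, not enough keys", "keys", have, "threshold", want)
	}
}
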
@@ -1,8 +1,6 @@
package vault

import (
"log"
"os"
"reflect"
"testing"
"time"
@@ -10,8 +8,10 @@ import (
"github.com/hashicorp/errwrap"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/audit"
"github.com/hashicorp/vault/helper/logformat"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/physical"
log "github.com/mgutz/logxi/v1"
)

var (
@@ -20,7 +20,8 @@ var (
)

func TestNewCore_badRedirectAddr(t *testing.T) {
logger = log.New(os.Stderr, "", log.LstdFlags)
logger = logformat.NewVaultLogger(log.LevelTrace)

conf := &CoreConfig{
RedirectAddr: "127.0.0.1:8200",
Physical: physical.NewInmem(logger),
@@ -955,7 +956,8 @@ func TestCore_LimitedUseToken(t *testing.T) {

func TestCore_Standby_Seal(t *testing.T) {
// Create the first core and initialize it
logger = log.New(os.Stderr, "", log.LstdFlags)
logger = logformat.NewVaultLogger(log.LevelTrace)

inm := physical.NewInmem(logger)
inmha := physical.NewInmemHA(logger)
redirectOriginal := "http://127.0.0.1:8200"
@@ -1061,7 +1063,8 @@ func TestCore_Standby_Seal(t *testing.T) {

func TestCore_StepDown(t *testing.T) {
// Create the first core and initialize it
logger = log.New(os.Stderr, "", log.LstdFlags)
logger = logformat.NewVaultLogger(log.LevelTrace)

inm := physical.NewInmem(logger)
inmha := physical.NewInmemHA(logger)
redirectOriginal := "http://127.0.0.1:8200"
@@ -1247,7 +1250,8 @@ func TestCore_StepDown(t *testing.T) {

func TestCore_CleanLeaderPrefix(t *testing.T) {
// Create the first core and initialize it
logger = log.New(os.Stderr, "", log.LstdFlags)
logger = logformat.NewVaultLogger(log.LevelTrace)

inm := physical.NewInmem(logger)
inmha := physical.NewInmemHA(logger)
redirectOriginal := "http://127.0.0.1:8200"
@@ -1404,13 +1408,15 @@ func TestCore_CleanLeaderPrefix(t *testing.T) {
}

func TestCore_Standby(t *testing.T) {
logger = log.New(os.Stderr, "", log.LstdFlags)
logger = logformat.NewVaultLogger(log.LevelTrace)

inmha := physical.NewInmemHA(logger)
testCore_Standby_Common(t, inmha, inmha)
}

func TestCore_Standby_SeparateHA(t *testing.T) {
logger = log.New(os.Stderr, "", log.LstdFlags)
logger = logformat.NewVaultLogger(log.LevelTrace)

testCore_Standby_Common(t, physical.NewInmemHA(logger), physical.NewInmemHA(logger))
}

@@ -1950,7 +1956,8 @@ func TestCore_HandleRequest_MountPoint(t *testing.T) {

func TestCore_Standby_Rotate(t *testing.T) {
// Create the first core and initialize it
logger = log.New(os.Stderr, "", log.LstdFlags)
logger = logformat.NewVaultLogger(log.LevelTrace)

inm := physical.NewInmem(logger)
inmha := physical.NewInmemHA(logger)
redirectOriginal := "http://127.0.0.1:8200"

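A minimal test-setup sketch that mirrors the pattern the tests above now use. NewCore, CoreConfig and DisableMlock are assumed from the surrounding Vault test code rather than shown in this diff; only the logger construction comes from the hunks above.

logger := logformat.NewVaultLogger(log.LevelTrace)

inm := physical.NewInmem(logger)
conf := &CoreConfig{
	Physical:     inm,
	DisableMlock: true,
}
c, err := NewCore(conf)
if err != nil {
	t.Fatalf("err: %v", err)
}
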
@@ -25,20 +25,20 @@ func (d dynamicSystemView) SudoPrivilege(path string, token string) bool {
// Resolve the token policy
te, err := d.core.tokenStore.Lookup(token)
if err != nil {
d.core.logger.Printf("[ERR] core: failed to lookup token: %v", err)
d.core.logger.Error("core: failed to lookup token", "error", err)
return false
}

// Ensure the token is valid
if te == nil {
d.core.logger.Printf("[ERR] entry not found for token: %s", token)
d.core.logger.Error("entry not found for given token")
return false
}

// Construct the corresponding ACL object
acl, err := d.core.policyStore.ACL(te.Policies...)
if err != nil {
d.core.logger.Printf("[ERR] failed to retrieve ACL for policies [%#v]: %s", te.Policies, err)
d.core.logger.Error("failed to retrieve ACL for token's policies", "token_policies", te.Policies, "error", err)
return false
}

@@ -3,13 +3,13 @@ package vault
import (
"encoding/json"
"fmt"
"log"
"os"
"path"
"strings"
"sync"
"time"

log "github.com/mgutz/logxi/v1"

"github.com/armon/go-metrics"
"github.com/hashicorp/go-uuid"
"github.com/hashicorp/vault/helper/jsonutil"
@@ -52,7 +52,7 @@ type ExpirationManager struct {
idView *BarrierView
tokenView *BarrierView
tokenStore *TokenStore
logger *log.Logger
logger log.Logger

pending map[string]*time.Timer
pendingLock sync.Mutex
@@ -60,9 +60,10 @@ type ExpirationManager struct {

// NewExpirationManager creates a new ExpirationManager that is backed
// using a given view, and uses the provided router for revocation.
func NewExpirationManager(router *Router, view *BarrierView, ts *TokenStore, logger *log.Logger) *ExpirationManager {
func NewExpirationManager(router *Router, view *BarrierView, ts *TokenStore, logger log.Logger) *ExpirationManager {
if logger == nil {
logger = log.New(os.Stderr, "", log.LstdFlags)
logger = log.New("expiration_manager")

}
exp := &ExpirationManager{
router: router,
@@ -153,7 +154,9 @@ func (m *ExpirationManager) Restore() error {
})
}
if len(m.pending) > 0 {
m.logger.Printf("[INFO] expire: restored %d leases", len(m.pending))
if m.logger.IsInfo() {
m.logger.Info("expire: leases restored", "restored_lease_count", len(m.pending))
}
}
return nil
}
@@ -199,7 +202,9 @@ func (m *ExpirationManager) revokeCommon(leaseID string, force, skipToken bool)
if !force {
return err
} else {
m.logger.Printf("[WARN]: revocation from the backend failed, but in force mode so ignoring; error was: %s", err)
if m.logger.IsWarn() {
m.logger.Warn("revocation from the backend failed, but in force mode so ignoring", "error", err)
}
}
}
}
@@ -565,13 +570,15 @@ func (m *ExpirationManager) expireID(leaseID string) {
for attempt := uint(0); attempt < maxRevokeAttempts; attempt++ {
err := m.Revoke(leaseID)
if err == nil {
m.logger.Printf("[INFO] expire: revoked '%s'", leaseID)
if m.logger.IsInfo() {
m.logger.Info("expire: revoked lease", "lease_id", leaseID)
}
return
}
m.logger.Printf("[ERR] expire: failed to revoke '%s': %v", leaseID, err)
m.logger.Error("expire: failed to revoke lease", "lease_id", leaseID, "error", err)
time.Sleep((1 << attempt) * revokeRetryBase)
}
m.logger.Printf("[ERR] expire: maximum revoke attempts for '%s' reached", leaseID)
m.logger.Error("expire: maximum revoke attempts reached", "lease_id", leaseID)
}

// revokeEntry is used to attempt revocation of an internal entry

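A caller-side sketch of the new NewExpirationManager signature. The router, view and tokenStore values are placeholders; logformat.NewVaultLogger, the log.Logger parameter type, and the nil fallback to log.New("expiration_manager") are taken from the hunks above.

logger := logformat.NewVaultLogger(log.LevelInfo)
exp := NewExpirationManager(router, view, tokenStore, logger)

// Passing nil still works; the manager then builds its own
// "expiration_manager" logxi logger internally.
exp = NewExpirationManager(router, view, tokenStore, nil)
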
@@ -128,8 +128,9 @@ func (c *Core) GenerateRootInit(otp, pgpKey string) error {
PGPFingerprint: fingerprint,
}

c.logger.Printf("[INFO] core: root generation initialized (nonce: %s)",
c.generateRootConfig.Nonce)
if c.logger.IsInfo() {
c.logger.Info("core: root generation initialized", "nonce", c.generateRootConfig.Nonce)
}
return nil
}

@@ -200,8 +201,9 @@ func (c *Core) GenerateRootUpdate(key []byte, nonce string) (*GenerateRootResult

// Check if we don't have enough keys to unlock
if len(c.generateRootProgress) < config.SecretThreshold {
c.logger.Printf("[DEBUG] core: cannot generate root, have %d of %d keys",
progress, config.SecretThreshold)
if c.logger.IsDebug() {
c.logger.Debug("core: cannot generate root, not enough keys", "keys", progress, "threshold", config.SecretThreshold)
}
return &GenerateRootResult{
Progress: progress,
Required: config.SecretThreshold,
@@ -225,35 +227,35 @@ func (c *Core) GenerateRootUpdate(key []byte, nonce string) (*GenerateRootResult
// Verify the master key
if c.seal.RecoveryKeySupported() {
if err := c.seal.VerifyRecoveryKey(masterKey); err != nil {
c.logger.Printf("[ERR] core: root generation aborted, recovery key verification failed: %v", err)
c.logger.Error("core: root generation aborted, recovery key verification failed", "error", err)
return nil, err
}
} else {
if err := c.barrier.VerifyMaster(masterKey); err != nil {
c.logger.Printf("[ERR] core: root generation aborted, master key verification failed: %v", err)
c.logger.Error("core: root generation aborted, master key verification failed", "error", err)
return nil, err
}
}

te, err := c.tokenStore.rootToken()
if err != nil {
c.logger.Printf("[ERR] core: root token generation failed: %v", err)
c.logger.Error("core: root token generation failed", "error", err)
return nil, err
}
if te == nil {
c.logger.Printf("[ERR] core: got nil token entry back from root generation")
c.logger.Error("core: got nil token entry back from root generation")
return nil, fmt.Errorf("got nil token entry back from root generation")
}

uuidBytes, err := uuid.ParseUUID(te.ID)
if err != nil {
c.tokenStore.Revoke(te.ID)
c.logger.Printf("[ERR] core: error getting generated token bytes: %v", err)
c.logger.Error("core: error getting generated token bytes", "error", err)
return nil, err
}
if uuidBytes == nil {
c.tokenStore.Revoke(te.ID)
c.logger.Printf("[ERR] core: got nil parsed UUID bytes")
c.logger.Error("core: got nil parsed UUID bytes")
return nil, fmt.Errorf("got nil parsed UUID bytes")
}

@@ -267,7 +269,7 @@ func (c *Core) GenerateRootUpdate(key []byte, nonce string) (*GenerateRootResult
tokenBytes, err = xor.XORBase64(c.generateRootConfig.OTP, base64.StdEncoding.EncodeToString(uuidBytes))
if err != nil {
c.tokenStore.Revoke(te.ID)
c.logger.Printf("[ERR] core: xor of root token failed: %v", err)
c.logger.Error("core: xor of root token failed", "error", err)
return nil, err
}

@@ -275,7 +277,7 @@ func (c *Core) GenerateRootUpdate(key []byte, nonce string) (*GenerateRootResult
_, tokenBytesArr, err := pgpkeys.EncryptShares([][]byte{[]byte(te.ID)}, []string{c.generateRootConfig.PGPKey})
if err != nil {
c.tokenStore.Revoke(te.ID)
c.logger.Printf("[ERR] core: error encrypting new root token: %v", err)
c.logger.Error("core: error encrypting new root token", "error", err)
return nil, err
}
tokenBytes = tokenBytesArr[0]
@@ -292,8 +294,9 @@ func (c *Core) GenerateRootUpdate(key []byte, nonce string) (*GenerateRootResult
PGPFingerprint: c.generateRootConfig.PGPFingerprint,
}

c.logger.Printf("[INFO] core: root generation finished (nonce: %s)",
c.generateRootConfig.Nonce)
if c.logger.IsInfo() {
c.logger.Info("core: root generation finished", "nonce", c.generateRootConfig.Nonce)
}

c.generateRootProgress = nil
c.generateRootConfig = nil

@@ -21,11 +21,11 @@ func (c *Core) Initialized() (bool, error) {
// Check the barrier first
init, err := c.barrier.Initialized()
if err != nil {
c.logger.Printf("[ERR] core: barrier init check failed: %v", err)
c.logger.Error("core: barrier init check failed", "error", err)
return false, err
}
if !init {
c.logger.Printf("[INFO] core: security barrier not initialized")
c.logger.Info("core: security barrier not initialized")
return false, nil
}

@@ -35,7 +35,7 @@ func (c *Core) Initialized() (bool, error) {
return false, err
}
if sealConf == nil {
return false, fmt.Errorf("[ERR] core: barrier reports initialized but no seal configuration found")
return false, fmt.Errorf("core: barrier reports initialized but no seal configuration found")
}

return true, nil
@@ -56,7 +56,7 @@ func (c *Core) generateShares(sc *SealConfig) ([]byte, [][]byte, error) {
// Split the master key using the Shamir algorithm
shares, err := shamir.Split(masterKey, sc.SecretShares, sc.SecretThreshold)
if err != nil {
return nil, nil, fmt.Errorf("failed to generate shares: %v", err)
return nil, nil, fmt.Errorf("failed to generate barrier shares: %v", err)
}
unsealKeys = shares
}
@@ -91,14 +91,14 @@ func (c *Core) Initialize(barrierConfig, recoveryConfig *SealConfig) (*InitResul

// Check if the seal configuration is valid
if err := recoveryConfig.Validate(); err != nil {
c.logger.Printf("[ERR] core: invalid recovery configuration: %v", err)
c.logger.Error("core: invalid recovery configuration", "error", err)
return nil, fmt.Errorf("invalid recovery configuration: %v", err)
}
}

// Check if the seal configuration is valid
if err := barrierConfig.Validate(); err != nil {
c.logger.Printf("[ERR] core: invalid seal configuration: %v", err)
c.logger.Error("core: invalid seal configuration", "error", err)
return nil, fmt.Errorf("invalid seal configuration: %v", err)
}

@@ -117,19 +117,19 @@ func (c *Core) Initialize(barrierConfig, recoveryConfig *SealConfig) (*InitResul

err = c.seal.Init()
if err != nil {
c.logger.Printf("[ERR] core: failed to initialize seal: %v", err)
c.logger.Error("core: failed to initialize seal", "error", err)
return nil, fmt.Errorf("error initializing seal: %v", err)
}

err = c.seal.SetBarrierConfig(barrierConfig)
if err != nil {
c.logger.Printf("[ERR] core: failed to save barrier configuration: %v", err)
c.logger.Error("core: failed to save barrier configuration", "error", err)
return nil, fmt.Errorf("barrier configuration saving failed: %v", err)
}

barrierKey, barrierUnsealKeys, err := c.generateShares(barrierConfig)
if err != nil {
c.logger.Printf("[ERR] core: %v", err)
c.logger.Error("core: error generating shares", "error", err)
return nil, err
}

@@ -142,7 +142,7 @@ func (c *Core) Initialize(barrierConfig, recoveryConfig *SealConfig) (*InitResul
barrierUnsealKeys = barrierUnsealKeys[1:]
}
if err := c.seal.SetStoredKeys(keysToStore); err != nil {
c.logger.Printf("[ERR] core: failed to store keys: %v", err)
c.logger.Error("core: failed to store keys", "error", err)
return nil, fmt.Errorf("failed to store keys: %v", err)
}
}
@@ -153,33 +153,34 @@ func (c *Core) Initialize(barrierConfig, recoveryConfig *SealConfig) (*InitResul

// Initialize the barrier
if err := c.barrier.Initialize(barrierKey); err != nil {
c.logger.Printf("[ERR] core: failed to initialize barrier: %v", err)
c.logger.Error("core: failed to initialize barrier", "error", err)
return nil, fmt.Errorf("failed to initialize barrier: %v", err)
}
c.logger.Printf("[INFO] core: security barrier initialized (shares: %d, threshold %d)",
barrierConfig.SecretShares, barrierConfig.SecretThreshold)
if c.logger.IsInfo() {
c.logger.Info("core: security barrier initialized", "shares", barrierConfig.SecretShares, "threshold", barrierConfig.SecretThreshold)
}

// Unseal the barrier
if err := c.barrier.Unseal(barrierKey); err != nil {
c.logger.Printf("[ERR] core: failed to unseal barrier: %v", err)
c.logger.Error("core: failed to unseal barrier", "error", err)
return nil, fmt.Errorf("failed to unseal barrier: %v", err)
}

// Ensure the barrier is re-sealed
defer func() {
if err := c.barrier.Seal(); err != nil {
c.logger.Printf("[ERR] core: failed to seal barrier: %v", err)
c.logger.Error("core: failed to seal barrier", "error", err)
}
}()

// Perform initial setup
if err := c.setupCluster(); err != nil {
c.stateLock.Unlock()
c.logger.Printf("[ERR] core: cluster setup failed during init: %v", err)
c.logger.Error("core: cluster setup failed during init", "error", err)
return nil, err
}
if err := c.postUnseal(); err != nil {
c.logger.Printf("[ERR] core: post-unseal setup failed during init: %v", err)
c.logger.Error("core: post-unseal setup failed during init", "error", err)
return nil, err
}

@@ -189,14 +190,14 @@ func (c *Core) Initialize(barrierConfig, recoveryConfig *SealConfig) (*InitResul
if c.seal.RecoveryKeySupported() {
err = c.seal.SetRecoveryConfig(recoveryConfig)
if err != nil {
c.logger.Printf("[ERR] core: failed to save recovery configuration: %v", err)
c.logger.Error("core: failed to save recovery configuration", "error", err)
return nil, fmt.Errorf("recovery configuration saving failed: %v", err)
}

if recoveryConfig.SecretShares > 0 {
recoveryKey, recoveryUnsealKeys, err := c.generateShares(recoveryConfig)
if err != nil {
c.logger.Printf("[ERR] core: %v", err)
c.logger.Error("core: failed to generate recovery shares", "error", err)
return nil, err
}

@@ -212,15 +213,15 @@ func (c *Core) Initialize(barrierConfig, recoveryConfig *SealConfig) (*InitResul
// Generate a new root token
rootToken, err := c.tokenStore.rootToken()
if err != nil {
c.logger.Printf("[ERR] core: root token generation failed: %v", err)
c.logger.Error("core: root token generation failed", "error", err)
return nil, err
}
results.RootToken = rootToken.ID
c.logger.Printf("[INFO] core: root token generated")
c.logger.Info("core: root token generated")

// Prepare to re-seal
if err := c.preSeal(); err != nil {
c.logger.Printf("[ERR] core: pre-seal teardown failed: %v", err)
c.logger.Error("core: pre-seal teardown failed", "error", err)
return nil, err
}

@@ -234,28 +235,28 @@ func (c *Core) UnsealWithStoredKeys() error {

sealed, err := c.Sealed()
if err != nil {
c.logger.Printf("[ERR] core: error checking sealed status in auto-unseal: %s", err)
c.logger.Error("core: error checking sealed status in auto-unseal", "error", err)
return fmt.Errorf("error checking sealed status in auto-unseal: %s", err)
}
if !sealed {
return nil
}

c.logger.Printf("[INFO] core: stored unseal keys supported, attempting fetch")
c.logger.Info("core: stored unseal keys supported, attempting fetch")
keys, err := c.seal.GetStoredKeys()
if err != nil {
c.logger.Printf("[ERR] core: fetching stored unseal keys failed: %v", err)
c.logger.Error("core: fetching stored unseal keys failed", "error", err)
return &NonFatalError{Err: fmt.Errorf("fetching stored unseal keys failed: %v", err)}
}
if len(keys) == 0 {
c.logger.Printf("[WARN] core: stored unseal key(s) supported but none found")
c.logger.Warn("core: stored unseal key(s) supported but none found")
} else {
unsealed := false
keysUsed := 0
for _, key := range keys {
unsealed, err = c.Unseal(key)
if err != nil {
c.logger.Printf("[ERR] core: unseal with stored unseal key failed: %v", err)
c.logger.Error("core: unseal with stored unseal key failed", "error", err)
return &NonFatalError{Err: fmt.Errorf("unseal with stored key failed: %v", err)}
}
keysUsed += 1
@@ -264,9 +265,13 @@ func (c *Core) UnsealWithStoredKeys() error {
}
}
if !unsealed {
c.logger.Printf("[WARN] core: %d stored unseal key(s) used but Vault not unsealed yet", keysUsed)
if c.logger.IsWarn() {
c.logger.Warn("core: stored unseal key(s) used but Vault not unsealed yet", "stored_keys_used", keysUsed)
}
} else {
c.logger.Printf("[INFO] core: successfully unsealed with %d stored key(s)", keysUsed)
if c.logger.IsInfo() {
c.logger.Info("core: successfully unsealed with stored key(s)", "stored_keys_used", keysUsed)
}
}
}

@@ -1,11 +1,12 @@
package vault

import (
"log"
"os"
"reflect"
"testing"

log "github.com/mgutz/logxi/v1"

"github.com/hashicorp/vault/helper/logformat"
"github.com/hashicorp/vault/logical"
"github.com/hashicorp/vault/physical"
)
@@ -22,7 +23,8 @@ func TestCore_Init(t *testing.T) {
}

func testCore_NewTestCore(t *testing.T, seal Seal) (*Core, *CoreConfig) {
logger := log.New(os.Stderr, "", log.LstdFlags)
logger := logformat.NewVaultLogger(log.LevelTrace)

inm := physical.NewInmem(logger)
conf := &CoreConfig{
Physical: inm,

@@ -57,7 +57,7 @@ type CubbyholeBackend struct {

func (b *CubbyholeBackend) revoke(saltedToken string) error {
if saltedToken == "" {
return fmt.Errorf("[ERR] cubbyhole: client token empty during revocation")
return fmt.Errorf("cubbyhole: client token empty during revocation")
}

if err := ClearView(b.storageView.(*BarrierView).SubView(saltedToken + "/")); err != nil {
@@ -80,7 +80,7 @@ func (b *CubbyholeBackend) handleExistenceCheck(
func (b *CubbyholeBackend) handleRead(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
if req.ClientToken == "" {
return nil, fmt.Errorf("[ERR] cubbyhole read: Client token empty")
return nil, fmt.Errorf("cubbyhole read: client token empty")
}

// Read the path
@@ -111,7 +111,7 @@ func (b *CubbyholeBackend) handleRead(
func (b *CubbyholeBackend) handleWrite(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
if req.ClientToken == "" {
return nil, fmt.Errorf("[ERR] cubbyhole write: Client token empty")
return nil, fmt.Errorf("cubbyhole write: client token empty")
}
// Check that some fields are given
if len(req.Data) == 0 {
@@ -139,7 +139,7 @@ func (b *CubbyholeBackend) handleWrite(
func (b *CubbyholeBackend) handleDelete(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
if req.ClientToken == "" {
return nil, fmt.Errorf("[ERR] cubbyhole delete: Client token empty")
return nil, fmt.Errorf("cubbyhole delete: client token empty")
}
// Delete the key at the request path
if err := req.Storage.Delete(req.ClientToken + "/" + req.Path); err != nil {
@@ -152,7 +152,7 @@ func (b *CubbyholeBackend) handleDelete(
func (b *CubbyholeBackend) handleList(
req *logical.Request, data *framework.FieldData) (*logical.Response, error) {
if req.ClientToken == "" {
return nil, fmt.Errorf("[ERR] cubbyhole list: Client token empty")
return nil, fmt.Errorf("cubbyhole list: client token empty")
}

// Right now we only handle directories, so ensure it ends with / We also

@@ -777,7 +777,7 @@ func (b *SystemBackend) handleMount(

// Attempt mount
if err := b.Core.mount(me); err != nil {
b.Backend.Logger().Printf("[ERR] sys: mount %s failed: %v", me.Path, err)
b.Backend.Logger().Error("sys: mount failed", "path", me.Path, "error", err)
return handleError(err)
}

@@ -807,7 +807,7 @@ func (b *SystemBackend) handleUnmount(

// Attempt unmount
if err := b.Core.unmount(suffix); err != nil {
b.Backend.Logger().Printf("[ERR] sys: unmount '%s' failed: %v", suffix, err)
b.Backend.Logger().Error("sys: unmount failed", "path", suffix, "error", err)
return handleError(err)
}

@@ -831,7 +831,7 @@ func (b *SystemBackend) handleRemount(

// Attempt remount
if err := b.Core.remount(fromPath, toPath); err != nil {
b.Backend.Logger().Printf("[ERR] sys: remount '%s' to '%s' failed: %v", fromPath, toPath, err)
b.Backend.Logger().Error("sys: remount failed", "from_path", fromPath, "to_path", toPath, "error", err)
return handleError(err)
}

@@ -872,9 +872,8 @@ func (b *SystemBackend) handleTuneReadCommon(path string) (*logical.Response, er

sysView := b.Core.router.MatchingSystemView(path)
if sysView == nil {
err := fmt.Errorf("[ERR] sys: cannot fetch sysview for path %s", path)
b.Backend.Logger().Print(err)
return handleError(err)
b.Backend.Logger().Error("sys: cannot fetch sysview", "path", path)
return handleError(fmt.Errorf("sys: cannot fetch sysview for path %s", path))
}

resp := &logical.Response{
@@ -920,17 +919,15 @@ func (b *SystemBackend) handleTuneWriteCommon(
// Prevent protected paths from being changed
for _, p := range untunableMounts {
if strings.HasPrefix(path, p) {
err := fmt.Errorf("[ERR] core: cannot tune '%s'", path)
b.Backend.Logger().Print(err)
return handleError(err)
b.Backend.Logger().Error("sys: cannot tune this mount", "path", path)
return handleError(fmt.Errorf("sys: cannot tune '%s'", path))
}
}

mountEntry := b.Core.router.MatchingMountEntry(path)
if mountEntry == nil {
err := fmt.Errorf("[ERR] sys: tune of path '%s' failed: no mount entry found", path)
b.Backend.Logger().Print(err)
return handleError(err)
b.Backend.Logger().Error("sys: tune failed: no mount entry found", "path", path)
return handleError(fmt.Errorf("sys: tune of path '%s' failed: no mount entry found", path))
}

var lock *sync.RWMutex
@@ -977,7 +974,7 @@ func (b *SystemBackend) handleTuneWriteCommon(
defer lock.Unlock()

if err := b.tuneMountTTLs(path, &mountEntry.Config, newDefault, newMax); err != nil {
b.Backend.Logger().Printf("[ERR] sys: tune of path '%s' failed: %v", path, err)
b.Backend.Logger().Error("sys: tuning failed", "path", path, "error", err)
return handleError(err)
}
}
@@ -1002,7 +999,7 @@ func (b *SystemBackend) handleRenew(
// Invoke the expiration manager directly
resp, err := b.Core.expiration.Renew(leaseID, increment)
if err != nil {
b.Backend.Logger().Printf("[ERR] sys: renew '%s' failed: %v", leaseID, err)
b.Backend.Logger().Error("sys: lease renewal failed", "lease_id", leaseID, "error", err)
return handleError(err)
}
return resp, err
@@ -1016,7 +1013,7 @@ func (b *SystemBackend) handleRevoke(

// Invoke the expiration manager directly
if err := b.Core.expiration.Revoke(leaseID); err != nil {
b.Backend.Logger().Printf("[ERR] sys: revoke '%s' failed: %v", leaseID, err)
b.Backend.Logger().Error("sys: lease revocation failed", "lease_id", leaseID, "error", err)
return handleError(err)
}
return nil, nil
@@ -1048,7 +1045,7 @@ func (b *SystemBackend) handleRevokePrefixCommon(
err = b.Core.expiration.RevokePrefix(prefix)
}
if err != nil {
b.Backend.Logger().Printf("[ERR] sys: revoke prefix '%s' failed: %v", prefix, err)
b.Backend.Logger().Error("sys: revoke prefix failed", "prefix", prefix, "error", err)
return handleError(err)
}
return nil, nil
@@ -1103,7 +1100,7 @@ func (b *SystemBackend) handleEnableAuth(

// Attempt enabling
if err := b.Core.enableCredential(me); err != nil {
b.Backend.Logger().Printf("[ERR] sys: enable auth %s failed: %v", me.Path, err)
b.Backend.Logger().Error("sys: enable auth mount failed", "path", me.Path, "error", err)
return handleError(err)
}
return nil, nil
@@ -1121,7 +1118,7 @@ func (b *SystemBackend) handleDisableAuth(

// Attempt disable
if err := b.Core.disableCredential(suffix); err != nil {
b.Backend.Logger().Printf("[ERR] sys: disable auth '%s' failed: %v", suffix, err)
b.Backend.Logger().Error("sys: disable auth mount failed", "path", suffix, "error", err)
return handleError(err)
}
return nil, nil
@@ -1273,7 +1270,7 @@ func (b *SystemBackend) handleEnableAudit(

// Attempt enabling
if err := b.Core.enableAudit(me); err != nil {
b.Backend.Logger().Printf("[ERR] sys: enable audit %s failed: %v", me.Path, err)
b.Backend.Logger().Error("sys: enable audit mount failed", "path", me.Path, "error", err)
return handleError(err)
}
return nil, nil
@@ -1286,7 +1283,7 @@ func (b *SystemBackend) handleDisableAudit(

// Attempt disable
if err := b.Core.disableAudit(path); err != nil {
b.Backend.Logger().Printf("[ERR] sys: disable audit '%s' failed: %v", path, err)
b.Backend.Logger().Error("sys: disable audit mount failed", "path", path, "error", err)
return handleError(err)
}
return nil, nil
@@ -1387,22 +1384,22 @@ func (b *SystemBackend) handleRotate(
// Rotate to the new term
newTerm, err := b.Core.barrier.Rotate()
if err != nil {
b.Backend.Logger().Printf("[ERR] sys: failed to create new encryption key: %v", err)
b.Backend.Logger().Error("sys: failed to create new encryption key", "error", err)
return handleError(err)
}
b.Backend.Logger().Printf("[INFO] sys: installed new encryption key")
b.Backend.Logger().Info("sys: installed new encryption key")

// In HA mode, we need to an upgrade path for the standby instances
if b.Core.ha != nil {
// Create the upgrade path to the new term
if err := b.Core.barrier.CreateUpgrade(newTerm); err != nil {
b.Backend.Logger().Printf("[ERR] sys: failed to create new upgrade for key term %d: %v", newTerm, err)
b.Backend.Logger().Error("sys: failed to create new upgrade", "term", newTerm, "error", err)
}

// Schedule the destroy of the upgrade path
time.AfterFunc(keyRotateGracePeriod, func() {
if err := b.Core.barrier.DestroyUpgrade(newTerm); err != nil {
b.Backend.Logger().Printf("[ERR] sys: failed to destroy upgrade for key term %d: %v", newTerm, err)
b.Backend.Logger().Error("sys: failed to destroy upgrade", "term", newTerm, "error", err)
}
})
}

@@ -75,7 +75,9 @@ func (b *SystemBackend) tuneMountTTLs(path string, meConfig *MountConfig, newDef
return fmt.Errorf("failed to update mount table, rolling back TTL changes")
}

b.Core.logger.Printf("[INFO] core: tuned '%s'", path)
if b.Core.logger.IsInfo() {
b.Core.logger.Info("core: mount tuning successful", "path", path)
}

return nil
}

@@ -214,7 +214,9 @@ func (c *Core) mount(me *MountEntry) error {
if err := c.router.Mount(backend, me.Path, me, view); err != nil {
return err
}
c.logger.Printf("[INFO] core: mounted '%s' type: %s", me.Path, me.Type)
if c.logger.IsInfo() {
c.logger.Info("core: successful mount", "path", me.Path, "type", me.Type)
}
return nil
}

@@ -278,7 +280,9 @@ func (c *Core) unmount(path string) error {
if err := c.removeMountEntry(path); err != nil {
return err
}
c.logger.Printf("[INFO] core: unmounted '%s'", path)
if c.logger.IsInfo() {
c.logger.Info("core: successful unmounted", "path", path)
}
return nil
}

@@ -387,7 +391,9 @@ func (c *Core) remount(src, dst string) error {
return err
}

c.logger.Printf("[INFO] core: remounted '%s' to '%s'", src, dst)
if c.logger.IsInfo() {
c.logger.Info("core: successful remount", "old_path", src, "new_path", dst)
}
return nil
}

@@ -397,7 +403,7 @@ func (c *Core) loadMounts() error {
// Load the existing mount table
raw, err := c.barrier.Get(coreMountConfigPath)
if err != nil {
c.logger.Printf("[ERR] core: failed to read mount table: %v", err)
c.logger.Error("core: failed to read mount table", "error", err)
return errLoadMountsFailed
}

@@ -409,7 +415,7 @@ func (c *Core) loadMounts() error {
// yes, decompress the table and then JSON decode it. If not,
// simply JSON decode it.
if err := jsonutil.DecodeJSON(raw.Value, mountTable); err != nil {
c.logger.Printf("[ERR] core: failed to decompress and/or decode the mount table: %v", err)
c.logger.Error("core: failed to decompress and/or decode the mount table", "error", err)
return err
}
c.mounts = mountTable
@@ -469,20 +475,13 @@ func (c *Core) loadMounts() error {
// persistMounts is used to persist the mount table after modification
func (c *Core) persistMounts(table *MountTable) error {
if table.Type != mountTableType {
c.logger.Printf(
"[ERR] core: given table to persist has type %s but need type %s",
table.Type,
mountTableType)
c.logger.Error("core: given table to persist has wrong type", "actual_type", table.Type, "expected_type", mountTableType)
return fmt.Errorf("invalid table type given, not persisting")
}

for _, entry := range table.Entries {
if entry.Table != table.Type {
c.logger.Printf(
"[ERR] core: entry in mount table with path %s has table value %s but is in table %s, refusing to persist",
entry.Path,
entry.Table,
table.Type)
c.logger.Error("core: given entry to persist in mount table has wrong table value", "path", entry.Path, "entry_table_type", entry.Table, "actual_type", table.Type)
return fmt.Errorf("invalid mount entry found, not persisting")
}
}
@@ -490,7 +489,7 @@ func (c *Core) persistMounts(table *MountTable) error {
// Encode the mount table into JSON and compress it (lzw).
compressedBytes, err := jsonutil.EncodeJSONAndCompress(table, nil)
if err != nil {
c.logger.Printf("[ERR] core: failed to encode and/or compress the mount table: %v", err)
c.logger.Error("core: failed to encode and/or compress the mount table", "error", err)
return err
}

@@ -502,7 +501,7 @@ func (c *Core) persistMounts(table *MountTable) error {

// Write to the physical backend
if err := c.barrier.Put(entry); err != nil {
c.logger.Printf("[ERR] core: failed to persist mount table: %v", err)
c.logger.Error("core: failed to persist mount table", "error", err)
return err
}
return nil
@@ -532,9 +531,7 @@ func (c *Core) setupMounts() error {
// Create the new backend
backend, err = c.newLogicalBackend(entry.Type, c.mountEntrySysView(entry), view, nil)
if err != nil {
c.logger.Printf(
"[ERR] core: failed to create mount entry %s: %v",
entry.Path, err)
c.logger.Error("core: failed to create mount entry", "path", entry.Path, "error", err)
return errLoadMountsFailed
}

@@ -550,10 +547,12 @@ func (c *Core) setupMounts() error {
// Mount the backend
err = c.router.Mount(backend, entry.Path, entry, view)
if err != nil {
c.logger.Printf("[ERR] core: failed to mount entry %s: %v", entry.Path, err)
c.logger.Error("core: failed to mount entry", "path", entry.Path, "error", err)
return errLoadMountsFailed
} else {
c.logger.Printf("[INFO] core: mounted backend of type %s at %s", entry.Type, entry.Path)
if c.logger.IsInfo() {
c.logger.Info("core: successfully mounted backend", "type", entry.Type, "path", entry.Path)
}
}

// Ensure the path is tainted if set in the mount table

@@ -139,7 +139,7 @@ func (c *Core) BarrierRekeyInit(config *SealConfig) error {

// Check if the seal configuration is valid
if err := config.Validate(); err != nil {
c.logger.Printf("[ERR] core: invalid rekey seal configuration: %v", err)
c.logger.Error("core: invalid rekey seal configuration", "error", err)
return fmt.Errorf("invalid rekey seal configuration: %v", err)
}

@@ -171,8 +171,9 @@ func (c *Core) BarrierRekeyInit(config *SealConfig) error {
}
c.barrierRekeyConfig.Nonce = nonce

c.logger.Printf("[INFO] core: rekey initialized (nonce: %s, shares: %d, threshold: %d)",
c.barrierRekeyConfig.Nonce, c.barrierRekeyConfig.SecretShares, c.barrierRekeyConfig.SecretThreshold)
if c.logger.IsInfo() {
c.logger.Info("core: rekey initialized", "nonce", c.barrierRekeyConfig.Nonce, "shares", c.barrierRekeyConfig.SecretShares, "threshold", c.barrierRekeyConfig.SecretThreshold)
}
return nil
}

@@ -184,7 +185,7 @@ func (c *Core) RecoveryRekeyInit(config *SealConfig) error {

// Check if the seal configuration is valid
if err := config.Validate(); err != nil {
c.logger.Printf("[ERR] core: invalid recovery configuration: %v", err)
c.logger.Error("core: invalid recovery configuration", "error", err)
return fmt.Errorf("invalid recovery configuration: %v", err)
}

@@ -220,8 +221,9 @@ func (c *Core) RecoveryRekeyInit(config *SealConfig) error {
}
c.recoveryRekeyConfig.Nonce = nonce

c.logger.Printf("[INFO] core: rekey initialized (nonce: %s, shares: %d, threshold: %d)",
c.recoveryRekeyConfig.Nonce, c.recoveryRekeyConfig.SecretShares, c.recoveryRekeyConfig.SecretThreshold)
if c.logger.IsInfo() {
c.logger.Info("core: rekey initialized", "nonce", c.recoveryRekeyConfig.Nonce, "shares", c.recoveryRekeyConfig.SecretShares, "threshold", c.recoveryRekeyConfig.SecretThreshold)
}
return nil
}

@@ -289,8 +291,9 @@ func (c *Core) BarrierRekeyUpdate(key []byte, nonce string) (*RekeyResult, error

// Check if we don't have enough keys to unlock
if len(c.barrierRekeyProgress) < existingConfig.SecretThreshold {
c.logger.Printf("[DEBUG] core: cannot rekey, have %d of %d keys",
len(c.barrierRekeyProgress), existingConfig.SecretThreshold)
if c.logger.IsDebug() {
c.logger.Debug("core: cannot rekey yet, not enough keys", "keys", len(c.barrierRekeyProgress), "threshold", existingConfig.SecretThreshold)
}
return nil, nil
}

@@ -308,14 +311,14 @@ func (c *Core) BarrierRekeyUpdate(key []byte, nonce string) (*RekeyResult, error
}

if err := c.barrier.VerifyMaster(masterKey); err != nil {
c.logger.Printf("[ERR] core: rekey aborted, master key verification failed: %v", err)
c.logger.Error("core: rekey aborted, master key verification failed", "error", err)
return nil, err
}

// Generate a new master key
newMasterKey, err := c.barrier.GenerateKey()
if err != nil {
c.logger.Printf("[ERR] core: failed to generate master key: %v", err)
c.logger.Error("core: failed to generate master key", "error", err)
return nil, fmt.Errorf("master key generation failed: %v", err)
}

@@ -330,7 +333,7 @@ func (c *Core) BarrierRekeyUpdate(key []byte, nonce string) (*RekeyResult, error
// Split the master key using the Shamir algorithm
shares, err := shamir.Split(newMasterKey, c.barrierRekeyConfig.SecretShares, c.barrierRekeyConfig.SecretThreshold)
if err != nil {
c.logger.Printf("[ERR] core: failed to generate shares: %v", err)
c.logger.Error("core: failed to generate shares", "error", err)
return nil, fmt.Errorf("failed to generate shares: %v", err)
}
results.SecretShares = shares
@@ -373,7 +376,7 @@ func (c *Core) BarrierRekeyUpdate(key []byte, nonce string) (*RekeyResult, error
}
buf, err := json.Marshal(backupVals)
if err != nil {
c.logger.Printf("[ERR] core: failed to marshal unseal key backup: %v", err)
c.logger.Error("core: failed to marshal unseal key backup", "error", err)
return nil, fmt.Errorf("failed to marshal unseal key backup: %v", err)
}
pe := &physical.Entry{
@@ -381,7 +384,7 @@ func (c *Core) BarrierRekeyUpdate(key []byte, nonce string) (*RekeyResult, error
Value: buf,
}
if err = c.physical.Put(pe); err != nil {
c.logger.Printf("[ERR] core: failed to save unseal key backup: %v", err)
c.logger.Error("core: failed to save unseal key backup", "error", err)
return nil, fmt.Errorf("failed to save unseal key backup: %v", err)
}
}
@@ -389,21 +392,21 @@ func (c *Core) BarrierRekeyUpdate(key []byte, nonce string) (*RekeyResult, error

if keysToStore != nil {
if err := c.seal.SetStoredKeys(keysToStore); err != nil {
c.logger.Printf("[ERR] core: failed to store keys: %v", err)
c.logger.Error("core: failed to store keys", "error", err)
return nil, fmt.Errorf("failed to store keys: %v", err)
}
}

// Rekey the barrier
if err := c.barrier.Rekey(newMasterKey); err != nil {
c.logger.Printf("[ERR] core: failed to rekey barrier: %v", err)
c.logger.Error("core: failed to rekey barrier", "error", err)
return nil, fmt.Errorf("failed to rekey barrier: %v", err)
}
c.logger.Printf("[INFO] core: security barrier rekeyed (shares: %d, threshold: %d)",
c.barrierRekeyConfig.SecretShares, c.barrierRekeyConfig.SecretThreshold)

if c.logger.IsInfo() {
c.logger.Info("core: security barrier rekeyed", "shares", c.barrierRekeyConfig.SecretShares, "threshold", c.barrierRekeyConfig.SecretThreshold)
}
if err := c.seal.SetBarrierConfig(c.barrierRekeyConfig); err != nil {
c.logger.Printf("[ERR] core: error saving rekey seal configuration: %v", err)
c.logger.Error("core: error saving rekey seal configuration", "error", err)
return nil, fmt.Errorf("failed to save rekey seal configuration: %v", err)
}

@ -475,8 +478,9 @@ func (c *Core) RecoveryRekeyUpdate(key []byte, nonce string) (*RekeyResult, erro
|
|||
|
||||
// Check if we don't have enough keys to unlock
|
||||
if len(c.recoveryRekeyProgress) < existingConfig.SecretThreshold {
|
||||
c.logger.Printf("[DEBUG] core: cannot rekey, have %d of %d keys",
|
||||
len(c.recoveryRekeyProgress), existingConfig.SecretThreshold)
|
||||
if c.logger.IsDebug() {
|
||||
c.logger.Debug("core: cannot rekey yet, not enough keys", "keys", len(c.recoveryRekeyProgress), "threshold", existingConfig.SecretThreshold)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
|
@ -495,14 +499,14 @@ func (c *Core) RecoveryRekeyUpdate(key []byte, nonce string) (*RekeyResult, erro
|
|||
|
||||
// Verify the recovery key
|
||||
if err := c.seal.VerifyRecoveryKey(masterKey); err != nil {
|
||||
c.logger.Printf("[ERR] core: rekey aborted, recovery key verification failed: %v", err)
|
||||
c.logger.Error("core: rekey aborted, recovery key verification failed", "error", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Generate a new master key
|
||||
newMasterKey, err := c.barrier.GenerateKey()
|
||||
if err != nil {
|
||||
c.logger.Printf("[ERR] core: failed to generate recovery key: %v", err)
|
||||
c.logger.Error("core: failed to generate recovery key", "error", err)
|
||||
return nil, fmt.Errorf("recovery key generation failed: %v", err)
|
||||
}
|
||||
|
||||
|
@ -517,7 +521,7 @@ func (c *Core) RecoveryRekeyUpdate(key []byte, nonce string) (*RekeyResult, erro
|
|||
// Split the master key using the Shamir algorithm
|
||||
shares, err := shamir.Split(newMasterKey, c.recoveryRekeyConfig.SecretShares, c.recoveryRekeyConfig.SecretThreshold)
|
||||
if err != nil {
|
||||
c.logger.Printf("[ERR] core: failed to generate shares: %v", err)
|
||||
c.logger.Error("core: failed to generate shares", "error", err)
|
||||
return nil, fmt.Errorf("failed to generate shares: %v", err)
|
||||
}
|
||||
results.SecretShares = shares
|
||||
|
@ -550,7 +554,7 @@ func (c *Core) RecoveryRekeyUpdate(key []byte, nonce string) (*RekeyResult, erro
|
|||
}
|
||||
buf, err := json.Marshal(backupVals)
|
||||
if err != nil {
|
||||
c.logger.Printf("[ERR] core: failed to marshal recovery key backup: %v", err)
|
||||
c.logger.Error("core: failed to marshal recovery key backup", "error", err)
|
||||
return nil, fmt.Errorf("failed to marshal recovery key backup: %v", err)
|
||||
}
|
||||
pe := &physical.Entry{
|
||||
|
@ -558,19 +562,19 @@ func (c *Core) RecoveryRekeyUpdate(key []byte, nonce string) (*RekeyResult, erro
|
|||
Value: buf,
|
||||
}
|
||||
if err = c.physical.Put(pe); err != nil {
|
||||
c.logger.Printf("[ERR] core: failed to save unseal key backup: %v", err)
|
||||
c.logger.Error("core: failed to save unseal key backup", "error", err)
|
||||
return nil, fmt.Errorf("failed to save unseal key backup: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := c.seal.SetRecoveryKey(newMasterKey); err != nil {
|
||||
c.logger.Printf("[ERR] core: failed to set recovery key: %v", err)
|
||||
c.logger.Error("core: failed to set recovery key", "error", err)
|
||||
return nil, fmt.Errorf("failed to set recovery key: %v", err)
|
||||
}
|
||||
|
||||
if err := c.seal.SetRecoveryConfig(c.recoveryRekeyConfig); err != nil {
|
||||
c.logger.Printf("[ERR] core: error saving rekey seal configuration: %v", err)
|
||||
c.logger.Error("core: error saving rekey seal configuration", "error", err)
|
||||
return nil, fmt.Errorf("failed to save rekey seal configuration: %v", err)
|
||||
}
|
||||
|
||||
|
|
|
@@ -2,11 +2,12 @@ package vault

import (
	"fmt"
	"log"
	"os"
	"reflect"
	"testing"

	log "github.com/mgutz/logxi/v1"

	"github.com/hashicorp/vault/helper/logformat"
	"github.com/hashicorp/vault/physical"
)

@@ -361,7 +362,8 @@ func testCore_Rekey_Invalid_Common(t *testing.T, c *Core, keys [][]byte, recover

func TestCore_Standby_Rekey(t *testing.T) {
	// Create the first core and initialize it
	logger := log.New(os.Stderr, "", log.LstdFlags)
	logger := logformat.NewVaultLogger(log.LevelTrace)

	inm := physical.NewInmem(logger)
	inmha := physical.NewInmemHA(logger)
	redirectOriginal := "http://127.0.0.1:8200"

@@ -34,7 +34,7 @@ func (c *Core) startForwarding() error {
	// Get our TLS config
	tlsConfig, err := c.ClusterTLSConfig()
	if err != nil {
		c.logger.Printf("[ERR] core/startClusterListener: failed to get tls configuration: %v", err)
		c.logger.Error("core/startClusterListener: failed to get tls configuration", "error", err)
		return err
	}

@@ -68,20 +68,22 @@ func (c *Core) startForwarding() error {
	go func() {
		defer shutdownWg.Done()

		c.logger.Printf("[INFO] core/startClusterListener: starting listener")
		c.logger.Info("core/startClusterListener: starting listener")

		// Create a TCP listener. We do this separately and specifically
		// with TCP so that we can set deadlines.
		tcpLn, err := net.ListenTCP("tcp", laddr)
		if err != nil {
			c.logger.Printf("[ERROR] core/startClusterListener: error starting listener: %v", err)
			c.logger.Error("core/startClusterListener: error starting listener", "error", err)
			return
		}

		// Wrap the listener with TLS
		tlsLn := tls.NewListener(tcpLn, tlsConfig)

		c.logger.Printf("[INFO] core/startClusterListener: serving cluster requests on %s", tlsLn.Addr())
		if c.logger.IsInfo() {
			c.logger.Info("core/startClusterListener: serving cluster requests", "cluster_listen_address", tlsLn.Addr())
		}

		for {
			if atomic.LoadUint32(&shutdown) > 0 {

@@ -108,7 +110,9 @@ func (c *Core) startForwarding() error {
			tlsConn := conn.(*tls.Conn)
			err = tlsConn.Handshake()
			if err != nil {
				c.logger.Printf("[DEBUG] core/startClusterListener/Accept: error handshaking: %v", err)
				if c.logger.IsDebug() {
					c.logger.Debug("core/startClusterListener/Accept: error handshaking", "error", err)
				}
				if conn != nil {
					conn.Close()
				}

@@ -117,19 +121,19 @@ func (c *Core) startForwarding() error {

			switch tlsConn.ConnectionState().NegotiatedProtocol {
			case "h2":
				c.logger.Printf("[DEBUG] core/startClusterListener/Accept: got h2 connection")
				c.logger.Debug("core/startClusterListener/Accept: got h2 connection")
				go fws.ServeConn(conn, &http2.ServeConnOpts{
					Handler: wrappedHandler,
				})

			case "req_fw_sb-act_v1":
				c.logger.Printf("[DEBUG] core/startClusterListener/Accept: got req_fw_sb-act_v1 connection")
				c.logger.Debug("core/startClusterListener/Accept: got req_fw_sb-act_v1 connection")
				go fws.ServeConn(conn, &http2.ServeConnOpts{
					Handler: c.rpcServer,
				})

			default:
				c.logger.Printf("[DEBUG] core/startClusterListener/Accept: unknown negotiated protocol")
				c.logger.Debug("core/startClusterListener/Accept: unknown negotiated protocol")
				conn.Close()
				continue
			}

@@ -145,7 +149,7 @@ func (c *Core) startForwarding() error {

		// Stop the RPC server
		c.rpcServer.Stop()
		c.logger.Printf("[INFO] core/startClusterListener: shutting down listeners")
		c.logger.Info("core/startClusterListener: shutting down listeners")

		// Set the shutdown flag. This will cause the listeners to shut down
		// within the deadline in clusterListenerAcceptDeadline

@@ -153,7 +157,7 @@ func (c *Core) startForwarding() error {

		// Wait for them all to shut down
		shutdownWg.Wait()
		c.logger.Printf("[INFO] core/startClusterListener: listeners successfully shut down")
		c.logger.Info("core/startClusterListener: listeners successfully shut down")

		// Tell the main thread that shutdown is done.
		c.clusterListenerShutdownSuccessCh <- struct{}{}

@@ -188,7 +192,7 @@ func (c *Core) refreshRequestForwardingConnection(clusterAddr string) error {

	clusterURL, err := url.Parse(clusterAddr)
	if err != nil {
		c.logger.Printf("[ERR] core/refreshRequestForwardingConnection: error parsing cluster address: %v", err)
		c.logger.Error("core/refreshRequestForwardingConnection: error parsing cluster address", "error", err)
		return err
	}

@@ -197,7 +201,7 @@ func (c *Core) refreshRequestForwardingConnection(clusterAddr string) error {
	// Set up normal HTTP forwarding handling
	tlsConfig, err := c.ClusterTLSConfig()
	if err != nil {
		c.logger.Printf("[ERR] core/refreshRequestForwardingConnection: error fetching cluster tls configuration: %v", err)
		c.logger.Error("core/refreshRequestForwardingConnection: error fetching cluster tls configuration", "error", err)
		return err
	}
	tp := &http2.Transport{

@@ -217,7 +221,7 @@ func (c *Core) refreshRequestForwardingConnection(clusterAddr string) error {
	c.rpcClientConnCancelFunc = cancelFunc
	c.rpcClientConn, err = grpc.DialContext(ctx, clusterURL.Host, grpc.WithDialer(c.getGRPCDialer()), grpc.WithInsecure())
	if err != nil {
		c.logger.Printf("[ERR] core/refreshRequestForwardingConnection: err setting up rpc client: %v", err)
		c.logger.Error("core/refreshRequestForwardingConnection: err setting up rpc client", "error", err)
		return err
	}
	c.rpcForwardingClient = NewRequestForwardingClient(c.rpcClientConn)

@@ -263,7 +267,7 @@ func (c *Core) ForwardRequest(req *http.Request) (int, []byte, error) {

	freq, err := forwarding.GenerateForwardedHTTPRequest(req, c.requestForwardingConnection.clusterAddr+"/cluster/local/forwarded-request")
	if err != nil {
		c.logger.Printf("[ERR] core/ForwardRequest: error creating forwarded request: %v", err)
		c.logger.Error("core/ForwardRequest: error creating forwarded request", "error", err)
		return 0, nil, fmt.Errorf("error creating forwarding request")
	}

@@ -290,16 +294,16 @@ func (c *Core) ForwardRequest(req *http.Request) (int, []byte, error) {

	freq, err := forwarding.GenerateForwardedRequest(req)
	if err != nil {
		c.logger.Printf("[ERR] core/ForwardRequest: error creating forwarding RPC request: %v", err)
		c.logger.Error("core/ForwardRequest: error creating forwarding RPC request", "error", err)
		return 0, nil, fmt.Errorf("error creating forwarding RPC request")
	}
	if freq == nil {
		c.logger.Printf("[ERR] core/ForwardRequest: got nil forwarding RPC request")
		c.logger.Error("core/ForwardRequest: got nil forwarding RPC request")
		return 0, nil, fmt.Errorf("got nil forwarding RPC request")
	}
	resp, err := c.rpcForwardingClient.HandleRequest(context.Background(), freq, grpc.FailFast(true))
	if err != nil {
		c.logger.Printf("[ERR] core/ForwardRequest: error during forwarded RPC request: %v", err)
		c.logger.Error("core/ForwardRequest: error during forwarded RPC request", "error", err)
		return 0, nil, fmt.Errorf("error during forwarding RPC request")
	}
	return int(resp.StatusCode), resp.Body, nil

@@ -313,7 +317,7 @@ func (c *Core) getGRPCDialer() func(string, time.Duration) (net.Conn, error) {
	return func(addr string, timeout time.Duration) (net.Conn, error) {
		tlsConfig, err := c.ClusterTLSConfig()
		if err != nil {
			c.logger.Printf("[ERR] core/getGRPCDialer: failed to get tls configuration: %v", err)
			c.logger.Error("core/getGRPCDialer: failed to get tls configuration", "error", err)
			return nil, err
		}
		tlsConfig.NextProtos = []string{"req_fw_sb-act_v1"}

@@ -67,8 +67,7 @@ func (c *Core) HandleRequest(req *logical.Request) (resp *logical.Response, err

	// Create an audit trail of the response
	if auditErr := c.auditBroker.LogResponse(auth, req, resp, err); auditErr != nil {
		c.logger.Printf("[ERR] core: failed to audit response (request path: %s): %v",
			req.Path, auditErr)
		c.logger.Error("core: failed to audit response", "request_path", req.Path, "error", auditErr)
		return nil, ErrInternalError
	}

@@ -96,7 +95,7 @@ func (c *Core) handleRequest(req *logical.Request) (retResp *logical.Response, r
		var err error
		te, err = c.tokenStore.UseToken(te)
		if err != nil {
			c.logger.Printf("[ERR] core: failed to use token: %v", err)
			c.logger.Error("core: failed to use token", "error", err)
			retErr = multierror.Append(retErr, ErrInternalError)
			return nil, nil, retErr
		}

@@ -112,7 +111,7 @@ func (c *Core) handleRequest(req *logical.Request) (retResp *logical.Response, r
		defer func(id string) {
			err = c.tokenStore.Revoke(id)
			if err != nil {
				c.logger.Printf("[ERR] core: failed to revoke token: %v", err)
				c.logger.Error("core: failed to revoke token", "error", err)
				retResp = nil
				retAuth = nil
				retErr = multierror.Append(retErr, ErrInternalError)

@@ -138,8 +137,7 @@ func (c *Core) handleRequest(req *logical.Request) (retResp *logical.Response, r
		}

		if err := c.auditBroker.LogRequest(auth, req, ctErr); err != nil {
			c.logger.Printf("[ERR] core: failed to audit request with path (%s): %v",
				req.Path, err)
			c.logger.Error("core: failed to audit request", "path", req.Path, "error", err)
		}

		if errType != nil {

@@ -153,8 +151,7 @@ func (c *Core) handleRequest(req *logical.Request) (retResp *logical.Response, r

	// Create an audit trail of the request
	if err := c.auditBroker.LogRequest(auth, req, nil); err != nil {
		c.logger.Printf("[ERR] core: failed to audit request with path (%s): %v",
			req.Path, err)
		c.logger.Error("core: failed to audit request", "path", req.Path, "error", err)
		retErr = multierror.Append(retErr, ErrInternalError)
		return nil, auth, retErr
	}

@@ -178,7 +175,7 @@ func (c *Core) handleRequest(req *logical.Request) (retResp *logical.Response, r
	// Get the SystemView for the mount
	sysView := c.router.MatchingSystemView(req.Path)
	if sysView == nil {
		c.logger.Println("[ERR] core: unable to retrieve system view from router")
		c.logger.Error("core: unable to retrieve system view from router")
		retErr = multierror.Append(retErr, ErrInternalError)
		return nil, auth, retErr
	}

@@ -199,7 +196,7 @@ func (c *Core) handleRequest(req *logical.Request) (retResp *logical.Response, r
		registerLease := true
		matchingBackend := c.router.MatchingBackend(req.Path)
		if matchingBackend == nil {
			c.logger.Println("[ERR] core: unable to retrieve generic backend from router")
			c.logger.Error("core: unable to retrieve generic backend from router")
			retErr = multierror.Append(retErr, ErrInternalError)
			return nil, auth, retErr
		}

@@ -213,9 +210,7 @@ func (c *Core) handleRequest(req *logical.Request) (retResp *logical.Response, r
		if registerLease {
			leaseID, err := c.expiration.Register(req, resp)
			if err != nil {
				c.logger.Printf(
					"[ERR] core: failed to register lease "+
						"(request path: %s): %v", req.Path, err)
				c.logger.Error("core: failed to register lease", "request_path", req.Path, "error", err)
				retErr = multierror.Append(retErr, ErrInternalError)
				return nil, auth, retErr
			}

@@ -228,9 +223,7 @@ func (c *Core) handleRequest(req *logical.Request) (retResp *logical.Response, r
	// since it does not need to be re-registered
	if resp != nil && resp.Auth != nil && !strings.HasPrefix(req.Path, "auth/token/renew") {
		if !strings.HasPrefix(req.Path, "auth/token/") {
			c.logger.Printf(
				"[ERR] core: unexpected Auth response for non-token backend "+
					"(request path: %s)", req.Path)
			c.logger.Error("core: unexpected Auth response for non-token backend", "request_path", req.Path)
			retErr = multierror.Append(retErr, ErrInternalError)
			return nil, auth, retErr
		}

@@ -239,14 +232,13 @@ func (c *Core) handleRequest(req *logical.Request) (retResp *logical.Response, r
		// here because roles allow suffixes.
		te, err := c.tokenStore.Lookup(resp.Auth.ClientToken)
		if err != nil {
			c.logger.Printf("[ERR] core: failed to lookup token: %v", err)
			c.logger.Error("core: failed to look up token", "error", err)
			retErr = multierror.Append(retErr, ErrInternalError)
			return nil, nil, retErr
		}

		if err := c.expiration.RegisterAuth(te.Path, resp.Auth); err != nil {
			c.logger.Printf("[ERR] core: failed to register token lease "+
				"(request path: %s): %v", req.Path, err)
			c.logger.Error("core: failed to register token lease", "request_path", req.Path, "error", err)
			retErr = multierror.Append(retErr, ErrInternalError)
			return nil, auth, retErr
		}

@@ -266,17 +258,14 @@ func (c *Core) handleLoginRequest(req *logical.Request) (*logical.Response, *log

	// Create an audit trail of the request, auth is not available on login requests
	if err := c.auditBroker.LogRequest(nil, req, nil); err != nil {
		c.logger.Printf("[ERR] core: failed to audit request with path %s: %v",
			req.Path, err)
		c.logger.Error("core: failed to audit request", "path", req.Path, "error", err)
		return nil, nil, ErrInternalError
	}

	// The token store uses authentication even when creating a new token,
	// so it's handled in handleRequest. It should not be reached here.
	if strings.HasPrefix(req.Path, "auth/token/") {
		c.logger.Printf(
			"[ERR] core: unexpected login request for token backend "+
				"(request path: %s)", req.Path)
		c.logger.Error("core: unexpected login request for token backend", "request_path", req.Path)
		return nil, nil, ErrInternalError
	}

@@ -295,8 +284,7 @@ func (c *Core) handleLoginRequest(req *logical.Request) (*logical.Response, *log

	// A login request should never return a secret!
	if resp != nil && resp.Secret != nil {
		c.logger.Printf("[ERR] core: unexpected Secret response for login path"+
			"(request path: %s)", req.Path)
		c.logger.Error("core: unexpected Secret response for login path", "request_path", req.Path)
		return nil, nil, ErrInternalError
	}

@@ -319,8 +307,7 @@ func (c *Core) handleLoginRequest(req *logical.Request) (*logical.Response, *log

		sysView := c.router.MatchingSystemView(req.Path)
		if sysView == nil {
			c.logger.Printf("[ERR] core: unable to look up sys view for login path"+
				"(request path: %s)", req.Path)
			c.logger.Error("core: unable to look up sys view for login path", "request_path", req.Path)
			return nil, nil, ErrInternalError
		}

@@ -347,7 +334,7 @@ func (c *Core) handleLoginRequest(req *logical.Request) (*logical.Response, *log
		te.Policies = policyutil.SanitizePolicies(te.Policies, true)

		if err := c.tokenStore.create(&te); err != nil {
			c.logger.Printf("[ERR] core: failed to create token: %v", err)
			c.logger.Error("core: failed to create token", "error", err)
			return nil, auth, ErrInternalError
		}

@@ -358,8 +345,7 @@ func (c *Core) handleLoginRequest(req *logical.Request) (*logical.Response, *log

		// Register with the expiration manager
		if err := c.expiration.RegisterAuth(te.Path, auth); err != nil {
			c.logger.Printf("[ERR] core: failed to register token lease "+
				"(request path: %s): %v", req.Path, err)
			c.logger.Error("core: failed to register token lease", "request_path", req.Path, "error", err)
			return nil, auth, ErrInternalError
		}

@@ -386,7 +372,7 @@ func (c *Core) wrapInCubbyhole(req *logical.Request, resp *logical.Response) (*l
	}

	if err := c.tokenStore.create(&te); err != nil {
		c.logger.Printf("[ERR] core: failed to create wrapping token: %v", err)
		c.logger.Error("core: failed to create wrapping token", "error", err)
		return nil, ErrInternalError
	}

@@ -413,7 +399,7 @@ func (c *Core) wrapInCubbyhole(req *logical.Request, resp *logical.Response) (*l

	marshaledResponse, err := json.Marshal(httpResponse)
	if err != nil {
		c.logger.Printf("[ERR] core: failed to marshal wrapped response: %v", err)
		c.logger.Error("core: failed to marshal wrapped response", "error", err)
		return nil, ErrInternalError
	}

@@ -430,12 +416,12 @@ func (c *Core) wrapInCubbyhole(req *logical.Request, resp *logical.Response) (*l
	if err != nil {
		// Revoke since it's not yet being tracked for expiration
		c.tokenStore.Revoke(te.ID)
		c.logger.Printf("[ERR] core: failed to store wrapped response information: %v", err)
		c.logger.Error("core: failed to store wrapped response information", "error", err)
		return nil, ErrInternalError
	}
	if cubbyResp != nil && cubbyResp.IsError() {
		c.tokenStore.Revoke(te.ID)
		c.logger.Printf("[ERR] core: failed to store wrapped response information: %v", cubbyResp.Data["error"])
		c.logger.Error("core: failed to store wrapped response information", "error", cubbyResp.Data["error"])
		return cubbyResp, nil
	}

@@ -452,8 +438,7 @@ func (c *Core) wrapInCubbyhole(req *logical.Request, resp *logical.Response) (*l
	if err := c.expiration.RegisterAuth(te.Path, auth); err != nil {
		// Revoke since it's not yet being tracked for expiration
		c.tokenStore.Revoke(te.ID)
		c.logger.Printf("[ERR] core: failed to register cubbyhole wrapping token lease "+
			"(request path: %s): %v", req.Path, err)
		c.logger.Error("core: failed to register cubbyhole wrapping token lease", "request_path", req.Path, "error", err)
		return nil, ErrInternalError
	}

@@ -1,11 +1,12 @@
package vault

import (
	"log"
	"strings"
	"sync"
	"time"

	log "github.com/mgutz/logxi/v1"

	"github.com/armon/go-metrics"
	"github.com/hashicorp/vault/logical"
)

@@ -28,7 +29,7 @@ const (
// on every mounted logical backend. It ensures that only one rollback operation
// is in-flight at any given time within a single seal/unseal phase.
type RollbackManager struct {
	logger *log.Logger
	logger log.Logger

	// This gives the current mount table of both logical and credential backends,
	// plus a RWMutex that is locked for reading. It is up to the caller to RUnlock

@@ -55,7 +56,7 @@ type rollbackState struct {
}

// NewRollbackManager is used to create a new rollback manager
func NewRollbackManager(logger *log.Logger, backendsFunc func() []*MountEntry, router *Router) *RollbackManager {
func NewRollbackManager(logger log.Logger, backendsFunc func() []*MountEntry, router *Router) *RollbackManager {
	r := &RollbackManager{
		logger: logger,
		backends: backendsFunc,

@@ -88,7 +89,7 @@ func (m *RollbackManager) Stop() {

// run is a long running routine to periodically invoke rollback
func (m *RollbackManager) run() {
	m.logger.Printf("[INFO] rollback: starting rollback manager")
	m.logger.Info("rollback: starting rollback manager")
	tick := time.NewTicker(m.period)
	defer tick.Stop()
	defer close(m.doneCh)

@@ -98,7 +99,7 @@ func (m *RollbackManager) run() {
			m.triggerRollbacks()

		case <-m.shutdownCh:
			m.logger.Printf("[INFO] rollback: stopping rollback manager")
			m.logger.Info("rollback: stopping rollback manager")
			return
		}
	}

@@ -136,7 +137,9 @@ func (m *RollbackManager) startRollback(path string) *rollbackState {
// attemptRollback invokes a RollbackOperation for the given path
func (m *RollbackManager) attemptRollback(path string, rs *rollbackState) (err error) {
	defer metrics.MeasureSince([]string{"rollback", "attempt", strings.Replace(path, "/", "-", -1)}, time.Now())
	m.logger.Printf("[DEBUG] rollback: attempting rollback on %s", path)
	if m.logger.IsDebug() {
		m.logger.Debug("rollback: attempting rollback", "path", path)
	}

	defer func() {
		rs.lastError = err

@@ -160,8 +163,7 @@ func (m *RollbackManager) attemptRollback(path string, rs *rollbackState) (err e
		err = nil
	}
	if err != nil {
		m.logger.Printf("[ERR] rollback: error rolling back %s: %s",
			path, err)
		m.logger.Error("rollback: error rolling back", "path", path, "error", err)
	}
	return
}

@@ -1,13 +1,14 @@
package vault

import (
	"log"
	"os"
	"sync"
	"testing"
	"time"

	log "github.com/mgutz/logxi/v1"

	"github.com/hashicorp/go-uuid"
	"github.com/hashicorp/vault/helper/logformat"
)

// mockRollback returns a mock rollback manager

@@ -33,7 +34,8 @@ func mockRollback(t *testing.T) (*RollbackManager, *NoopBackend) {
		return mounts.Entries
	}

	logger := log.New(os.Stderr, "", log.LstdFlags)
	logger := logformat.NewVaultLogger(log.LevelTrace)

	rb := NewRollbackManager(logger, mountsFunc, router)
	rb.period = 10 * time.Millisecond
	return rb, backend

@@ -86,11 +86,11 @@ func (d *DefaultSeal) RecoveryKeySupported() bool {
}

func (d *DefaultSeal) SetStoredKeys(keys [][]byte) error {
	return fmt.Errorf("[ERR] core: stored keys are not supported")
	return fmt.Errorf("core: stored keys are not supported")
}

func (d *DefaultSeal) GetStoredKeys() ([][]byte, error) {
	return nil, fmt.Errorf("[ERR] core: stored keys are not supported")
	return nil, fmt.Errorf("core: stored keys are not supported")
}

func (d *DefaultSeal) BarrierConfig() (*SealConfig, error) {

@@ -105,13 +105,13 @@ func (d *DefaultSeal) BarrierConfig() (*SealConfig, error) {
	// Fetch the core configuration
	pe, err := d.core.physical.Get(barrierSealConfigPath)
	if err != nil {
		d.core.logger.Printf("[ERR] core: failed to read seal configuration: %v", err)
		d.core.logger.Error("core: failed to read seal configuration", "error", err)
		return nil, fmt.Errorf("failed to check seal configuration: %v", err)
	}

	// If the seal configuration is missing, we are not initialized
	if pe == nil {
		d.core.logger.Printf("[INFO] core: seal configuration missing, not initialized")
		d.core.logger.Info("core: seal configuration missing, not initialized")
		return nil, nil
	}

@@ -119,7 +119,7 @@ func (d *DefaultSeal) BarrierConfig() (*SealConfig, error) {

	// Decode the barrier entry
	if err := jsonutil.DecodeJSON(pe.Value, &conf); err != nil {
		d.core.logger.Printf("[ERR] core: failed to decode seal configuration: %v", err)
		d.core.logger.Error("core: failed to decode seal configuration", "error", err)
		return nil, fmt.Errorf("failed to decode seal configuration: %v", err)
	}

@@ -129,13 +129,13 @@ func (d *DefaultSeal) BarrierConfig() (*SealConfig, error) {
		conf.Type = d.BarrierType()
	case d.BarrierType():
	default:
		d.core.logger.Printf("[ERR] core: barrier seal type of %s does not match loaded type of %s", conf.Type, d.BarrierType())
		d.core.logger.Error("core: barrier seal type does not match loaded type", "barrier_seal_type", conf.Type, "loaded_seal_type", d.BarrierType())
		return nil, fmt.Errorf("barrier seal type of %s does not match loaded type of %s", conf.Type, d.BarrierType())
	}

	// Check for a valid seal configuration
	if err := conf.Validate(); err != nil {
		d.core.logger.Printf("[ERR] core: invalid seal configuration: %v", err)
		d.core.logger.Error("core: invalid seal configuration", "error", err)
		return nil, fmt.Errorf("seal validation failed: %v", err)
	}

@@ -163,7 +163,7 @@ func (d *DefaultSeal) SetBarrierConfig(config *SealConfig) error {
	}

	if err := d.core.physical.Put(pe); err != nil {
		d.core.logger.Printf("[ERR] core: failed to write seal configuration: %v", err)
		d.core.logger.Error("core: failed to write seal configuration", "error", err)
		return fmt.Errorf("failed to write seal configuration: %v", err)
	}

@@ -8,18 +8,19 @@ import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"log"
	"net"
	"net/http"
	"os"
	"os/exec"
	"testing"
	"time"

	log "github.com/mgutz/logxi/v1"

	"golang.org/x/crypto/ssh"

	"github.com/hashicorp/go-uuid"
	"github.com/hashicorp/vault/audit"
	"github.com/hashicorp/vault/helper/logformat"
	"github.com/hashicorp/vault/helper/salt"
	"github.com/hashicorp/vault/logical"
	"github.com/hashicorp/vault/logical/framework"

@@ -110,7 +111,8 @@ func TestCoreWithSeal(t *testing.T, testSeal Seal) *Core {
		logicalBackends[backendName] = backendFactory
	}

	logger := log.New(os.Stderr, "", log.LstdFlags)
	logger := logformat.NewVaultLogger(log.LevelTrace)

	physicalBackend := physical.NewInmem(logger)
	conf := &CoreConfig{
		Physical: physicalBackend,

@@ -203,7 +205,7 @@ func TestCoreWithTokenStore(t *testing.T) (*Core, *TokenStore, []byte, string) {
	router.Mount(ts, "auth/token/", &MountEntry{Table: credentialTableType, UUID: ""}, ts.view)

	subview := c.systemBarrierView.SubView(expirationSubPath)
	logger := log.New(os.Stderr, "", log.LstdFlags)
	logger := logformat.NewVaultLogger(log.LevelTrace)

	exp := NewExpirationManager(router, subview, ts, logger)
	ts.SetExpirationManager(exp)

@@ -501,7 +503,7 @@ func TestCluster(t *testing.T, handlers []http.Handler, base *CoreConfig, unseal
		t.Fatal("no verified chains for chains auth")
	}

	logger := log.New(os.Stderr, "", log.LstdFlags)
	logger := logformat.NewVaultLogger(log.LevelTrace)

	//
	// Listener setup

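Every hunk above applies the same mechanical conversion: the standard library logger is swapped for a logxi log.Logger, Printf-style calls with "[ERR]"/"[INFO]"/"[DEBUG]" prefixes become leveled calls with key/value pairs, and debug or info lines that format arguments are wrapped in IsDebug()/IsInfo() guards. A minimal standalone sketch of that pattern (illustrative function names; not part of the commit itself):

```go
package main

import (
	"fmt"

	log "github.com/mgutz/logxi/v1"

	"github.com/hashicorp/vault/helper/logformat"
)

func main() {
	// The converted tests build their logger the same way.
	logger := logformat.NewVaultLogger(log.LevelTrace)

	if err := doWork(); err != nil {
		// Old style: logger.Printf("[ERR] core: failed to do work: %v", err)
		logger.Error("core: failed to do work", "error", err)
	}

	// Guard leveled calls whose arguments are costly to build, as the
	// converted code does with IsDebug()/IsInfo().
	if logger.IsDebug() {
		logger.Debug("core: work finished", "attempts", 3)
	}
}

// doWork is a placeholder so the sketch compiles on its own.
func doWork() error { return fmt.Errorf("example failure") }
```
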
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2016 Yasuhiro Matsumoto

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

@@ -0,0 +1,43 @@
# go-colorable

Colorable writer for windows.

For example, most of logger packages doesn't show colors on windows. (I know we can do it with ansicon. But I don't want.)
This package is possible to handle escape sequence for ansi color on windows.

## Too Bad!

![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png)


## So Good!

![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png)

## Usage

```go
logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true})
logrus.SetOutput(colorable.NewColorableStdout())

logrus.Info("succeeded")
logrus.Warn("not correct")
logrus.Error("something error")
logrus.Fatal("panic")
```

You can compile above code on non-windows OSs.

## Installation

```
$ go get github.com/mattn/go-colorable
```

# License

MIT

# Author

Yasuhiro Matsumoto (a.k.a mattn)

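go-colorable is pulled in here, presumably as a dependency of logxi: logxi emits ANSI escape codes and this package makes them render on Windows consoles. Since the constructors just return an io.Writer, the result plugs into any logger; a small sketch with the standard library logger (illustrative usage, not part of the vendored files):

```go
package main

import (
	stdlog "log"

	"github.com/mattn/go-colorable"
)

func main() {
	// NewColorableStdout returns os.Stdout as-is on non-Windows systems (see the
	// build-tagged file below) and an escape-sequence-translating writer on Windows.
	stdlog.SetOutput(colorable.NewColorableStdout())
	stdlog.Println("\x1b[32mgreen on every platform\x1b[0m")
}
```
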
@@ -0,0 +1,24 @@
// +build !windows

package colorable

import (
	"io"
	"os"
)

func NewColorable(file *os.File) io.Writer {
	if file == nil {
		panic("nil passed instead of *os.File to NewColorable()")
	}

	return file
}

func NewColorableStdout() io.Writer {
	return os.Stdout
}

func NewColorableStderr() io.Writer {
	return os.Stderr
}

@ -0,0 +1,809 @@
|
|||
package colorable
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"github.com/mattn/go-isatty"
|
||||
)
|
||||
|
||||
const (
|
||||
foregroundBlue = 0x1
|
||||
foregroundGreen = 0x2
|
||||
foregroundRed = 0x4
|
||||
foregroundIntensity = 0x8
|
||||
foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity)
|
||||
backgroundBlue = 0x10
|
||||
backgroundGreen = 0x20
|
||||
backgroundRed = 0x40
|
||||
backgroundIntensity = 0x80
|
||||
backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity)
|
||||
)
|
||||
|
||||
type wchar uint16
|
||||
type short int16
|
||||
type dword uint32
|
||||
type word uint16
|
||||
|
||||
type coord struct {
|
||||
x short
|
||||
y short
|
||||
}
|
||||
|
||||
type smallRect struct {
|
||||
left short
|
||||
top short
|
||||
right short
|
||||
bottom short
|
||||
}
|
||||
|
||||
type consoleScreenBufferInfo struct {
|
||||
size coord
|
||||
cursorPosition coord
|
||||
attributes word
|
||||
window smallRect
|
||||
maximumWindowSize coord
|
||||
}
|
||||
|
||||
type consoleCursorInfo struct {
|
||||
size dword
|
||||
visible int32
|
||||
}
|
||||
|
||||
var (
|
||||
kernel32 = syscall.NewLazyDLL("kernel32.dll")
|
||||
procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
|
||||
procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute")
|
||||
procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition")
|
||||
procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW")
|
||||
procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute")
|
||||
procGetConsoleCursorInfo = kernel32.NewProc("GetConsoleCursorInfo")
|
||||
procSetConsoleCursorInfo = kernel32.NewProc("SetConsoleCursorInfo")
|
||||
)
|
||||
|
||||
type Writer struct {
|
||||
out io.Writer
|
||||
handle syscall.Handle
|
||||
lastbuf bytes.Buffer
|
||||
oldattr word
|
||||
}
|
||||
|
||||
func NewColorable(file *os.File) io.Writer {
|
||||
if file == nil {
|
||||
panic("nil passed instead of *os.File to NewColorable()")
|
||||
}
|
||||
|
||||
if isatty.IsTerminal(file.Fd()) {
|
||||
var csbi consoleScreenBufferInfo
|
||||
handle := syscall.Handle(file.Fd())
|
||||
procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
return &Writer{out: file, handle: handle, oldattr: csbi.attributes}
|
||||
} else {
|
||||
return file
|
||||
}
|
||||
}
|
||||
|
||||
func NewColorableStdout() io.Writer {
|
||||
return NewColorable(os.Stdout)
|
||||
}
|
||||
|
||||
func NewColorableStderr() io.Writer {
|
||||
return NewColorable(os.Stderr)
|
||||
}
|
||||
|
||||
var color256 = map[int]int{
|
||||
0: 0x000000,
|
||||
1: 0x800000,
|
||||
2: 0x008000,
|
||||
3: 0x808000,
|
||||
4: 0x000080,
|
||||
5: 0x800080,
|
||||
6: 0x008080,
|
||||
7: 0xc0c0c0,
|
||||
8: 0x808080,
|
||||
9: 0xff0000,
|
||||
10: 0x00ff00,
|
||||
11: 0xffff00,
|
||||
12: 0x0000ff,
|
||||
13: 0xff00ff,
|
||||
14: 0x00ffff,
|
||||
15: 0xffffff,
|
||||
16: 0x000000,
|
||||
17: 0x00005f,
|
||||
18: 0x000087,
|
||||
19: 0x0000af,
|
||||
20: 0x0000d7,
|
||||
21: 0x0000ff,
|
||||
22: 0x005f00,
|
||||
23: 0x005f5f,
|
||||
24: 0x005f87,
|
||||
25: 0x005faf,
|
||||
26: 0x005fd7,
|
||||
27: 0x005fff,
|
||||
28: 0x008700,
|
||||
29: 0x00875f,
|
||||
30: 0x008787,
|
||||
31: 0x0087af,
|
||||
32: 0x0087d7,
|
||||
33: 0x0087ff,
|
||||
34: 0x00af00,
|
||||
35: 0x00af5f,
|
||||
36: 0x00af87,
|
||||
37: 0x00afaf,
|
||||
38: 0x00afd7,
|
||||
39: 0x00afff,
|
||||
40: 0x00d700,
|
||||
41: 0x00d75f,
|
||||
42: 0x00d787,
|
||||
43: 0x00d7af,
|
||||
44: 0x00d7d7,
|
||||
45: 0x00d7ff,
|
||||
46: 0x00ff00,
|
||||
47: 0x00ff5f,
|
||||
48: 0x00ff87,
|
||||
49: 0x00ffaf,
|
||||
50: 0x00ffd7,
|
||||
51: 0x00ffff,
|
||||
52: 0x5f0000,
|
||||
53: 0x5f005f,
|
||||
54: 0x5f0087,
|
||||
55: 0x5f00af,
|
||||
56: 0x5f00d7,
|
||||
57: 0x5f00ff,
|
||||
58: 0x5f5f00,
|
||||
59: 0x5f5f5f,
|
||||
60: 0x5f5f87,
|
||||
61: 0x5f5faf,
|
||||
62: 0x5f5fd7,
|
||||
63: 0x5f5fff,
|
||||
64: 0x5f8700,
|
||||
65: 0x5f875f,
|
||||
66: 0x5f8787,
|
||||
67: 0x5f87af,
|
||||
68: 0x5f87d7,
|
||||
69: 0x5f87ff,
|
||||
70: 0x5faf00,
|
||||
71: 0x5faf5f,
|
||||
72: 0x5faf87,
|
||||
73: 0x5fafaf,
|
||||
74: 0x5fafd7,
|
||||
75: 0x5fafff,
|
||||
76: 0x5fd700,
|
||||
77: 0x5fd75f,
|
||||
78: 0x5fd787,
|
||||
79: 0x5fd7af,
|
||||
80: 0x5fd7d7,
|
||||
81: 0x5fd7ff,
|
||||
82: 0x5fff00,
|
||||
83: 0x5fff5f,
|
||||
84: 0x5fff87,
|
||||
85: 0x5fffaf,
|
||||
86: 0x5fffd7,
|
||||
87: 0x5fffff,
|
||||
88: 0x870000,
|
||||
89: 0x87005f,
|
||||
90: 0x870087,
|
||||
91: 0x8700af,
|
||||
92: 0x8700d7,
|
||||
93: 0x8700ff,
|
||||
94: 0x875f00,
|
||||
95: 0x875f5f,
|
||||
96: 0x875f87,
|
||||
97: 0x875faf,
|
||||
98: 0x875fd7,
|
||||
99: 0x875fff,
|
||||
100: 0x878700,
|
||||
101: 0x87875f,
|
||||
102: 0x878787,
|
||||
103: 0x8787af,
|
||||
104: 0x8787d7,
|
||||
105: 0x8787ff,
|
||||
106: 0x87af00,
|
||||
107: 0x87af5f,
|
||||
108: 0x87af87,
|
||||
109: 0x87afaf,
|
||||
110: 0x87afd7,
|
||||
111: 0x87afff,
|
||||
112: 0x87d700,
|
||||
113: 0x87d75f,
|
||||
114: 0x87d787,
|
||||
115: 0x87d7af,
|
||||
116: 0x87d7d7,
|
||||
117: 0x87d7ff,
|
||||
118: 0x87ff00,
|
||||
119: 0x87ff5f,
|
||||
120: 0x87ff87,
|
||||
121: 0x87ffaf,
|
||||
122: 0x87ffd7,
|
||||
123: 0x87ffff,
|
||||
124: 0xaf0000,
|
||||
125: 0xaf005f,
|
||||
126: 0xaf0087,
|
||||
127: 0xaf00af,
|
||||
128: 0xaf00d7,
|
||||
129: 0xaf00ff,
|
||||
130: 0xaf5f00,
|
||||
131: 0xaf5f5f,
|
||||
132: 0xaf5f87,
|
||||
133: 0xaf5faf,
|
||||
134: 0xaf5fd7,
|
||||
135: 0xaf5fff,
|
||||
136: 0xaf8700,
|
||||
137: 0xaf875f,
|
||||
138: 0xaf8787,
|
||||
139: 0xaf87af,
|
||||
140: 0xaf87d7,
|
||||
141: 0xaf87ff,
|
||||
142: 0xafaf00,
|
||||
143: 0xafaf5f,
|
||||
144: 0xafaf87,
|
||||
145: 0xafafaf,
|
||||
146: 0xafafd7,
|
||||
147: 0xafafff,
|
||||
148: 0xafd700,
|
||||
149: 0xafd75f,
|
||||
150: 0xafd787,
|
||||
151: 0xafd7af,
|
||||
152: 0xafd7d7,
|
||||
153: 0xafd7ff,
|
||||
154: 0xafff00,
|
||||
155: 0xafff5f,
|
||||
156: 0xafff87,
|
||||
157: 0xafffaf,
|
||||
158: 0xafffd7,
|
||||
159: 0xafffff,
|
||||
160: 0xd70000,
|
||||
161: 0xd7005f,
|
||||
162: 0xd70087,
|
||||
163: 0xd700af,
|
||||
164: 0xd700d7,
|
||||
165: 0xd700ff,
|
||||
166: 0xd75f00,
|
||||
167: 0xd75f5f,
|
||||
168: 0xd75f87,
|
||||
169: 0xd75faf,
|
||||
170: 0xd75fd7,
|
||||
171: 0xd75fff,
|
||||
172: 0xd78700,
|
||||
173: 0xd7875f,
|
||||
174: 0xd78787,
|
||||
175: 0xd787af,
|
||||
176: 0xd787d7,
|
||||
177: 0xd787ff,
|
||||
178: 0xd7af00,
|
||||
179: 0xd7af5f,
|
||||
180: 0xd7af87,
|
||||
181: 0xd7afaf,
|
||||
182: 0xd7afd7,
|
||||
183: 0xd7afff,
|
||||
184: 0xd7d700,
|
||||
185: 0xd7d75f,
|
||||
186: 0xd7d787,
|
||||
187: 0xd7d7af,
|
||||
188: 0xd7d7d7,
|
||||
189: 0xd7d7ff,
|
||||
190: 0xd7ff00,
|
||||
191: 0xd7ff5f,
|
||||
192: 0xd7ff87,
|
||||
193: 0xd7ffaf,
|
||||
194: 0xd7ffd7,
|
||||
195: 0xd7ffff,
|
||||
196: 0xff0000,
|
||||
197: 0xff005f,
|
||||
198: 0xff0087,
|
||||
199: 0xff00af,
|
||||
200: 0xff00d7,
|
||||
201: 0xff00ff,
|
||||
202: 0xff5f00,
|
||||
203: 0xff5f5f,
|
||||
204: 0xff5f87,
|
||||
205: 0xff5faf,
|
||||
206: 0xff5fd7,
|
||||
207: 0xff5fff,
|
||||
208: 0xff8700,
|
||||
209: 0xff875f,
|
||||
210: 0xff8787,
|
||||
211: 0xff87af,
|
||||
212: 0xff87d7,
|
||||
213: 0xff87ff,
|
||||
214: 0xffaf00,
|
||||
215: 0xffaf5f,
|
||||
216: 0xffaf87,
|
||||
217: 0xffafaf,
|
||||
218: 0xffafd7,
|
||||
219: 0xffafff,
|
||||
220: 0xffd700,
|
||||
221: 0xffd75f,
|
||||
222: 0xffd787,
|
||||
223: 0xffd7af,
|
||||
224: 0xffd7d7,
|
||||
225: 0xffd7ff,
|
||||
226: 0xffff00,
|
||||
227: 0xffff5f,
|
||||
228: 0xffff87,
|
||||
229: 0xffffaf,
|
||||
230: 0xffffd7,
|
||||
231: 0xffffff,
|
||||
232: 0x080808,
|
||||
233: 0x121212,
|
||||
234: 0x1c1c1c,
|
||||
235: 0x262626,
|
||||
236: 0x303030,
|
||||
237: 0x3a3a3a,
|
||||
238: 0x444444,
|
||||
239: 0x4e4e4e,
|
||||
240: 0x585858,
|
||||
241: 0x626262,
|
||||
242: 0x6c6c6c,
|
||||
243: 0x767676,
|
||||
244: 0x808080,
|
||||
245: 0x8a8a8a,
|
||||
246: 0x949494,
|
||||
247: 0x9e9e9e,
|
||||
248: 0xa8a8a8,
|
||||
249: 0xb2b2b2,
|
||||
250: 0xbcbcbc,
|
||||
251: 0xc6c6c6,
|
||||
252: 0xd0d0d0,
|
||||
253: 0xdadada,
|
||||
254: 0xe4e4e4,
|
||||
255: 0xeeeeee,
|
||||
}
|
||||
|
||||
func (w *Writer) Write(data []byte) (n int, err error) {
|
||||
var csbi consoleScreenBufferInfo
|
||||
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
|
||||
er := bytes.NewBuffer(data)
|
||||
loop:
|
||||
for {
|
||||
r1, _, err := procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
if r1 == 0 {
|
||||
break loop
|
||||
}
|
||||
|
||||
c1, _, err := er.ReadRune()
|
||||
if err != nil {
|
||||
break loop
|
||||
}
|
||||
if c1 != 0x1b {
|
||||
fmt.Fprint(w.out, string(c1))
|
||||
continue
|
||||
}
|
||||
c2, _, err := er.ReadRune()
|
||||
if err != nil {
|
||||
w.lastbuf.WriteRune(c1)
|
||||
break loop
|
||||
}
|
||||
if c2 != 0x5b {
|
||||
w.lastbuf.WriteRune(c1)
|
||||
w.lastbuf.WriteRune(c2)
|
||||
continue
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
var m rune
|
||||
for {
|
||||
c, _, err := er.ReadRune()
|
||||
if err != nil {
|
||||
w.lastbuf.WriteRune(c1)
|
||||
w.lastbuf.WriteRune(c2)
|
||||
w.lastbuf.Write(buf.Bytes())
|
||||
break loop
|
||||
}
|
||||
if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
|
||||
m = c
|
||||
break
|
||||
}
|
||||
buf.Write([]byte(string(c)))
|
||||
}
|
||||
|
||||
var csbi consoleScreenBufferInfo
|
||||
switch m {
|
||||
case 'A':
|
||||
n, err = strconv.Atoi(buf.String())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
csbi.cursorPosition.y -= short(n)
|
||||
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||
case 'B':
|
||||
n, err = strconv.Atoi(buf.String())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
csbi.cursorPosition.y += short(n)
|
||||
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||
case 'C':
|
||||
n, err = strconv.Atoi(buf.String())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
csbi.cursorPosition.x -= short(n)
|
||||
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||
case 'D':
|
||||
n, err = strconv.Atoi(buf.String())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if n, err = strconv.Atoi(buf.String()); err == nil {
|
||||
var csbi consoleScreenBufferInfo
|
||||
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
csbi.cursorPosition.x += short(n)
|
||||
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||
}
|
||||
case 'E':
|
||||
n, err = strconv.Atoi(buf.String())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
csbi.cursorPosition.x = 0
|
||||
csbi.cursorPosition.y += short(n)
|
||||
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||
case 'F':
|
||||
n, err = strconv.Atoi(buf.String())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
csbi.cursorPosition.x = 0
|
||||
csbi.cursorPosition.y -= short(n)
|
||||
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||
case 'G':
|
||||
n, err = strconv.Atoi(buf.String())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
csbi.cursorPosition.x = short(n-1)
|
||||
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||
case 'H':
|
||||
token := strings.Split(buf.String(), ";")
|
||||
if len(token) != 2 {
|
||||
continue
|
||||
}
|
||||
n1, err := strconv.Atoi(token[0])
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
n2, err := strconv.Atoi(token[1])
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
csbi.cursorPosition.x = short(n2-1)
|
||||
csbi.cursorPosition.y = short(n1-1)
|
||||
procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
|
||||
case 'J':
|
||||
n, err := strconv.Atoi(buf.String())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
var cursor coord
|
||||
switch n {
|
||||
case 0:
|
||||
cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y}
|
||||
case 1:
|
||||
cursor = coord{x: csbi.window.left, y: csbi.window.top}
|
||||
case 2:
|
||||
cursor = coord{x: csbi.window.left, y: csbi.window.top}
|
||||
}
|
||||
var count, written dword
|
||||
count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x)
|
||||
procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
|
||||
procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
|
||||
case 'K':
|
||||
n, err := strconv.Atoi(buf.String())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
var cursor coord
|
||||
switch n {
|
||||
case 0:
|
||||
cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y}
|
||||
case 1:
|
||||
cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
|
||||
case 2:
|
||||
cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
|
||||
}
|
||||
var count, written dword
|
||||
count = dword(csbi.size.x - csbi.cursorPosition.x)
|
||||
procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
|
||||
procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
|
||||
case 'm':
|
||||
procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
|
||||
attr := csbi.attributes
|
||||
cs := buf.String()
|
||||
if cs == "" {
|
||||
procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr))
|
||||
continue
|
||||
}
|
||||
token := strings.Split(cs, ";")
|
||||
for i := 0; i < len(token); i++ {
|
||||
ns := token[i]
|
||||
if n, err = strconv.Atoi(ns); err == nil {
|
||||
switch {
|
||||
case n == 0 || n == 100:
|
||||
attr = w.oldattr
|
||||
case 1 <= n && n <= 5:
|
||||
attr |= foregroundIntensity
|
||||
case n == 7:
|
||||
attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
|
||||
case 22 == n || n == 25 || n == 25:
|
||||
attr |= foregroundIntensity
|
||||
case n == 27:
|
||||
attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
|
||||
case 30 <= n && n <= 37:
|
||||
attr &= backgroundMask
|
||||
if (n-30)&1 != 0 {
|
||||
attr |= foregroundRed
|
||||
}
|
||||
if (n-30)&2 != 0 {
|
||||
attr |= foregroundGreen
|
||||
}
|
||||
if (n-30)&4 != 0 {
|
||||
attr |= foregroundBlue
|
||||
}
|
||||
case n == 38: // set foreground color.
|
||||
if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") {
|
||||
if n256, err := strconv.Atoi(token[i+2]); err == nil {
|
||||
if n256foreAttr == nil {
|
||||
n256setup()
|
||||
}
|
||||
attr &= backgroundMask
|
||||
attr |= n256foreAttr[n256]
|
||||
i += 2
|
||||
}
|
||||
} else {
|
||||
attr = attr & (w.oldattr & backgroundMask)
|
||||
}
|
||||
case n == 39: // reset foreground color.
|
||||
attr &= backgroundMask
|
||||
attr |= w.oldattr & foregroundMask
|
||||
case 40 <= n && n <= 47:
|
||||
attr &= foregroundMask
|
||||
if (n-40)&1 != 0 {
|
||||
attr |= backgroundRed
|
||||
}
|
||||
if (n-40)&2 != 0 {
|
||||
attr |= backgroundGreen
|
||||
}
|
||||
if (n-40)&4 != 0 {
|
||||
attr |= backgroundBlue
|
||||
}
|
||||
case n == 48: // set background color.
|
||||
if i < len(token)-2 && token[i+1] == "5" {
|
||||
if n256, err := strconv.Atoi(token[i+2]); err == nil {
|
||||
if n256backAttr == nil {
|
||||
n256setup()
|
||||
}
|
||||
attr &= foregroundMask
|
||||
attr |= n256backAttr[n256]
|
||||
i += 2
|
||||
}
|
||||
} else {
|
||||
attr = attr & (w.oldattr & foregroundMask)
|
||||
}
|
||||
case n == 49: // reset foreground color.
|
||||
attr &= foregroundMask
|
||||
attr |= w.oldattr & backgroundMask
|
||||
case 90 <= n && n <= 97:
|
||||
attr = (attr & backgroundMask)
|
||||
attr |= foregroundIntensity
|
||||
if (n-90)&1 != 0 {
|
||||
attr |= foregroundRed
|
||||
}
|
||||
if (n-90)&2 != 0 {
|
||||
attr |= foregroundGreen
|
||||
}
|
||||
if (n-90)&4 != 0 {
|
||||
attr |= foregroundBlue
|
||||
}
|
||||
case 100 <= n && n <= 107:
|
||||
attr = (attr & foregroundMask)
|
||||
attr |= backgroundIntensity
|
||||
if (n-100)&1 != 0 {
|
||||
attr |= backgroundRed
|
||||
}
|
||||
if (n-100)&2 != 0 {
|
||||
attr |= backgroundGreen
|
||||
}
|
||||
if (n-100)&4 != 0 {
|
||||
attr |= backgroundBlue
|
||||
}
|
||||
}
|
||||
procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr))
|
||||
}
|
||||
}
|
||||
case 'h':
|
||||
cs := buf.String()
|
||||
if cs == "?25" {
|
||||
var ci consoleCursorInfo
|
||||
procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
|
||||
ci.visible = 1
|
||||
procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
|
||||
}
|
||||
case 'l':
|
||||
cs := buf.String()
|
||||
if cs == "?25" {
|
||||
var ci consoleCursorInfo
|
||||
procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
|
||||
ci.visible = 0
|
||||
procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
|
||||
}
|
||||
}
|
||||
}
|
||||
return len(data) - w.lastbuf.Len(), nil
|
||||
}
|
||||
|
||||
type consoleColor struct {
|
||||
rgb int
|
||||
red bool
|
||||
green bool
|
||||
blue bool
|
||||
intensity bool
|
||||
}
|
||||
|
||||
func (c consoleColor) foregroundAttr() (attr word) {
|
||||
if c.red {
|
||||
attr |= foregroundRed
|
||||
}
|
||||
if c.green {
|
||||
attr |= foregroundGreen
|
||||
}
|
||||
if c.blue {
|
||||
attr |= foregroundBlue
|
||||
}
|
||||
if c.intensity {
|
||||
attr |= foregroundIntensity
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (c consoleColor) backgroundAttr() (attr word) {
|
||||
if c.red {
|
||||
attr |= backgroundRed
|
||||
}
|
||||
if c.green {
|
||||
attr |= backgroundGreen
|
||||
}
|
||||
if c.blue {
|
||||
attr |= backgroundBlue
|
||||
}
|
||||
if c.intensity {
|
||||
attr |= backgroundIntensity
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var color16 = []consoleColor{
|
||||
consoleColor{0x000000, false, false, false, false},
|
||||
consoleColor{0x000080, false, false, true, false},
|
||||
consoleColor{0x008000, false, true, false, false},
|
||||
consoleColor{0x008080, false, true, true, false},
|
||||
consoleColor{0x800000, true, false, false, false},
|
||||
consoleColor{0x800080, true, false, true, false},
|
||||
consoleColor{0x808000, true, true, false, false},
|
||||
consoleColor{0xc0c0c0, true, true, true, false},
|
||||
consoleColor{0x808080, false, false, false, true},
|
||||
consoleColor{0x0000ff, false, false, true, true},
|
||||
consoleColor{0x00ff00, false, true, false, true},
|
||||
consoleColor{0x00ffff, false, true, true, true},
|
||||
consoleColor{0xff0000, true, false, false, true},
|
||||
consoleColor{0xff00ff, true, false, true, true},
|
||||
consoleColor{0xffff00, true, true, false, true},
|
||||
consoleColor{0xffffff, true, true, true, true},
|
||||
}
|
||||
|
||||
type hsv struct {
|
||||
h, s, v float32
|
||||
}
|
||||
|
||||
func (a hsv) dist(b hsv) float32 {
|
||||
dh := a.h - b.h
|
||||
switch {
|
||||
case dh > 0.5:
|
||||
dh = 1 - dh
|
||||
case dh < -0.5:
|
||||
dh = -1 - dh
|
||||
}
|
||||
ds := a.s - b.s
|
||||
dv := a.v - b.v
|
||||
return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv)))
|
||||
}
|
||||
|
||||
func toHSV(rgb int) hsv {
|
||||
r, g, b := float32((rgb&0xFF0000)>>16)/256.0,
|
||||
float32((rgb&0x00FF00)>>8)/256.0,
|
||||
float32(rgb&0x0000FF)/256.0
|
||||
min, max := minmax3f(r, g, b)
|
||||
h := max - min
|
||||
if h > 0 {
|
||||
if max == r {
|
||||
h = (g - b) / h
|
||||
if h < 0 {
|
||||
h += 6
|
||||
}
|
||||
} else if max == g {
|
||||
h = 2 + (b-r)/h
|
||||
} else {
|
||||
h = 4 + (r-g)/h
|
||||
}
|
||||
}
|
||||
h /= 6.0
|
||||
s := max - min
|
||||
if max != 0 {
|
||||
s /= max
|
||||
}
|
||||
v := max
|
||||
return hsv{h: h, s: s, v: v}
|
||||
}
|
||||
|
||||
type hsvTable []hsv
|
||||
|
||||
func toHSVTable(rgbTable []consoleColor) hsvTable {
|
||||
t := make(hsvTable, len(rgbTable))
|
||||
for i, c := range rgbTable {
|
||||
t[i] = toHSV(c.rgb)
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
func (t hsvTable) find(rgb int) consoleColor {
|
||||
hsv := toHSV(rgb)
|
||||
n := 7
|
||||
l := float32(5.0)
|
||||
for i, p := range t {
|
||||
d := hsv.dist(p)
|
||||
if d < l {
|
||||
l, n = d, i
|
||||
}
|
||||
}
|
||||
return color16[n]
|
||||
}
|
||||
|
||||
func minmax3f(a, b, c float32) (min, max float32) {
|
||||
if a < b {
|
||||
if b < c {
|
||||
return a, c
|
||||
} else if a < c {
|
||||
return a, b
|
||||
} else {
|
||||
return c, b
|
||||
}
|
||||
} else {
|
||||
if a < c {
|
||||
return b, c
|
||||
} else if b < c {
|
||||
return b, a
|
||||
} else {
|
||||
return c, a
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var n256foreAttr []word
|
||||
var n256backAttr []word
|
||||
|
||||
func n256setup() {
|
||||
n256foreAttr = make([]word, 256)
|
||||
n256backAttr = make([]word, 256)
|
||||
t := toHSVTable(color16)
|
||||
for i, rgb := range color256 {
|
||||
c := t.find(rgb)
|
||||
n256foreAttr[i] = c.foregroundAttr()
|
||||
n256backAttr[i] = c.backgroundAttr()
|
||||
}
|
||||
}
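// Illustrative sketch (not part of the upstream file): once n256setup has
// run, a 256-color SGR parameter n is approximated on the Windows console by
// looking up the precomputed attribute tables, e.g.
//
//	n256setup()
//	fore := n256foreAttr[196] // nearest 16-color foreground attribute for 256-color index 196
//	back := n256backAttr[21]  // nearest 16-color background attribute for 256-color index 21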
|
|
@@ -0,0 +1,57 @@
|
|||
package colorable
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
type NonColorable struct {
|
||||
out io.Writer
|
||||
lastbuf bytes.Buffer
|
||||
}
|
||||
|
||||
func NewNonColorable(w io.Writer) io.Writer {
|
||||
return &NonColorable{out: w}
|
||||
}
|
||||
|
||||
func (w *NonColorable) Write(data []byte) (n int, err error) {
|
||||
er := bytes.NewBuffer(data)
|
||||
loop:
|
||||
for {
|
||||
c1, _, err := er.ReadRune()
|
||||
if err != nil {
|
||||
break loop
|
||||
}
|
||||
if c1 != 0x1b {
|
||||
fmt.Fprint(w.out, string(c1))
|
||||
continue
|
||||
}
|
||||
c2, _, err := er.ReadRune()
|
||||
if err != nil {
|
||||
w.lastbuf.WriteRune(c1)
|
||||
break loop
|
||||
}
|
||||
if c2 != 0x5b {
|
||||
w.lastbuf.WriteRune(c1)
|
||||
w.lastbuf.WriteRune(c2)
|
||||
continue
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
for {
|
||||
c, _, err := er.ReadRune()
|
||||
if err != nil {
|
||||
w.lastbuf.WriteRune(c1)
|
||||
w.lastbuf.WriteRune(c2)
|
||||
w.lastbuf.Write(buf.Bytes())
|
||||
break loop
|
||||
}
|
||||
if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
|
||||
break
|
||||
}
|
||||
buf.Write([]byte(string(c)))
|
||||
}
|
||||
}
|
||||
return len(data) - w.lastbuf.Len(), nil
|
||||
}
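// Usage sketch (illustrative, not part of the upstream file): wrapping a
// writer with NewNonColorable strips ANSI escape sequences, which is useful
// when colored output is redirected to a plain file or pipe, e.g.
//
//	w := colorable.NewNonColorable(os.Stdout)
//	fmt.Fprintln(w, "\x1b[31mred text\x1b[0m") // prints "red text" without escape codes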
|
|
@@ -0,0 +1,9 @@
|
|||
The MIT License (MIT)
|
||||
Copyright (c) 2013 Mario L. Gutierrez
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
|
@@ -0,0 +1,119 @@
|
|||
# ansi
|
||||
|
||||
Package ansi is a small, fast library to create ANSI colored strings and codes.
|
||||
|
||||
## Install
|
||||
|
||||
This installs the color viewer and the package itself.
|
||||
|
||||
```sh
|
||||
go get -u github.com/mgutz/ansi/cmd/ansi-mgutz
|
||||
```
|
||||
|
||||
## Example
|
||||
|
||||
```go
|
||||
import "github.com/mgutz/ansi"
|
||||
|
||||
// colorize a string, SLOW
|
||||
msg := ansi.Color("foo", "red+b:white")
|
||||
|
||||
// create a closure to avoid recalculating the ANSI code
|
||||
phosphorize := ansi.ColorFunc("green+h:black")
|
||||
msg = phosphorize("Bring back the 80s!")
|
||||
msg2 := phosphorize("Look, I'm a CRT!")
|
||||
|
||||
// cache escape codes and build strings manually
|
||||
lime := ansi.ColorCode("green+h:black")
|
||||
reset := ansi.ColorCode("reset")
|
||||
|
||||
fmt.Println(lime, "Bring back the 80s!", reset)
|
||||
```
|
||||
|
||||
Other examples
|
||||
|
||||
```go
|
||||
Color(s, "red") // red
|
||||
Color(s, "red+b") // red bold
|
||||
Color(s, "red+B") // red blinking
|
||||
Color(s, "red+u") // red underline
|
||||
Color(s, "red+bh") // red bold bright
|
||||
Color(s, "red:white") // red on white
|
||||
Color(s, "red+b:white+h") // red bold on white bright
|
||||
Color(s, "red+B:white+h") // red blink on white bright
|
||||
Color(s, "off") // turn off ansi codes
|
||||
```
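
A further illustration (not from the upstream README): the numeric color names registered at init map to 256-color escape codes, assuming a 256-color terminal.

```go
// 256-color foreground (code 214) on a black background
orange := ansi.ColorCode("214:black")
fmt.Println(orange, "orange-ish text", ansi.Reset)
```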
|
||||
|
||||
To view color combinations, run from the terminal:
|
||||
|
||||
```sh
|
||||
ansi-mgutz
|
||||
```
|
||||
|
||||
## Style format
|
||||
|
||||
```go
|
||||
"foregroundColor+attributes:backgroundColor+attributes"
|
||||
```
|
||||
|
||||
Colors
|
||||
|
||||
* black
|
||||
* red
|
||||
* green
|
||||
* yellow
|
||||
* blue
|
||||
* magenta
|
||||
* cyan
|
||||
* white
|
||||
* 0...255 (256 colors)
|
||||
|
||||
Attributes
|
||||
|
||||
* b = bold foreground
|
||||
* B = Blink foreground
|
||||
* u = underline foreground
|
||||
* i = inverse
|
||||
* h = high intensity (bright) foreground, background
|
||||
|
||||
Attributes do not work with 256 colors.
|
||||
|
||||
## Constants
|
||||
|
||||
* ansi.Reset
|
||||
* ansi.DefaultBG
|
||||
* ansi.DefaultFG
|
||||
* ansi.Black
|
||||
* ansi.Red
|
||||
* ansi.Green
|
||||
* ansi.Yellow
|
||||
* ansi.Blue
|
||||
* ansi.Magenta
|
||||
* ansi.Cyan
|
||||
* ansi.White
|
||||
* ansi.LightBlack
|
||||
* ansi.LightRed
|
||||
* ansi.LightGreen
|
||||
* ansi.LightYellow
|
||||
* ansi.LightBlue
|
||||
* ansi.LightMagenta
|
||||
* ansi.LightCyan
|
||||
* ansi.LightWhite
|
||||
|
||||
|
||||
## References
|
||||
|
||||
Wikipedia ANSI escape codes [Colors](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors)
|
||||
|
||||
General [tips and formatting](http://misc.flogisoft.com/bash/tip_colors_and_formatting)
|
||||
|
||||
What about support on Windows? Use [colorable by mattn](https://github.com/mattn/go-colorable).
|
||||
Ansi and colorable are used by [logxi](https://github.com/mgutz/logxi) to support logging in
|
||||
color on Windows.
|
||||
|
||||
## MIT License
|
||||
|
||||
Copyright (c) 2013 Mario Gutierrez mario@mgutz.com
|
||||
|
||||
See the file LICENSE for copying permission.
|
||||
|
|
@@ -0,0 +1,246 @@
|
|||
package ansi
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
black = iota
|
||||
red
|
||||
green
|
||||
yellow
|
||||
blue
|
||||
magenta
|
||||
cyan
|
||||
white
|
||||
defaultt = 9
|
||||
|
||||
normalIntensityFG = 30
|
||||
highIntensityFG = 90
|
||||
normalIntensityBG = 40
|
||||
highIntensityBG = 100
|
||||
|
||||
start = "\033["
|
||||
bold = "1;"
|
||||
blink = "5;"
|
||||
underline = "4;"
|
||||
inverse = "7;"
|
||||
|
||||
// Reset is the ANSI reset escape sequence
|
||||
Reset = "\033[0m"
|
||||
// DefaultBG is the default background
|
||||
DefaultBG = "\033[49m"
|
||||
// DefaultFG is the default foreground
|
||||
DefaultFG = "\033[39m"
|
||||
)
|
||||
|
||||
// Black FG
|
||||
var Black string
|
||||
|
||||
// Red FG
|
||||
var Red string
|
||||
|
||||
// Green FG
|
||||
var Green string
|
||||
|
||||
// Yellow FG
|
||||
var Yellow string
|
||||
|
||||
// Blue FG
|
||||
var Blue string
|
||||
|
||||
// Magenta FG
|
||||
var Magenta string
|
||||
|
||||
// Cyan FG
|
||||
var Cyan string
|
||||
|
||||
// White FG
|
||||
var White string
|
||||
|
||||
// LightBlack FG
|
||||
var LightBlack string
|
||||
|
||||
// LightRed FG
|
||||
var LightRed string
|
||||
|
||||
// LightGreen FG
|
||||
var LightGreen string
|
||||
|
||||
// LightYellow FG
|
||||
var LightYellow string
|
||||
|
||||
// LightBlue FG
|
||||
var LightBlue string
|
||||
|
||||
// LightMagenta FG
|
||||
var LightMagenta string
|
||||
|
||||
// LightCyan FG
|
||||
var LightCyan string
|
||||
|
||||
// LightWhite FG
|
||||
var LightWhite string
|
||||
|
||||
var (
|
||||
plain = false
|
||||
// Colors maps common color names to their ANSI color code.
|
||||
Colors = map[string]int{
|
||||
"black": black,
|
||||
"red": red,
|
||||
"green": green,
|
||||
"yellow": yellow,
|
||||
"blue": blue,
|
||||
"magenta": magenta,
|
||||
"cyan": cyan,
|
||||
"white": white,
|
||||
"default": defaultt,
|
||||
}
|
||||
)
|
||||
|
||||
func init() {
|
||||
for i := 0; i < 256; i++ {
|
||||
Colors[strconv.Itoa(i)] = i
|
||||
}
|
||||
|
||||
Black = ColorCode("black")
|
||||
Red = ColorCode("red")
|
||||
Green = ColorCode("green")
|
||||
Yellow = ColorCode("yellow")
|
||||
Blue = ColorCode("blue")
|
||||
Magenta = ColorCode("magenta")
|
||||
Cyan = ColorCode("cyan")
|
||||
White = ColorCode("white")
|
||||
LightBlack = ColorCode("black+h")
|
||||
LightRed = ColorCode("red+h")
|
||||
LightGreen = ColorCode("green+h")
|
||||
LightYellow = ColorCode("yellow+h")
|
||||
LightBlue = ColorCode("blue+h")
|
||||
LightMagenta = ColorCode("magenta+h")
|
||||
LightCyan = ColorCode("cyan+h")
|
||||
LightWhite = ColorCode("white+h")
|
||||
}
|
||||
|
||||
// ColorCode returns the ANSI color code for style.
|
||||
func ColorCode(style string) string {
|
||||
return colorCode(style).String()
|
||||
}
|
||||
|
||||
// Gets the ANSI color code for a style.
|
||||
func colorCode(style string) *bytes.Buffer {
|
||||
buf := bytes.NewBufferString("")
|
||||
if plain || style == "" {
|
||||
return buf
|
||||
}
|
||||
if style == "reset" {
|
||||
buf.WriteString(Reset)
|
||||
return buf
|
||||
} else if style == "off" {
|
||||
return buf
|
||||
}
|
||||
|
||||
foregroundBackground := strings.Split(style, ":")
|
||||
foreground := strings.Split(foregroundBackground[0], "+")
|
||||
fgKey := foreground[0]
|
||||
fg := Colors[fgKey]
|
||||
fgStyle := ""
|
||||
if len(foreground) > 1 {
|
||||
fgStyle = foreground[1]
|
||||
}
|
||||
|
||||
bg, bgStyle := "", ""
|
||||
|
||||
if len(foregroundBackground) > 1 {
|
||||
background := strings.Split(foregroundBackground[1], "+")
|
||||
bg = background[0]
|
||||
if len(background) > 1 {
|
||||
bgStyle = background[1]
|
||||
}
|
||||
}
|
||||
|
||||
buf.WriteString(start)
|
||||
base := normalIntensityFG
|
||||
if len(fgStyle) > 0 {
|
||||
if strings.Contains(fgStyle, "b") {
|
||||
buf.WriteString(bold)
|
||||
}
|
||||
if strings.Contains(fgStyle, "B") {
|
||||
buf.WriteString(blink)
|
||||
}
|
||||
if strings.Contains(fgStyle, "u") {
|
||||
buf.WriteString(underline)
|
||||
}
|
||||
if strings.Contains(fgStyle, "i") {
|
||||
buf.WriteString(inverse)
|
||||
}
|
||||
if strings.Contains(fgStyle, "h") {
|
||||
base = highIntensityFG
|
||||
}
|
||||
}
|
||||
|
||||
// if 256-color
|
||||
n, err := strconv.Atoi(fgKey)
|
||||
if err == nil {
|
||||
fmt.Fprintf(buf, "38;5;%d;", n)
|
||||
} else {
|
||||
fmt.Fprintf(buf, "%d;", base+fg)
|
||||
}
|
||||
|
||||
base = normalIntensityBG
|
||||
if len(bg) > 0 {
|
||||
if strings.Contains(bgStyle, "h") {
|
||||
base = highIntensityBG
|
||||
}
|
||||
// if 256-color
|
||||
n, err := strconv.Atoi(bg)
|
||||
if err == nil {
|
||||
fmt.Fprintf(buf, "48;5;%d;", n)
|
||||
} else {
|
||||
fmt.Fprintf(buf, "%d;", base+Colors[bg])
|
||||
}
|
||||
}
|
||||
|
||||
// remove last ";"
|
||||
buf.Truncate(buf.Len() - 1)
|
||||
buf.WriteRune('m')
|
||||
return buf
|
||||
}
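// Illustrative note (not part of the upstream file): for the style
// "red+b:white" the buffer built above ends up as
//
//	"\033[1;31;47m"
//
// i.e. bold (1), red foreground (30+1) and white background (40+7).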
|
||||
|
||||
// Color colors a string based on the ANSI color code for style.
|
||||
func Color(s, style string) string {
|
||||
if plain || len(style) < 1 {
|
||||
return s
|
||||
}
|
||||
buf := colorCode(style)
|
||||
buf.WriteString(s)
|
||||
buf.WriteString(Reset)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// ColorFunc creates a closure to avoid recalculating ANSI color codes.
|
||||
func ColorFunc(style string) func(string) string {
|
||||
if style == "" {
|
||||
return func(s string) string {
|
||||
return s
|
||||
}
|
||||
}
|
||||
color := ColorCode(style)
|
||||
return func(s string) string {
|
||||
if plain || s == "" {
|
||||
return s
|
||||
}
|
||||
buf := bytes.NewBufferString(color)
|
||||
buf.WriteString(s)
|
||||
buf.WriteString(Reset)
|
||||
result := buf.String()
|
||||
return result
|
||||
}
|
||||
}
|
||||
|
||||
// DisableColors disables ANSI color codes. On by default.
|
||||
func DisableColors(disable bool) {
|
||||
plain = disable
|
||||
}
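// Usage sketch (illustrative, not part of the upstream file): disabling colors
// keeps test assertions free of escape codes, e.g.
//
//	ansi.DisableColors(true)
//	got := ansi.Color("hello", "red") // returns "hello" unchanged while colors are off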
|
|
@@ -0,0 +1,65 @@
|
|||
/*
|
||||
Package ansi is a small, fast library to create ANSI colored strings and codes.
|
||||
|
||||
Installation
|
||||
|
||||
# this installs the color viewer and the package
|
||||
go get -u github.com/mgutz/ansi/cmd/ansi-mgutz
|
||||
|
||||
Example
|
||||
|
||||
// colorize a string, SLOW
|
||||
msg := ansi.Color("foo", "red+b:white")
|
||||
|
||||
// create a closure to avoid recalculating the ANSI code
|
||||
phosphorize := ansi.ColorFunc("green+h:black")
|
||||
msg = phosphorize("Bring back the 80s!")
|
||||
msg2 := phosphorize("Look, I'm a CRT!")
|
||||
|
||||
// cache escape codes and build strings manually
|
||||
lime := ansi.ColorCode("green+h:black")
|
||||
reset := ansi.ColorCode("reset")
|
||||
|
||||
fmt.Println(lime, "Bring back the 80s!", reset)
|
||||
|
||||
Other examples
|
||||
|
||||
Color(s, "red") // red
|
||||
Color(s, "red+b") // red bold
|
||||
Color(s, "red+B") // red blinking
|
||||
Color(s, "red+u") // red underline
|
||||
Color(s, "red+bh") // red bold bright
|
||||
Color(s, "red:white") // red on white
|
||||
Color(s, "red+b:white+h") // red bold on white bright
|
||||
Color(s, "red+B:white+h") // red blink on white bright
|
||||
|
||||
To view color combinations, run from the terminal
|
||||
|
||||
ansi-mgutz
|
||||
|
||||
Style format
|
||||
|
||||
"foregroundColor+attributes:backgroundColor+attributes"
|
||||
|
||||
Colors
|
||||
|
||||
black
|
||||
red
|
||||
green
|
||||
yellow
|
||||
blue
|
||||
magenta
|
||||
cyan
|
||||
white
|
||||
|
||||
Attributes
|
||||
|
||||
b = bold foreground
|
||||
B = Blink foreground
|
||||
u = underline foreground
|
||||
h = high intensity (bright) foreground, background
|
||||
i = inverse
|
||||
|
||||
Wikipedia ANSI escape codes [Colors](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors)
|
||||
*/
|
||||
package ansi
|
|
@@ -0,0 +1,42 @@
|
|||
package ansi
|
||||
|
||||
// PrintStyles prints all style combinations to the terminal.
|
||||
func PrintStyles() {
|
||||
oldPlain := plain
|
||||
plain = false
|
||||
|
||||
bgColors := []string{
|
||||
"",
|
||||
":black",
|
||||
":red",
|
||||
":green",
|
||||
":yellow",
|
||||
":blue",
|
||||
":magenta",
|
||||
":cyan",
|
||||
":white",
|
||||
}
|
||||
for fg := range Colors {
|
||||
for _, bg := range bgColors {
|
||||
println(padColor(fg, []string{"" + bg, "+b" + bg, "+bh" + bg, "+u" + bg}))
|
||||
println(padColor(fg, []string{"+uh" + bg, "+B" + bg, "+Bb" + bg /* backgrounds */, "" + bg + "+h"}))
|
||||
println(padColor(fg, []string{"+b" + bg + "+h", "+bh" + bg + "+h", "+u" + bg + "+h", "+uh" + bg + "+h"}))
|
||||
}
|
||||
}
|
||||
plain = oldPlain
|
||||
}
|
||||
|
||||
func pad(s string, length int) string {
|
||||
for len(s) < length {
|
||||
s += " "
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func padColor(s string, styles []string) string {
|
||||
buffer := ""
|
||||
for _, style := range styles {
|
||||
buffer += Color(pad(s+style, 20), s+style)
|
||||
}
|
||||
return buffer
|
||||
}
|
|
@@ -0,0 +1,261 @@
|
|||
package log
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/mgutz/ansi"
|
||||
)
|
||||
|
||||
type sourceLine struct {
|
||||
lineno int
|
||||
line string
|
||||
}
|
||||
|
||||
type frameInfo struct {
|
||||
filename string
|
||||
lineno int
|
||||
method string
|
||||
context []*sourceLine
|
||||
contextLines int
|
||||
}
|
||||
|
||||
func (ci *frameInfo) readSource(contextLines int) error {
|
||||
if ci.lineno == 0 || disableCallstack {
|
||||
return nil
|
||||
}
|
||||
start := maxInt(1, ci.lineno-contextLines)
|
||||
end := ci.lineno + contextLines
|
||||
|
||||
f, err := os.Open(ci.filename)
|
||||
if err != nil {
|
||||
// if we can't read a file, it means user is running this in production
|
||||
disableCallstack = true
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
lineno := 1
|
||||
scanner := bufio.NewScanner(f)
|
||||
for scanner.Scan() {
|
||||
if start <= lineno && lineno <= end {
|
||||
line := scanner.Text()
|
||||
line = expandTabs(line, 4)
|
||||
ci.context = append(ci.context, &sourceLine{lineno: lineno, line: line})
|
||||
}
|
||||
lineno++
|
||||
}
|
||||
|
||||
if err := scanner.Err(); err != nil {
|
||||
InternalLog.Warn("scanner error", "file", ci.filename, "err", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ci *frameInfo) String(color string, sourceColor string) string {
|
||||
buf := pool.Get()
|
||||
defer pool.Put(buf)
|
||||
|
||||
if disableCallstack {
|
||||
buf.WriteString(color)
|
||||
buf.WriteString(Separator)
|
||||
buf.WriteString(indent)
|
||||
buf.WriteString(ci.filename)
|
||||
buf.WriteRune(':')
|
||||
buf.WriteString(strconv.Itoa(ci.lineno))
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// skip anything in the logxi package
|
||||
if isLogxiCode(ci.filename) {
|
||||
return ""
|
||||
}
|
||||
|
||||
// make path relative to current working directory or home
|
||||
tildeFilename, err := filepath.Rel(wd, ci.filename)
|
||||
if err != nil {
|
||||
InternalLog.Warn("Could not make path relative", "path", ci.filename)
|
||||
return ""
|
||||
}
|
||||
// ../../../ is too complex. Make path relative to home
|
||||
if strings.HasPrefix(tildeFilename, strings.Repeat(".."+string(os.PathSeparator), 3)) {
|
||||
tildeFilename = strings.Replace(tildeFilename, home, "~", 1)
|
||||
}
|
||||
|
||||
buf.WriteString(color)
|
||||
buf.WriteString(Separator)
|
||||
buf.WriteString(indent)
|
||||
buf.WriteString("in ")
|
||||
buf.WriteString(ci.method)
|
||||
buf.WriteString("(")
|
||||
buf.WriteString(tildeFilename)
|
||||
buf.WriteRune(':')
|
||||
buf.WriteString(strconv.Itoa(ci.lineno))
|
||||
buf.WriteString(")")
|
||||
|
||||
if ci.contextLines == -1 {
|
||||
return buf.String()
|
||||
}
|
||||
buf.WriteString("\n")
|
||||
|
||||
// the width of the printed line number
|
||||
var linenoWidth int
|
||||
// trim spaces at start of source code based on common spaces
|
||||
var skipSpaces = 1000
|
||||
|
||||
// calculate width of lineno and number of leading spaces that can be
|
||||
// removed
|
||||
for _, li := range ci.context {
|
||||
linenoWidth = maxInt(linenoWidth, len(fmt.Sprintf("%d", li.lineno)))
|
||||
index := indexOfNonSpace(li.line)
|
||||
if index > -1 && index < skipSpaces {
|
||||
skipSpaces = index
|
||||
}
|
||||
}
|
||||
|
||||
for _, li := range ci.context {
|
||||
var format string
|
||||
format = fmt.Sprintf("%%s%%%dd: %%s\n", linenoWidth)
|
||||
|
||||
if li.lineno == ci.lineno {
|
||||
buf.WriteString(color)
|
||||
if ci.contextLines > 2 {
|
||||
format = fmt.Sprintf("%%s=> %%%dd: %%s\n", linenoWidth)
|
||||
}
|
||||
} else {
|
||||
buf.WriteString(sourceColor)
|
||||
if ci.contextLines > 2 {
|
||||
// account for "=> "
|
||||
format = fmt.Sprintf("%%s%%%dd: %%s\n", linenoWidth+3)
|
||||
}
|
||||
}
|
||||
// trim spaces at start
|
||||
idx := minInt(len(li.line), skipSpaces)
|
||||
buf.WriteString(fmt.Sprintf(format, Separator+indent+indent, li.lineno, li.line[idx:]))
|
||||
}
|
||||
// get rid of last \n
|
||||
buf.Truncate(buf.Len() - 1)
|
||||
if !disableColors {
|
||||
buf.WriteString(ansi.Reset)
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// parseDebugStack parses a stack created by debug.Stack()
|
||||
//
|
||||
// This is what the string looks like
|
||||
// /Users/mgutz/go/src/github.com/mgutz/logxi/v1/jsonFormatter.go:45 (0x5fa70)
|
||||
// (*JSONFormatter).writeError: jf.writeString(buf, err.Error()+"\n"+string(debug.Stack()))
|
||||
// /Users/mgutz/go/src/github.com/mgutz/logxi/v1/jsonFormatter.go:82 (0x5fdc3)
|
||||
// (*JSONFormatter).appendValue: jf.writeError(buf, err)
|
||||
// /Users/mgutz/go/src/github.com/mgutz/logxi/v1/jsonFormatter.go:109 (0x605ca)
|
||||
// (*JSONFormatter).set: jf.appendValue(buf, val)
|
||||
// ...
|
||||
// /Users/mgutz/goroot/src/runtime/asm_amd64.s:2232 (0x38bf1)
|
||||
// goexit:
|
||||
func parseDebugStack(stack string, skip int, ignoreRuntime bool) []*frameInfo {
|
||||
frames := []*frameInfo{}
|
||||
// BUG temporarily disable since there is a bug with embedded newlines
|
||||
if true {
|
||||
return frames
|
||||
}
|
||||
|
||||
lines := strings.Split(stack, "\n")
|
||||
|
||||
for i := skip * 2; i < len(lines); i += 2 {
|
||||
ci := &frameInfo{}
|
||||
sourceLine := lines[i]
|
||||
if sourceLine == "" {
|
||||
break
|
||||
}
|
||||
if ignoreRuntime && strings.Contains(sourceLine, filepath.Join("src", "runtime")) {
|
||||
break
|
||||
}
|
||||
|
||||
colon := strings.Index(sourceLine, ":")
|
||||
slash := strings.Index(sourceLine, "/")
|
||||
if colon < slash {
|
||||
// must be on Windows where paths look like c:/foo/bar.go:lineno
|
||||
colon = strings.Index(sourceLine[slash:], ":") + slash
|
||||
}
|
||||
space := strings.Index(sourceLine, " ")
|
||||
ci.filename = sourceLine[0:colon]
|
||||
|
||||
// BUG with callstack where the error message has embedded newlines
|
||||
// if colon > space {
|
||||
// fmt.Println("lines", lines)
|
||||
// }
|
||||
// fmt.Println("SOURCELINE", sourceLine, "len", len(sourceLine), "COLON", colon, "SPACE", space)
|
||||
numstr := sourceLine[colon+1 : space]
|
||||
lineno, err := strconv.Atoi(numstr)
|
||||
if err != nil {
|
||||
InternalLog.Warn("Could not parse line number", "sourceLine", sourceLine, "numstr", numstr)
|
||||
continue
|
||||
}
|
||||
ci.lineno = lineno
|
||||
|
||||
methodLine := lines[i+1]
|
||||
colon = strings.Index(methodLine, ":")
|
||||
ci.method = strings.Trim(methodLine[0:colon], "\t ")
|
||||
frames = append(frames, ci)
|
||||
}
|
||||
return frames
|
||||
}
|
||||
|
||||
// trimDebugStack removes logxi-internal frames from a stack created by debug.Stack()
|
||||
//
|
||||
// This is what the string looks like
|
||||
// /Users/mgutz/go/src/github.com/mgutz/logxi/v1/jsonFormatter.go:45 (0x5fa70)
|
||||
// (*JSONFormatter).writeError: jf.writeString(buf, err.Error()+"\n"+string(debug.Stack()))
|
||||
// /Users/mgutz/go/src/github.com/mgutz/logxi/v1/jsonFormatter.go:82 (0x5fdc3)
|
||||
// (*JSONFormatter).appendValue: jf.writeError(buf, err)
|
||||
// /Users/mgutz/go/src/github.com/mgutz/logxi/v1/jsonFormatter.go:109 (0x605ca)
|
||||
// (*JSONFormatter).set: jf.appendValue(buf, val)
|
||||
// ...
|
||||
// /Users/mgutz/goroot/src/runtime/asm_amd64.s:2232 (0x38bf1)
|
||||
// goexit:
|
||||
func trimDebugStack(stack string) string {
|
||||
buf := pool.Get()
|
||||
defer pool.Put(buf)
|
||||
lines := strings.Split(stack, "\n")
|
||||
for i := 0; i < len(lines); i += 2 {
|
||||
sourceLine := lines[i]
|
||||
if sourceLine == "" {
|
||||
break
|
||||
}
|
||||
|
||||
colon := strings.Index(sourceLine, ":")
|
||||
slash := strings.Index(sourceLine, "/")
|
||||
if colon < slash {
|
||||
// must be on Windows where paths look like c:/foo/bar.go:lineno
|
||||
colon = strings.Index(sourceLine[slash:], ":") + slash
|
||||
}
|
||||
filename := sourceLine[0:colon]
|
||||
// skip anything in the logxi package
|
||||
if isLogxiCode(filename) {
|
||||
continue
|
||||
}
|
||||
buf.WriteString(sourceLine)
|
||||
buf.WriteRune('\n')
|
||||
buf.WriteString(lines[i+1])
|
||||
buf.WriteRune('\n')
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func parseLogxiStack(entry map[string]interface{}, skip int, ignoreRuntime bool) []*frameInfo {
|
||||
kv := entry[KeyMap.CallStack]
|
||||
if kv == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var frames []*frameInfo
|
||||
if stack, ok := kv.(string); ok {
|
||||
frames = parseDebugStack(stack, skip, ignoreRuntime)
|
||||
}
|
||||
return frames
|
||||
}
|
|
@@ -0,0 +1,25 @@
|
|||
package log
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// ConcurrentWriter is a concurrent safe wrapper around io.Writer
|
||||
type ConcurrentWriter struct {
|
||||
writer io.Writer
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
// NewConcurrentWriter creates a new concurrent writer wrapper around an existing writer.
|
||||
func NewConcurrentWriter(writer io.Writer) io.Writer {
|
||||
return &ConcurrentWriter{writer: writer}
|
||||
}
|
||||
|
||||
func (cw *ConcurrentWriter) Write(p []byte) (n int, err error) {
|
||||
cw.Lock()
|
||||
defer cw.Unlock()
|
||||
// This is basically the same logic as in Go's log.Output() which
// doesn't look at the number of bytes returned
|
||||
return cw.writer.Write(p)
|
||||
}
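// Usage sketch (illustrative, not part of the upstream file): a writer that is
// not safe for concurrent use can be shared between goroutines once wrapped,
// e.g.
//
//	var buf bytes.Buffer
//	w := NewConcurrentWriter(&buf)
//	go w.Write([]byte("from goroutine A\n"))
//	go w.Write([]byte("from goroutine B\n"))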
|
|
@@ -0,0 +1,149 @@
|
|||
package log
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// DefaultLogger is the default logger for this package.
|
||||
type DefaultLogger struct {
|
||||
writer io.Writer
|
||||
name string
|
||||
level int
|
||||
formatter Formatter
|
||||
}
|
||||
|
||||
// NewLogger creates a new default logger. If writer is not concurrent
|
||||
// safe, wrap it with NewConcurrentWriter.
|
||||
func NewLogger(writer io.Writer, name string) Logger {
|
||||
formatter, err := createFormatter(name, logxiFormat)
|
||||
if err != nil {
|
||||
panic("Could not create formatter")
|
||||
}
|
||||
return NewLogger3(writer, name, formatter)
|
||||
}
|
||||
|
||||
// NewLogger3 creates a new logger with a writer, name and formatter. If writer is not concurrent
|
||||
// safe, wrap it with NewConcurrentWriter.
|
||||
func NewLogger3(writer io.Writer, name string, formatter Formatter) Logger {
|
||||
var level int
|
||||
if name != "__logxi" {
|
||||
// if err is returned, then it means the log is disabled
|
||||
level = getLogLevel(name)
|
||||
if level == LevelOff {
|
||||
return NullLog
|
||||
}
|
||||
}
|
||||
|
||||
log := &DefaultLogger{
|
||||
formatter: formatter,
|
||||
writer: writer,
|
||||
name: name,
|
||||
level: level,
|
||||
}
|
||||
|
||||
// TODO loggers will be used when watching changes to configuration such
|
||||
// as in consul, etcd
|
||||
loggers.Lock()
|
||||
loggers.loggers[name] = log
|
||||
loggers.Unlock()
|
||||
return log
|
||||
}
|
||||
|
||||
// New creates a colorable default logger.
|
||||
func New(name string) Logger {
|
||||
return NewLogger(colorableStdout, name)
|
||||
}
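// Usage sketch (illustrative, not part of the upstream file): loggers are
// created by name and log leveled key-value pairs, e.g.
//
//	logger := log.New("app")
//	logger.Info("user logged in", "user_id", 42)
//	err := logger.Error("query failed", "err", dbErr) // returns the first error argument; dbErr is hypothetical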
|
||||
|
||||
// Trace logs a debug entry.
|
||||
func (l *DefaultLogger) Trace(msg string, args ...interface{}) {
|
||||
l.Log(LevelTrace, msg, args)
|
||||
}
|
||||
|
||||
// Debug logs a debug entry.
|
||||
func (l *DefaultLogger) Debug(msg string, args ...interface{}) {
|
||||
l.Log(LevelDebug, msg, args)
|
||||
}
|
||||
|
||||
// Info logs an info entry.
|
||||
func (l *DefaultLogger) Info(msg string, args ...interface{}) {
|
||||
l.Log(LevelInfo, msg, args)
|
||||
}
|
||||
|
||||
// Warn logs a warn entry.
|
||||
func (l *DefaultLogger) Warn(msg string, args ...interface{}) error {
|
||||
if l.IsWarn() {
|
||||
defer l.Log(LevelWarn, msg, args)
|
||||
|
||||
for _, arg := range args {
|
||||
if err, ok := arg.(error); ok {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *DefaultLogger) extractLogError(level int, msg string, args []interface{}) error {
|
||||
defer l.Log(level, msg, args)
|
||||
|
||||
for _, arg := range args {
|
||||
if err, ok := arg.(error); ok {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return fmt.Errorf(msg)
|
||||
}
|
||||
|
||||
// Error logs an error entry.
|
||||
func (l *DefaultLogger) Error(msg string, args ...interface{}) error {
|
||||
return l.extractLogError(LevelError, msg, args)
|
||||
}
|
||||
|
||||
// Fatal logs a fatal entry then panics.
|
||||
func (l *DefaultLogger) Fatal(msg string, args ...interface{}) {
|
||||
l.extractLogError(LevelFatal, msg, args)
|
||||
defer panic("Exit due to fatal error: ")
|
||||
}
|
||||
|
||||
// Log logs a leveled entry.
|
||||
func (l *DefaultLogger) Log(level int, msg string, args []interface{}) {
|
||||
// log if the log level (warn=4) >= level of message (err=3)
|
||||
if l.level < level || silent {
|
||||
return
|
||||
}
|
||||
l.formatter.Format(l.writer, level, msg, args)
|
||||
}
|
||||
|
||||
// IsTrace determines if this logger logs a debug statement.
|
||||
func (l *DefaultLogger) IsTrace() bool {
|
||||
// DEBUG(7) >= TRACE(10)
|
||||
return l.level >= LevelTrace
|
||||
}
|
||||
|
||||
// IsDebug determines if this logger logs a debug statement.
|
||||
func (l *DefaultLogger) IsDebug() bool {
|
||||
return l.level >= LevelDebug
|
||||
}
|
||||
|
||||
// IsInfo determines if this logger logs an info statement.
|
||||
func (l *DefaultLogger) IsInfo() bool {
|
||||
return l.level >= LevelInfo
|
||||
}
|
||||
|
||||
// IsWarn determines if this logger logs a warning statement.
|
||||
func (l *DefaultLogger) IsWarn() bool {
|
||||
return l.level >= LevelWarn
|
||||
}
|
||||
|
||||
// SetLevel sets the level of this logger.
|
||||
func (l *DefaultLogger) SetLevel(level int) {
|
||||
l.level = level
|
||||
}
|
||||
|
||||
// SetFormatter set the formatter for this logger.
|
||||
func (l *DefaultLogger) SetFormatter(formatter Formatter) {
|
||||
l.formatter = formatter
|
||||
}
|
|
@@ -0,0 +1,166 @@
|
|||
package log
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var contextLines int
|
||||
|
||||
// Configuration comes from environment or external services like
|
||||
// consul, etcd.
|
||||
type Configuration struct {
|
||||
Format string `json:"format"`
|
||||
Colors string `json:"colors"`
|
||||
Levels string `json:"levels"`
|
||||
}
|
||||
|
||||
func readFromEnviron() *Configuration {
|
||||
conf := &Configuration{}
|
||||
|
||||
var envOrDefault = func(name, val string) string {
|
||||
result := os.Getenv(name)
|
||||
if result == "" {
|
||||
result = val
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
conf.Levels = envOrDefault("LOGXI", defaultLogxiEnv)
|
||||
conf.Format = envOrDefault("LOGXI_FORMAT", defaultLogxiFormatEnv)
|
||||
conf.Colors = envOrDefault("LOGXI_COLORS", defaultLogxiColorsEnv)
|
||||
return conf
|
||||
}
|
||||
|
||||
// ProcessEnv (re)processes environment.
|
||||
func ProcessEnv(env *Configuration) {
|
||||
// TODO: allow reading from etcd
|
||||
|
||||
ProcessLogxiEnv(env.Levels)
|
||||
ProcessLogxiColorsEnv(env.Colors)
|
||||
ProcessLogxiFormatEnv(env.Format)
|
||||
}
|
||||
|
||||
// ProcessLogxiFormatEnv parses LOGXI_FORMAT
|
||||
func ProcessLogxiFormatEnv(env string) {
|
||||
logxiFormat = env
|
||||
m := parseKVList(logxiFormat, ",")
|
||||
formatterFormat := ""
|
||||
tFormat := ""
|
||||
for key, value := range m {
|
||||
switch key {
|
||||
default:
|
||||
formatterFormat = key
|
||||
case "t":
|
||||
tFormat = value
|
||||
case "pretty":
|
||||
isPretty = value != "false" && value != "0"
|
||||
case "maxcol":
|
||||
col, err := strconv.Atoi(value)
|
||||
if err == nil {
|
||||
maxCol = col
|
||||
} else {
|
||||
maxCol = defaultMaxCol
|
||||
}
|
||||
case "context":
|
||||
lines, err := strconv.Atoi(value)
|
||||
if err == nil {
|
||||
contextLines = lines
|
||||
} else {
|
||||
contextLines = defaultContextLines
|
||||
}
|
||||
case "LTSV":
|
||||
formatterFormat = "text"
|
||||
AssignmentChar = ltsvAssignmentChar
|
||||
Separator = ltsvSeparator
|
||||
}
|
||||
}
|
||||
if formatterFormat == "" || formatterCreators[formatterFormat] == nil {
|
||||
formatterFormat = defaultFormat
|
||||
}
|
||||
logxiFormat = formatterFormat
|
||||
if tFormat == "" {
|
||||
tFormat = defaultTimeFormat
|
||||
}
|
||||
timeFormat = tFormat
|
||||
}
|
||||
|
||||
// ProcessLogxiEnv parses LOGXI variable
|
||||
func ProcessLogxiEnv(env string) {
|
||||
logxiEnable := env
|
||||
if logxiEnable == "" {
|
||||
logxiEnable = defaultLogxiEnv
|
||||
}
|
||||
|
||||
logxiNameLevelMap = map[string]int{}
|
||||
m := parseKVList(logxiEnable, ",")
|
||||
if m == nil {
|
||||
logxiNameLevelMap["*"] = defaultLevel
|
||||
}
|
||||
for key, value := range m {
|
||||
if strings.HasPrefix(key, "-") {
|
||||
// LOGXI=*,-foo => disable foo
|
||||
logxiNameLevelMap[key[1:]] = LevelOff
|
||||
} else if value == "" {
|
||||
// LOGXI=* => default to all
|
||||
logxiNameLevelMap[key] = LevelAll
|
||||
} else {
|
||||
// LOGXI=*=ERR => use user-specified level
|
||||
level := LevelAtoi[value]
|
||||
if level == 0 {
|
||||
InternalLog.Error("Unknown level in LOGXI environment variable", "key", key, "value", value, "LOGXI", env)
|
||||
level = defaultLevel
|
||||
}
|
||||
logxiNameLevelMap[key] = level
|
||||
}
|
||||
}
|
||||
|
||||
// must always have global default, otherwise errs may get eaten up
|
||||
if _, ok := logxiNameLevelMap["*"]; !ok {
|
||||
logxiNameLevelMap["*"] = LevelError
|
||||
}
|
||||
}
|
||||
|
||||
func getLogLevel(name string) int {
|
||||
var wildcardLevel int
|
||||
var result int
|
||||
|
||||
for k, v := range logxiNameLevelMap {
|
||||
if k == name {
|
||||
result = v
|
||||
} else if k == "*" {
|
||||
wildcardLevel = v
|
||||
} else if strings.HasPrefix(k, "*") && strings.HasSuffix(name, k[1:]) {
|
||||
result = v
|
||||
} else if strings.HasSuffix(k, "*") && strings.HasPrefix(name, k[:len(k)-1]) {
|
||||
result = v
|
||||
}
|
||||
}
|
||||
|
||||
if result == LevelOff {
|
||||
return LevelOff
|
||||
}
|
||||
|
||||
if result > 0 {
|
||||
return result
|
||||
}
|
||||
|
||||
if wildcardLevel > 0 {
|
||||
return wildcardLevel
|
||||
}
|
||||
|
||||
return LevelOff
|
||||
}
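// Illustrative sketch (not part of the upstream file): with
// LOGXI="*=WRN,models*,-db" the name/level map resolves roughly as
//
//	getLogLevel("models.user") // LevelAll  (prefix match on "models*")
//	getLogLevel("db")          // LevelOff  ("-db" disables that logger)
//	getLogLevel("other")       // LevelWarn (falls back to the "*" wildcard)
//
// assuming LevelAtoi maps "WRN" to LevelWarn.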
|
||||
|
||||
// ProcessLogxiColorsEnv parses LOGXI_COLORS
|
||||
func ProcessLogxiColorsEnv(env string) {
|
||||
colors := env
|
||||
if colors == "" {
|
||||
colors = defaultLogxiColorsEnv
|
||||
} else if colors == "*=off" {
|
||||
// disable all colors
|
||||
disableColors = true
|
||||
}
|
||||
theme = parseTheme(colors)
|
||||
}
|
|
@@ -0,0 +1,61 @@
|
|||
package log
|
||||
|
||||
var formatterCreators = map[string]CreateFormatterFunc{}
|
||||
|
||||
// CreateFormatterFunc is a function which creates a new instance
|
||||
// of a Formatter.
|
||||
type CreateFormatterFunc func(name, kind string) (Formatter, error)
|
||||
|
||||
// createFormatter creates formatters. It accepts a kind in {"text", "JSON"}
|
||||
// which correspond to TextFormatter and JSONFormatter, and the name of the
|
||||
// logger.
|
||||
func createFormatter(name string, kind string) (Formatter, error) {
|
||||
if kind == FormatEnv {
|
||||
kind = logxiFormat
|
||||
}
|
||||
if kind == "" {
|
||||
kind = FormatText
|
||||
}
|
||||
|
||||
fn := formatterCreators[kind]
|
||||
if fn == nil {
|
||||
fn = formatterCreators[FormatText]
|
||||
}
|
||||
|
||||
formatter, err := fn(name, kind)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// custom formatter may have not returned a formatter
|
||||
if formatter == nil {
|
||||
formatter, err = formatFactory(name, FormatText)
|
||||
}
|
||||
return formatter, err
|
||||
}
|
||||
|
||||
func formatFactory(name string, kind string) (Formatter, error) {
|
||||
var formatter Formatter
|
||||
var err error
|
||||
switch kind {
|
||||
default:
|
||||
formatter = NewTextFormatter(name)
|
||||
case FormatHappy:
|
||||
formatter = NewHappyDevFormatter(name)
|
||||
case FormatText:
|
||||
formatter = NewTextFormatter(name)
|
||||
case FormatJSON:
|
||||
formatter = NewJSONFormatter(name)
|
||||
}
|
||||
return formatter, err
|
||||
}
|
||||
|
||||
// RegisterFormatFactory registers a format factory function.
|
||||
func RegisterFormatFactory(kind string, fn CreateFormatterFunc) {
|
||||
if kind == "" {
|
||||
panic("kind is empty string")
|
||||
}
|
||||
if fn == nil {
|
||||
panic("creator is nil")
|
||||
}
|
||||
formatterCreators[kind] = fn
|
||||
}
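// Usage sketch (illustrative): a custom format kind can be registered and then
// selected through LOGXI_FORMAT, assuming the factory returns a Formatter
// implementation (newMyFormatter below is hypothetical), e.g.
//
//	log.RegisterFormatFactory("myformat", func(name, kind string) (log.Formatter, error) {
//		return newMyFormatter(name), nil
//	})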
|
|
@@ -0,0 +1,373 @@
|
|||
package log
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"runtime/debug"
|
||||
"strings"
|
||||
|
||||
"github.com/mgutz/ansi"
|
||||
)
|
||||
|
||||
// colorScheme defines a color theme for HappyDevFormatter
|
||||
type colorScheme struct {
|
||||
Key string
|
||||
Message string
|
||||
Value string
|
||||
Misc string
|
||||
Source string
|
||||
|
||||
Trace string
|
||||
Debug string
|
||||
Info string
|
||||
Warn string
|
||||
Error string
|
||||
}
|
||||
|
||||
var indent = " "
|
||||
var maxCol = defaultMaxCol
|
||||
var theme *colorScheme
|
||||
|
||||
func parseKVList(s, separator string) map[string]string {
|
||||
pairs := strings.Split(s, separator)
|
||||
if len(pairs) == 0 {
|
||||
return nil
|
||||
}
|
||||
m := map[string]string{}
|
||||
for _, pair := range pairs {
|
||||
if pair == "" {
|
||||
continue
|
||||
}
|
||||
parts := strings.Split(pair, "=")
|
||||
switch len(parts) {
|
||||
case 1:
|
||||
m[parts[0]] = ""
|
||||
case 2:
|
||||
m[parts[0]] = parts[1]
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
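// Illustrative example (not part of the upstream file):
//
//	parseKVList("key=cyan,value,ERR=red+h", ",")
//	// => map[string]string{"key": "cyan", "value": "", "ERR": "red+h"}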
|
||||
|
||||
func parseTheme(theme string) *colorScheme {
|
||||
m := parseKVList(theme, ",")
|
||||
cs := &colorScheme{}
|
||||
var wildcard string
|
||||
|
||||
var color = func(key string) string {
|
||||
if disableColors {
|
||||
return ""
|
||||
}
|
||||
style := m[key]
|
||||
c := ansi.ColorCode(style)
|
||||
if c == "" {
|
||||
c = wildcard
|
||||
}
|
||||
//fmt.Printf("plain=%b [%s] %s=%q\n", ansi.DefaultFG, key, style, c)
|
||||
|
||||
return c
|
||||
}
|
||||
wildcard = color("*")
|
||||
|
||||
if wildcard != ansi.Reset {
|
||||
cs.Key = wildcard
|
||||
cs.Value = wildcard
|
||||
cs.Misc = wildcard
|
||||
cs.Source = wildcard
|
||||
cs.Message = wildcard
|
||||
|
||||
cs.Trace = wildcard
|
||||
cs.Debug = wildcard
|
||||
cs.Warn = wildcard
|
||||
cs.Info = wildcard
|
||||
cs.Error = wildcard
|
||||
}
|
||||
|
||||
cs.Key = color("key")
|
||||
cs.Value = color("value")
|
||||
cs.Misc = color("misc")
|
||||
cs.Source = color("source")
|
||||
cs.Message = color("message")
|
||||
|
||||
cs.Trace = color("TRC")
|
||||
cs.Debug = color("DBG")
|
||||
cs.Warn = color("WRN")
|
||||
cs.Info = color("INF")
|
||||
cs.Error = color("ERR")
|
||||
return cs
|
||||
}
|
||||
|
||||
// HappyDevFormatter is the formatter used for terminals. It is
|
||||
// colorful, dev friendly and provides meaningful logs when
|
||||
// warnings and errors occur.
|
||||
//
|
||||
// HappyDevFormatter does not worry about performance. It's at least 3-4X
|
||||
// slower than JSONFormatter since it delegates to JSONFormatter to marshal
|
||||
// then unmarshal JSON. Then it does other stuff like read source files and sort
// keys, all to give a developer more information.
|
||||
//
|
||||
// SHOULD NOT be used in production for an extended period of time. However, it
|
||||
// works fine in SSH terminals and binary deployments.
|
||||
type HappyDevFormatter struct {
|
||||
name string
|
||||
col int
|
||||
// always use the production formatter
|
||||
jsonFormatter *JSONFormatter
|
||||
}
|
||||
|
||||
// NewHappyDevFormatter returns a new instance of HappyDevFormatter.
|
||||
func NewHappyDevFormatter(name string) *HappyDevFormatter {
|
||||
jf := NewJSONFormatter(name)
|
||||
return &HappyDevFormatter{
|
||||
name: name,
|
||||
jsonFormatter: jf,
|
||||
}
|
||||
}
|
||||
|
||||
func (hd *HappyDevFormatter) writeKey(buf bufferWriter, key string) {
|
||||
// assumes this is not the first key
|
||||
hd.writeString(buf, Separator)
|
||||
if key == "" {
|
||||
return
|
||||
}
|
||||
buf.WriteString(theme.Key)
|
||||
hd.writeString(buf, key)
|
||||
hd.writeString(buf, AssignmentChar)
|
||||
if !disableColors {
|
||||
buf.WriteString(ansi.Reset)
|
||||
}
|
||||
}
|
||||
|
||||
func (hd *HappyDevFormatter) set(buf bufferWriter, key string, value interface{}, color string) {
|
||||
var str string
|
||||
if s, ok := value.(string); ok {
|
||||
str = s
|
||||
} else if s, ok := value.(fmt.Stringer); ok {
|
||||
str = s.String()
|
||||
} else {
|
||||
str = fmt.Sprintf("%v", value)
|
||||
}
|
||||
val := strings.Trim(str, "\n ")
|
||||
if (isPretty && key != "") || hd.col+len(key)+2+len(val) >= maxCol {
|
||||
buf.WriteString("\n")
|
||||
hd.col = 0
|
||||
hd.writeString(buf, indent)
|
||||
}
|
||||
hd.writeKey(buf, key)
|
||||
if color != "" {
|
||||
buf.WriteString(color)
|
||||
}
|
||||
hd.writeString(buf, val)
|
||||
if color != "" && !disableColors {
|
||||
buf.WriteString(ansi.Reset)
|
||||
}
|
||||
}
|
||||
|
||||
// writeString writes a string and tracks its position so we can break lines
// cleanly. Do not send ANSI escape sequences, just raw strings.
|
||||
func (hd *HappyDevFormatter) writeString(buf bufferWriter, s string) {
|
||||
buf.WriteString(s)
|
||||
hd.col += len(s)
|
||||
}
|
||||
|
||||
func (hd *HappyDevFormatter) getContext(color string) string {
|
||||
if disableCallstack {
|
||||
return ""
|
||||
}
|
||||
frames := parseDebugStack(string(debug.Stack()), 5, true)
|
||||
if len(frames) == 0 {
|
||||
return ""
|
||||
}
|
||||
for _, frame := range frames {
|
||||
context := frame.String(color, theme.Source)
|
||||
if context != "" {
|
||||
return context
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (hd *HappyDevFormatter) getLevelContext(level int, entry map[string]interface{}) (message string, context string, color string) {
|
||||
|
||||
switch level {
|
||||
case LevelTrace:
|
||||
color = theme.Trace
|
||||
context = hd.getContext(color)
|
||||
context += "\n"
|
||||
case LevelDebug:
|
||||
color = theme.Debug
|
||||
case LevelInfo:
|
||||
color = theme.Info
|
||||
// case LevelWarn:
|
||||
// color = theme.Warn
|
||||
// context = hd.getContext(color)
|
||||
// context += "\n"
|
||||
case LevelWarn, LevelError, LevelFatal:
|
||||
|
||||
// warnings return an error but if it does not have an error
|
||||
// then print line info only
|
||||
if level == LevelWarn {
|
||||
color = theme.Warn
|
||||
kv := entry[KeyMap.CallStack]
|
||||
if kv == nil {
|
||||
context = hd.getContext(color)
|
||||
context += "\n"
|
||||
break
|
||||
}
|
||||
} else {
|
||||
color = theme.Error
|
||||
}
|
||||
|
||||
if disableCallstack || contextLines == -1 {
|
||||
context = trimDebugStack(string(debug.Stack()))
|
||||
break
|
||||
}
|
||||
frames := parseLogxiStack(entry, 4, true)
|
||||
if frames == nil {
|
||||
frames = parseDebugStack(string(debug.Stack()), 4, true)
|
||||
}
|
||||
|
||||
if len(frames) == 0 {
|
||||
break
|
||||
}
|
||||
errbuf := pool.Get()
|
||||
defer pool.Put(errbuf)
|
||||
lines := 0
|
||||
for _, frame := range frames {
|
||||
err := frame.readSource(contextLines)
|
||||
if err != nil {
|
||||
// by setting to empty, the original stack is used
|
||||
errbuf.Reset()
|
||||
break
|
||||
}
|
||||
ctx := frame.String(color, theme.Source)
|
||||
if ctx == "" {
|
||||
continue
|
||||
}
|
||||
errbuf.WriteString(ctx)
|
||||
errbuf.WriteRune('\n')
|
||||
lines++
|
||||
}
|
||||
context = errbuf.String()
|
||||
default:
|
||||
panic("should never get here")
|
||||
}
|
||||
return message, context, color
|
||||
}
|
||||
|
||||
// Format a log entry.
|
||||
func (hd *HappyDevFormatter) Format(writer io.Writer, level int, msg string, args []interface{}) {
|
||||
buf := pool.Get()
|
||||
defer pool.Put(buf)
|
||||
|
||||
if len(args) == 1 {
|
||||
args = append(args, 0)
|
||||
copy(args[1:], args[0:])
|
||||
args[0] = singleArgKey
|
||||
}
|
||||
|
||||
// warn about reserved, bad and complex keys
|
||||
for i := 0; i < len(args); i += 2 {
|
||||
isReserved, err := isReservedKey(args[i])
|
||||
if err != nil {
|
||||
InternalLog.Error("Key is not a string.", "err", fmt.Errorf("args[%d]=%v", i, args[i]))
|
||||
} else if isReserved {
|
||||
InternalLog.Fatal("Key conflicts with reserved key. Avoiding using single rune keys.", "key", args[i].(string))
|
||||
} else {
|
||||
// Ensure keys are simple strings. The JSONFormatter doesn't escape
|
||||
// keys as a performance tradeoff. This panics if the JSON key
|
||||
// value has a different value than a simple quoted string.
|
||||
key := args[i].(string)
|
||||
b, err := json.Marshal(key)
|
||||
if err != nil {
|
||||
panic("Key is invalid. " + err.Error())
|
||||
}
|
||||
if string(b) != `"`+key+`"` {
|
||||
panic("Key is complex. Use simpler key for: " + fmt.Sprintf("%q", key))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// use the production JSON formatter to format the log first. This
|
||||
// ensures JSON will marshal/unmarshal correctly in production.
|
||||
entry := hd.jsonFormatter.LogEntry(level, msg, args)
|
||||
|
||||
// reset the column tracker used for fancy formatting
|
||||
hd.col = 0
|
||||
|
||||
// timestamp
|
||||
buf.WriteString(theme.Misc)
|
||||
hd.writeString(buf, entry[KeyMap.Time].(string))
|
||||
if !disableColors {
|
||||
buf.WriteString(ansi.Reset)
|
||||
}
|
||||
|
||||
// emphasize warnings and errors
|
||||
message, context, color := hd.getLevelContext(level, entry)
|
||||
if message == "" {
|
||||
message = entry[KeyMap.Message].(string)
|
||||
}
|
||||
|
||||
// DBG, INF ...
|
||||
hd.set(buf, "", entry[KeyMap.Level].(string), color)
|
||||
// logger name
|
||||
hd.set(buf, "", entry[KeyMap.Name], theme.Misc)
|
||||
// message from user
|
||||
hd.set(buf, "", message, theme.Message)
|
||||
|
||||
// Preserve key order in the sequence they were added by the developer. This
// makes it easier for developers to follow the log.
|
||||
order := []string{}
|
||||
lenArgs := len(args)
|
||||
for i := 0; i < len(args); i += 2 {
|
||||
if i+1 >= lenArgs {
|
||||
continue
|
||||
}
|
||||
if key, ok := args[i].(string); ok {
|
||||
order = append(order, key)
|
||||
} else {
|
||||
order = append(order, badKeyAtIndex(i))
|
||||
}
|
||||
}
|
||||
|
||||
for _, key := range order {
|
||||
// skip reserved keys which were already added to buffer above
|
||||
isReserved, err := isReservedKey(key)
|
||||
if err != nil {
|
||||
panic("key is invalid. Should never get here. " + err.Error())
|
||||
} else if isReserved {
|
||||
continue
|
||||
}
|
||||
hd.set(buf, key, entry[key], theme.Value)
|
||||
}
|
||||
|
||||
addLF := true
|
||||
hasCallStack := entry[KeyMap.CallStack] != nil
|
||||
// WRN,ERR file, line number context
|
||||
|
||||
if context != "" {
|
||||
// warnings and traces are single line, space can be optimized
|
||||
if level == LevelTrace || (level == LevelWarn && !hasCallStack) {
|
||||
// gets rid of "in "
|
||||
idx := strings.IndexRune(context, 'n')
|
||||
hd.set(buf, "in", context[idx+2:], color)
|
||||
} else {
|
||||
buf.WriteRune('\n')
|
||||
if !disableColors {
|
||||
buf.WriteString(color)
|
||||
}
|
||||
addLF = context[len(context)-1:len(context)] != "\n"
|
||||
buf.WriteString(context)
|
||||
if !disableColors {
|
||||
buf.WriteString(ansi.Reset)
|
||||
}
|
||||
}
|
||||
} else if hasCallStack {
|
||||
hd.set(buf, "", entry[KeyMap.CallStack], color)
|
||||
}
|
||||
if addLF {
|
||||
buf.WriteRune('\n')
|
||||
}
|
||||
buf.WriteTo(writer)
|
||||
}
|
|
@@ -0,0 +1,200 @@
|
|||
package log
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"github.com/mattn/go-colorable"
|
||||
"github.com/mattn/go-isatty"
|
||||
)
|
||||
|
||||
// scream so user fixes it
|
||||
const warnImbalancedKey = "FIX_IMBALANCED_PAIRS"
|
||||
const warnImbalancedPairs = warnImbalancedKey + " => "
|
||||
const singleArgKey = "_"
|
||||
|
||||
func badKeyAtIndex(i int) string {
|
||||
return "BAD_KEY_AT_INDEX_" + strconv.Itoa(i)
|
||||
}
|
||||
|
||||
// DefaultLog is the default logger for this package.
|
||||
var DefaultLog Logger
|
||||
|
||||
// Suppress suppresses logging and is useful to suppress output
// in unit tests.
//
// Example
//     log.Suppress(true)
//     defer log.Suppress(false)
|
||||
func Suppress(quiet bool) {
|
||||
silent = quiet
|
||||
}
|
||||
|
||||
var silent bool
|
||||
|
||||
// InternalLog is the logger used by logxi itself
|
||||
var InternalLog Logger
|
||||
|
||||
type loggerMap struct {
|
||||
sync.Mutex
|
||||
loggers map[string]Logger
|
||||
}
|
||||
|
||||
var loggers = &loggerMap{
|
||||
loggers: map[string]Logger{},
|
||||
}
|
||||
|
||||
func (lm *loggerMap) set(name string, logger Logger) {
|
||||
lm.loggers[name] = logger
|
||||
}
|
||||
|
||||
// The assignment character between key-value pairs
|
||||
var AssignmentChar = ": "
|
||||
|
||||
// Separator is the separator to use between key value pairs
|
||||
//var Separator = "{~}"
|
||||
var Separator = " "
|
||||
|
||||
const ltsvAssignmentChar = ":"
|
||||
const ltsvSeparator = "\t"
|
||||
|
||||
// logxiEnabledMap maps log name patterns to levels
|
||||
var logxiNameLevelMap map[string]int
|
||||
|
||||
// logxiFormat is the formatter kind to create
|
||||
var logxiFormat string
|
||||
|
||||
var colorableStdout io.Writer
|
||||
var defaultContextLines = 2
|
||||
var defaultFormat string
|
||||
var defaultLevel int
|
||||
var defaultLogxiEnv string
|
||||
var defaultLogxiFormatEnv string
|
||||
var defaultMaxCol = 80
|
||||
var defaultPretty = false
|
||||
var defaultLogxiColorsEnv string
|
||||
var defaultTimeFormat string
|
||||
var disableCallstack bool
|
||||
var disableCheckKeys bool
|
||||
var disableColors bool
|
||||
var home string
|
||||
var isPretty bool
|
||||
var isTerminal bool
|
||||
var isWindows = runtime.GOOS == "windows"
|
||||
var pkgMutex sync.Mutex
|
||||
var pool = NewBufferPool()
|
||||
var timeFormat string
|
||||
var wd string
|
||||
var pid = os.Getpid()
|
||||
var pidStr = strconv.Itoa(os.Getpid())
|
||||
|
||||
// KeyMapping is the key map used to print built-in log entry fields.
|
||||
type KeyMapping struct {
|
||||
Level string
|
||||
Message string
|
||||
Name string
|
||||
PID string
|
||||
Time string
|
||||
CallStack string
|
||||
}
|
||||
|
||||
// KeyMap is the key map to use when printing log statements.
|
||||
var KeyMap = &KeyMapping{
|
||||
Level: "_l",
|
||||
Message: "_m",
|
||||
Name: "_n",
|
||||
PID: "_p",
|
||||
Time: "_t",
|
||||
CallStack: "_c",
|
||||
}
|
||||
|
||||
var logxiKeys []string
|
||||
|
||||
func setDefaults(isTerminal bool) {
|
||||
var err error
|
||||
contextLines = defaultContextLines
|
||||
wd, err = os.Getwd()
|
||||
if err != nil {
|
||||
InternalLog.Error("Could not get working directory")
|
||||
}
|
||||
|
||||
logxiKeys = []string{KeyMap.Level, KeyMap.Message, KeyMap.Name, KeyMap.Time, KeyMap.CallStack, KeyMap.PID}
|
||||
|
||||
if isTerminal {
|
||||
defaultLogxiEnv = "*=WRN"
|
||||
defaultLogxiFormatEnv = "happy,fit,maxcol=80,t=15:04:05.000000,context=-1"
|
||||
defaultFormat = FormatHappy
|
||||
defaultLevel = LevelWarn
|
||||
defaultTimeFormat = "15:04:05.000000"
|
||||
} else {
|
||||
defaultLogxiEnv = "*=ERR"
|
||||
defaultLogxiFormatEnv = "JSON,t=2006-01-02T15:04:05-0700"
|
||||
defaultFormat = FormatJSON
|
||||
defaultLevel = LevelError
|
||||
defaultTimeFormat = "2006-01-02T15:04:05-0700"
|
||||
disableColors = true
|
||||
}
|
||||
|
||||
if isWindows {
|
||||
home = os.Getenv("HOMEPATH")
|
||||
if os.Getenv("ConEmuANSI") == "ON" {
|
||||
defaultLogxiColorsEnv = "key=cyan+h,value,misc=blue+h,source=yellow,TRC,DBG,WRN=yellow+h,INF=green+h,ERR=red+h"
|
||||
} else {
|
||||
colorableStdout = NewConcurrentWriter(colorable.NewColorableStdout())
|
||||
defaultLogxiColorsEnv = "ERR=red,misc=cyan,key=cyan"
|
||||
}
|
||||
// DefaultScheme is a color scheme optimized for dark background
|
||||
// but works well with light backgrounds
|
||||
} else {
|
||||
home = os.Getenv("HOME")
|
||||
term := os.Getenv("TERM")
|
||||
if term == "xterm-256color" {
|
||||
defaultLogxiColorsEnv = "key=cyan+h,value,misc=blue,source=88,TRC,DBG,WRN=yellow,INF=green+h,ERR=red+h,message=magenta+h"
|
||||
} else {
|
||||
defaultLogxiColorsEnv = "key=cyan+h,value,misc=blue,source=magenta,TRC,DBG,WRN=yellow,INF=green,ERR=red+h"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func isReservedKey(k interface{}) (bool, error) {
|
||||
key, ok := k.(string)
|
||||
if !ok {
|
||||
return false, fmt.Errorf("Key is not a string")
|
||||
}
|
||||
|
||||
// check if reserved
|
||||
for _, key2 := range logxiKeys {
|
||||
if key == key2 {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
colorableStdout = NewConcurrentWriter(os.Stdout)
|
||||
|
||||
isTerminal = isatty.IsTerminal(os.Stdout.Fd())
|
||||
|
||||
// the internal logger to report errors
|
||||
if isTerminal {
|
||||
InternalLog = NewLogger3(NewConcurrentWriter(os.Stdout), "__logxi", NewTextFormatter("__logxi"))
|
||||
} else {
|
||||
InternalLog = NewLogger3(NewConcurrentWriter(os.Stdout), "__logxi", NewJSONFormatter("__logxi"))
|
||||
}
|
||||
InternalLog.SetLevel(LevelError)
|
||||
|
||||
setDefaults(isTerminal)
|
||||
|
||||
RegisterFormatFactory(FormatHappy, formatFactory)
|
||||
RegisterFormatFactory(FormatText, formatFactory)
|
||||
RegisterFormatFactory(FormatJSON, formatFactory)
|
||||
ProcessEnv(readFromEnviron())
|
||||
|
||||
// package logger for users
|
||||
DefaultLog = New("~")
|
||||
}
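// Illustrative note (not part of the upstream file): runtime behavior is
// normally tuned through the environment variables read by ProcessEnv above,
// e.g.
//
//	LOGXI=*=DBG LOGXI_FORMAT=happy,maxcol=120 LOGXI_COLORS=*=off ./myapp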
|
|
@@ -0,0 +1,205 @@
|
|||
package log
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"runtime/debug"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
type bufferWriter interface {
|
||||
Write(p []byte) (nn int, err error)
|
||||
WriteRune(r rune) (n int, err error)
|
||||
WriteString(s string) (n int, err error)
|
||||
}
|
||||
|
||||
// JSONFormatter is a fast, efficient JSON formatter optimized for logging.
|
||||
//
|
||||
// * log entry keys are not escaped
|
||||
// Who uses complex keys when coding? Checked by HappyDevFormatter in case user does.
|
||||
// Nested object keys are escaped by json.Marshal().
|
||||
// * Primitive types use strconv
|
||||
// * Logger reserved key values (time, log name, level) require no conversion
|
||||
// * sync.Pool buffer for bytes.Buffer
|
||||
type JSONFormatter struct {
|
||||
name string
|
||||
}
|
||||
|
||||
// NewJSONFormatter creates a new instance of JSONFormatter.
|
||||
func NewJSONFormatter(name string) *JSONFormatter {
|
||||
return &JSONFormatter{name: name}
|
||||
}
|
||||
|
||||
func (jf *JSONFormatter) writeString(buf bufferWriter, s string) {
	b, err := json.Marshal(s)
	if err != nil {
		InternalLog.Error("Could not json.Marshal string.", "str", s)
		buf.WriteString(`"Could not marshal this key's string"`)
		return
	}
	buf.Write(b)
}

func (jf *JSONFormatter) writeError(buf bufferWriter, err error) {
	jf.writeString(buf, err.Error())
	jf.set(buf, KeyMap.CallStack, string(debug.Stack()))
	return
}

func (jf *JSONFormatter) appendValue(buf bufferWriter, val interface{}) {
	if val == nil {
		buf.WriteString("null")
		return
	}

	// always show error stack even at cost of some performance. there's
	// nothing worse than looking at production logs without a clue
	if err, ok := val.(error); ok {
		jf.writeError(buf, err)
		return
	}

	value := reflect.ValueOf(val)
	kind := value.Kind()
	if kind == reflect.Ptr {
		if value.IsNil() {
			buf.WriteString("null")
			return
		}
		value = value.Elem()
		kind = value.Kind()
	}

	switch kind {
	case reflect.Bool:
		if value.Bool() {
			buf.WriteString("true")
		} else {
			buf.WriteString("false")
		}

	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		buf.WriteString(strconv.FormatInt(value.Int(), 10))

	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		buf.WriteString(strconv.FormatUint(value.Uint(), 10))

	case reflect.Float32:
		buf.WriteString(strconv.FormatFloat(value.Float(), 'g', -1, 32))

	case reflect.Float64:
		buf.WriteString(strconv.FormatFloat(value.Float(), 'g', -1, 64))

	default:
		var err error
		var b []byte
		if stringer, ok := val.(fmt.Stringer); ok {
			b, err = json.Marshal(stringer.String())
		} else {
			b, err = json.Marshal(val)
		}

		if err != nil {
			InternalLog.Error("Could not json.Marshal value.", "formatter", "JSONFormatter", "err", err.Error())
			if s, ok := val.(string); ok {
				b, err = json.Marshal(s)
			} else if s, ok := val.(fmt.Stringer); ok {
				b, err = json.Marshal(s.String())
			} else {
				b, err = json.Marshal(fmt.Sprintf("%#v", val))
			}

			if err != nil {
				// should never get here, but JSONFormatter should never panic
				msg := "Could not Sprintf value"
				InternalLog.Error(msg)
				buf.WriteString(`"` + msg + `"`)
				return
			}
		}
		buf.Write(b)
	}
}

func (jf *JSONFormatter) set(buf bufferWriter, key string, val interface{}) {
	// WARNING: assumes this is not the first key
	buf.WriteString(`, "`)
	buf.WriteString(key)
	buf.WriteString(`":`)
	jf.appendValue(buf, val)
}

// Format formats a log entry as JSON.
func (jf *JSONFormatter) Format(writer io.Writer, level int, msg string, args []interface{}) {
	buf := pool.Get()
	defer pool.Put(buf)

	const lead = `", "`
	const colon = `":"`

	buf.WriteString(`{"`)
	buf.WriteString(KeyMap.Time)
	buf.WriteString(`":"`)
	buf.WriteString(time.Now().Format(timeFormat))

	buf.WriteString(`", "`)
	buf.WriteString(KeyMap.PID)
	buf.WriteString(`":"`)
	buf.WriteString(pidStr)

	buf.WriteString(`", "`)
	buf.WriteString(KeyMap.Level)
	buf.WriteString(`":"`)
	buf.WriteString(LevelMap[level])

	buf.WriteString(`", "`)
	buf.WriteString(KeyMap.Name)
	buf.WriteString(`":"`)
	buf.WriteString(jf.name)

	buf.WriteString(`", "`)
	buf.WriteString(KeyMap.Message)
	buf.WriteString(`":`)
	jf.appendValue(buf, msg)

	var lenArgs = len(args)
	if lenArgs > 0 {
		if lenArgs == 1 {
			jf.set(buf, singleArgKey, args[0])
		} else if lenArgs%2 == 0 {
			for i := 0; i < lenArgs; i += 2 {
				if key, ok := args[i].(string); ok {
					if key == "" {
						// show the key is invalid
						jf.set(buf, badKeyAtIndex(i), args[i+1])
					} else {
						jf.set(buf, key, args[i+1])
					}
				} else {
					// show the key is invalid
					jf.set(buf, badKeyAtIndex(i), args[i+1])
				}
			}
		} else {
			jf.set(buf, warnImbalancedKey, args)
		}
	}
	buf.WriteString("}\n")
	buf.WriteTo(writer)
}

// LogEntry returns the JSON log entry object built by Format(). Used by
// HappyDevFormatter to ensure any data logged while developing properly
// logs in production.
func (jf *JSONFormatter) LogEntry(level int, msg string, args []interface{}) map[string]interface{} {
	buf := pool.Get()
	defer pool.Put(buf)
	jf.Format(buf, level, msg, args)
	var entry map[string]interface{}
	err := json.Unmarshal(buf.Bytes(), &entry)
	if err != nil {
		panic("Unable to unmarshal entry from JSONFormatter: " + err.Error() + " \"" + string(buf.Bytes()) + "\"")
	}
	return entry
}
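For context, the formatter above serializes the variadic args as key/value pairs: a single trailing arg is stored under singleArgKey, an even count is treated as alternating keys and values, and an odd count larger than one is logged under warnImbalancedKey rather than dropped. A minimal caller-side sketch, assuming logxi's conventional import path github.com/mgutz/logxi/v1 and that the JSON formatter is selected (e.g. via LOGXI_FORMAT=JSON); the import path and env var usage are assumptions, not part of this diff:

package main

import (
	log "github.com/mgutz/logxi/v1"
)

func main() {
	// Even count: args are paired, yielding "user" and "attempts" keys in the entry.
	log.Info("login", "user", "mary", "attempts", 3)

	// Single trailing arg: stored under the single-arg key rather than paired.
	log.Debug("raw payload", map[string]int{"a": 1})

	// Odd count greater than one: cannot be paired, so the whole args slice is
	// logged under the imbalanced-pairs warning key instead of being dropped.
	log.Warn("imbalanced", "port", 8080, "oops")
}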
@@ -0,0 +1,153 @@
package log

/*
http://en.wikipedia.org/wiki/Syslog

Code	Severity	Keyword
0	Emergency	emerg (panic)	System is unusable.

A "panic" condition usually affecting multiple apps/servers/sites. At this
level it would usually notify all tech staff on call.

1	Alert	alert	Action must be taken immediately.

Should be corrected immediately, therefore notify staff who can fix the
problem. An example would be the loss of a primary ISP connection.

2	Critical	crit	Critical conditions.

Should be corrected immediately, but indicates failure in a secondary
system, an example is a loss of a backup ISP connection.

3	Error	err (error)	Error conditions.

Non-urgent failures, these should be relayed to developers or admins; each
item must be resolved within a given time.

4	Warning	warning (warn)	Warning conditions.

Warning messages, not an error, but indication that an error will occur if
action is not taken, e.g. file system 85% full - each item must be resolved
within a given time.

5	Notice	notice	Normal but significant condition.

Events that are unusual but not error conditions - might be summarized in
an email to developers or admins to spot potential problems - no immediate
action required.

6	Informational	info	Informational messages.

Normal operational messages - may be harvested for reporting, measuring
throughput, etc. - no action required.

7	Debug	debug	Debug-level messages.

Info useful to developers for debugging the application, not useful during operations.
*/

const (
	// LevelEnv chooses level from LOGXI environment variable or defaults
	// to LevelInfo
	LevelEnv = -10000

	// LevelOff means logging is disabled for logger. This should always
	// be first
	LevelOff = -1000

	// LevelEmergency is usually 0 but that is also the "zero" value
	// for Go, which means whenever we do any lookup in string -> int
	// map 0 is returned (not good).
	LevelEmergency = -1

	// LevelAlert means action must be taken immediately.
	LevelAlert = 1

	// LevelFatal means it should be corrected immediately, eg cannot connect to database.
	LevelFatal = 2

	// LevelCritical is an alias for LevelFatal
	LevelCritical = 2

	// LevelError is a non-urgent failure to notify developers or admins
	LevelError = 3

	// LevelWarn indicates an error will occur if action is not taken, eg file system 85% full
	LevelWarn = 4

	// LevelNotice is a normal but significant condition.
	LevelNotice = 5

	// LevelInfo is info level
	LevelInfo = 6

	// LevelDebug is debug level
	LevelDebug = 7

	// LevelTrace is trace level and displays file and line in terminal
	LevelTrace = 10

	// LevelAll is all levels
	LevelAll = 1000
)

// FormatHappy uses HappyDevFormatter
const FormatHappy = "happy"

// FormatText uses TextFormatter
const FormatText = "text"

// FormatJSON uses JSONFormatter
const FormatJSON = "JSON"

// FormatEnv selects formatter based on LOGXI_FORMAT environment variable
const FormatEnv = ""

// LevelMap maps int enums to string level.
var LevelMap = map[int]string{
	LevelFatal: "FTL",
	LevelError: "ERR",
	LevelWarn:  "WRN",
	LevelInfo:  "INF",
	LevelDebug: "DBG",
	LevelTrace: "TRC",
}

// LevelAtoi maps string levels to int enums.
var LevelAtoi = map[string]int{
	"OFF": LevelOff,
	"FTL": LevelFatal,
	"ERR": LevelError,
	"WRN": LevelWarn,
	"INF": LevelInfo,
	"DBG": LevelDebug,
	"TRC": LevelTrace,
	"ALL": LevelAll,

	"off":   LevelOff,
	"fatal": LevelFatal,
	"error": LevelError,
	"warn":  LevelWarn,
	"info":  LevelInfo,
	"debug": LevelDebug,
	"trace": LevelTrace,
	"all":   LevelAll,
}

// Logger is the interface for logging.
type Logger interface {
	Trace(msg string, args ...interface{})
	Debug(msg string, args ...interface{})
	Info(msg string, args ...interface{})
	Warn(msg string, args ...interface{}) error
	Error(msg string, args ...interface{}) error
	Fatal(msg string, args ...interface{})
	Log(level int, msg string, args []interface{})

	SetLevel(int)
	IsTrace() bool
	IsDebug() bool
	IsInfo() bool
	IsWarn() bool
	// Error, Fatal not needed, those SHOULD always be logged
}
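A small sketch of how the two level tables above are meant to work together when resolving a level name from configuration or the LOGXI environment variable. The parseLevel helper is hypothetical and not part of this commit; it only illustrates that both the short uppercase codes and the lowercase names resolve to the same constants, with an info-level fallback:

package log

// parseLevel is an illustrative helper (not part of this commit): both
// "DBG" and "debug" resolve to LevelDebug via LevelAtoi, and unknown names
// fall back to LevelInfo. Note that 0 is deliberately unused as a level so
// that a failed map lookup is never mistaken for a real level.
func parseLevel(name string) int {
	if level, ok := LevelAtoi[name]; ok {
		return level
	}
	return LevelInfo
}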
@@ -0,0 +1,51 @@
package log

// Trace logs a trace statement. On terminals file and line number are logged.
func Trace(msg string, args ...interface{}) {
	DefaultLog.Trace(msg, args...)
}

// Debug logs a debug statement.
func Debug(msg string, args ...interface{}) {
	DefaultLog.Debug(msg, args...)
}

// Info logs an info statement.
func Info(msg string, args ...interface{}) {
	DefaultLog.Info(msg, args...)
}

// Warn logs a warning statement. On terminals it logs file and line number.
func Warn(msg string, args ...interface{}) {
	DefaultLog.Warn(msg, args...)
}

// Error logs an error statement with callstack.
func Error(msg string, args ...interface{}) {
	DefaultLog.Error(msg, args...)
}

// Fatal logs a fatal statement.
func Fatal(msg string, args ...interface{}) {
	DefaultLog.Fatal(msg, args...)
}

// IsTrace determines if this logger logs a trace statement.
func IsTrace() bool {
	return DefaultLog.IsTrace()
}

// IsDebug determines if this logger logs a debug statement.
func IsDebug() bool {
	return DefaultLog.IsDebug()
}

// IsInfo determines if this logger logs an info statement.
func IsInfo() bool {
	return DefaultLog.IsInfo()
}

// IsWarn determines if this logger logs a warning statement.
func IsWarn() bool {
	return DefaultLog.IsWarn()
}
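The Is* helpers above exist so callers can skip building expensive log arguments when the level is disabled. A minimal caller-side sketch; the import path and buildExpensiveDump helper are assumptions for illustration only:

package main

import (
	"fmt"

	log "github.com/mgutz/logxi/v1"
)

func main() {
	// Guard expensive argument construction behind the level check so the
	// formatting work is skipped entirely when debug logging is disabled.
	if log.IsDebug() {
		log.Debug("cache state", "dump", fmt.Sprintf("%+v", buildExpensiveDump()))
	}
}

// buildExpensiveDump stands in for work you only want to do when debugging.
func buildExpensiveDump() map[string]int {
	return map[string]int{"entries": 42}
}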
@@ -0,0 +1,66 @@
package log

// NullLog is a noop logger. Think of it as /dev/null.
var NullLog = &NullLogger{}

// NullLogger is the default logger for this package.
type NullLogger struct{}

// Trace logs a trace entry.
func (l *NullLogger) Trace(msg string, args ...interface{}) {
}

// Debug logs a debug entry.
func (l *NullLogger) Debug(msg string, args ...interface{}) {
}

// Info logs an info entry.
func (l *NullLogger) Info(msg string, args ...interface{}) {
}

// Warn logs a warn entry.
func (l *NullLogger) Warn(msg string, args ...interface{}) error {
	return nil
}

// Error logs an error entry.
func (l *NullLogger) Error(msg string, args ...interface{}) error {
	return nil
}

// Fatal logs a fatal entry then panics.
func (l *NullLogger) Fatal(msg string, args ...interface{}) {
	panic("exit due to fatal error")
}

// Log logs a leveled entry.
func (l *NullLogger) Log(level int, msg string, args []interface{}) {
}

// IsTrace determines if this logger logs a trace statement.
func (l *NullLogger) IsTrace() bool {
	return false
}

// IsDebug determines if this logger logs a debug statement.
func (l *NullLogger) IsDebug() bool {
	return false
}

// IsInfo determines if this logger logs an info statement.
func (l *NullLogger) IsInfo() bool {
	return false
}

// IsWarn determines if this logger logs a warning statement.
func (l *NullLogger) IsWarn() bool {
	return false
}

// SetLevel sets the level of this logger.
func (l *NullLogger) SetLevel(level int) {
}

// SetFormatter sets the formatter for this logger.
func (l *NullLogger) SetFormatter(formatter Formatter) {
}
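Because NullLogger satisfies the Logger interface with no-ops and its Is* checks always return false, it can be swapped in to silence a chatty component. A short caller-side sketch under the same import-path assumption as above:

package main

import log "github.com/mgutz/logxi/v1"

func main() {
	// Swap in the no-op logger: every call becomes a cheap no-op, and the
	// level checks return false so guarded blocks are skipped entirely.
	var logger log.Logger = log.NullLog
	logger.Info("this goes nowhere")
	if logger.IsDebug() { // always false for NullLogger
		logger.Debug("never reached")
	}
}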
@@ -0,0 +1,29 @@
package log

import (
	"bytes"
	"sync"
)

type BufferPool struct {
	sync.Pool
}

func NewBufferPool() *BufferPool {
	return &BufferPool{
		Pool: sync.Pool{New: func() interface{} {
			b := bytes.NewBuffer(make([]byte, 128))
			b.Reset()
			return b
		}},
	}
}

func (bp *BufferPool) Get() *bytes.Buffer {
	return bp.Pool.Get().(*bytes.Buffer)
}

func (bp *BufferPool) Put(b *bytes.Buffer) {
	b.Reset()
	bp.Pool.Put(b)
}
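The formatters in this commit all follow the same Get/Put discipline against a package-level pool (assumed here to be a *BufferPool created with NewBufferPool; its declaration is not shown in this diff). A hypothetical in-package helper sketching that discipline:

package log

import "io"

// writePooled is an illustrative helper (not part of this commit): borrow a
// buffer, always return it via defer (Put resets it), and flush its contents
// to the destination writer before returning.
func writePooled(w io.Writer, parts ...string) {
	buf := pool.Get()
	defer pool.Put(buf)
	for _, p := range parts {
		buf.WriteString(p)
	}
	buf.WriteTo(w)
}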
@@ -0,0 +1,107 @@
package log

import (
	"fmt"
	"io"
	"runtime/debug"
	"time"
)

// Formatter records log entries.
type Formatter interface {
	Format(writer io.Writer, level int, msg string, args []interface{})
}

// TextFormatter is the default recorder used if one is unspecified when
// creating a new Logger.
type TextFormatter struct {
	name         string
	itoaLevelMap map[int]string
	timeLabel    string
}

// NewTextFormatter returns a new instance of TextFormatter. SetName
// must be called before using it.
func NewTextFormatter(name string) *TextFormatter {
	timeLabel := KeyMap.Time + AssignmentChar
	levelLabel := Separator + KeyMap.Level + AssignmentChar
	messageLabel := Separator + KeyMap.Message + AssignmentChar
	nameLabel := Separator + KeyMap.Name + AssignmentChar
	pidLabel := Separator + KeyMap.PID + AssignmentChar

	var buildKV = func(level string) string {
		buf := pool.Get()
		defer pool.Put(buf)

		buf.WriteString(pidLabel)
		buf.WriteString(pidStr)

		//buf.WriteString(Separator)
		buf.WriteString(nameLabel)
		buf.WriteString(name)

		//buf.WriteString(Separator)
		buf.WriteString(levelLabel)
		buf.WriteString(level)

		//buf.WriteString(Separator)
		buf.WriteString(messageLabel)

		return buf.String()
	}
	itoaLevelMap := map[int]string{
		LevelDebug: buildKV(LevelMap[LevelDebug]),
		LevelWarn:  buildKV(LevelMap[LevelWarn]),
		LevelInfo:  buildKV(LevelMap[LevelInfo]),
		LevelError: buildKV(LevelMap[LevelError]),
		LevelFatal: buildKV(LevelMap[LevelFatal]),
	}
	return &TextFormatter{itoaLevelMap: itoaLevelMap, name: name, timeLabel: timeLabel}
}

func (tf *TextFormatter) set(buf bufferWriter, key string, val interface{}) {
	buf.WriteString(Separator)
	buf.WriteString(key)
	buf.WriteString(AssignmentChar)
	if err, ok := val.(error); ok {
		buf.WriteString(err.Error())
		buf.WriteRune('\n')
		buf.WriteString(string(debug.Stack()))
		return
	}
	buf.WriteString(fmt.Sprintf("%v", val))
}

// Format records a log entry.
func (tf *TextFormatter) Format(writer io.Writer, level int, msg string, args []interface{}) {
	buf := pool.Get()
	defer pool.Put(buf)
	buf.WriteString(tf.timeLabel)
	buf.WriteString(time.Now().Format(timeFormat))
	buf.WriteString(tf.itoaLevelMap[level])
	buf.WriteString(msg)
	var lenArgs = len(args)
	if lenArgs > 0 {
		if lenArgs == 1 {
			tf.set(buf, singleArgKey, args[0])
		} else if lenArgs%2 == 0 {
			for i := 0; i < lenArgs; i += 2 {
				if key, ok := args[i].(string); ok {
					if key == "" {
						// show the key is invalid
						tf.set(buf, badKeyAtIndex(i), args[i+1])
					} else {
						tf.set(buf, key, args[i+1])
					}
				} else {
					// show the key is invalid
					tf.set(buf, badKeyAtIndex(i), args[i+1])
				}
			}
		} else {
			tf.set(buf, warnImbalancedKey, args)
		}
	}
	buf.WriteRune('\n')
	buf.WriteTo(writer)
}
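TextFormatter builds a flat key=value line from the same args-pairing rules as the JSON formatter. A hedged sketch of driving it directly; the sample output line assumes short default key names and a space separator from KeyMap, Separator, and AssignmentChar, which are defined outside this diff:

package main

import (
	"os"

	log "github.com/mgutz/logxi/v1"
)

func main() {
	tf := log.NewTextFormatter("mylog")
	tf.Format(os.Stdout, log.LevelInfo, "server started", []interface{}{"port", 8080})
	// With the assumed defaults this emits something along the lines of:
	//   t=2015-05-04T10:00:00-07:00 pid=1234 n=mylog l=INF m=server started port=8080
	// (exact key names, separator, and time format depend on package defaults
	// not shown in this commit).
}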
@@ -0,0 +1,53 @@
package log

import (
	"path/filepath"
	"strings"
)

func expandTabs(s string, tabLen int) string {
	if s == "" {
		return s
	}
	parts := strings.Split(s, "\t")
	buf := pool.Get()
	defer pool.Put(buf)
	for _, part := range parts {
		buf.WriteString(part)
		buf.WriteString(strings.Repeat(" ", tabLen-len(part)%tabLen))
	}
	return buf.String()
}

func maxInt(a, b int) int {
	if a > b {
		return a
	}
	return b
}

func minInt(a, b int) int {
	if a < b {
		return a
	}
	return b
}

func indexOfNonSpace(s string) int {
	if s == "" {
		return -1
	}
	for i, r := range s {
		if r != ' ' {
			return i
		}
	}
	return -1
}

var inLogxiPath = filepath.Join("mgutz", "logxi", "v"+strings.Split(Version, ".")[0])

func isLogxiCode(filename string) bool {
	// need to see errors in tests
	return strings.HasSuffix(filepath.Dir(filename), inLogxiPath) &&
		!strings.HasSuffix(filename, "_test.go")
}
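expandTabs pads every tab-delimited segment (including the last one) with spaces up to the next multiple of tabLen, which is how the terminal formatter keeps columns aligned. An illustrative in-package test sketch, not part of this commit:

package log

import "testing"

// TestExpandTabs illustrates the padding rule: "ab" is padded to width 4,
// "cdef" to width 8, and the trailing "g" also receives padding.
func TestExpandTabs(t *testing.T) {
	got := expandTabs("ab\tcdef\tg", 4)
	want := "ab  cdef    g   "
	if got != want {
		t.Fatalf("expandTabs: got %q, want %q", got, want)
	}
}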