New command: consul debug (#4754)
* agent/debug: add package for debugging, host info * api: add v1/agent/host endpoint * agent: add v1/agent/host endpoint * command/debug: implementation of static capture * command/debug: tests and only configured targets * agent/debug: add basic test for host metrics * command/debug: add methods for dynamic data capture * api: add debug/pprof endpoints * command/debug: add pprof * command/debug: timing, wg, logs to disk * vendor: add gopsutil/disk * command/debug: add a usage section * website: add docs for consul debug * agent/host: require operator:read * api/host: improve docs and no retry timing * command/debug: fail on extra arguments * command/debug: fixup file permissions to 0644 * command/debug: remove server flags * command/debug: improve clarity of usage section * api/debug: add Trace for profiling, fix profile * command/debug: capture profile and trace at the same time * command/debug: add index document * command/debug: use "clusters" in place of members * command/debug: remove address in output * command/debug: improve comment on metrics sleep * command/debug: clarify usage * agent: always register pprof handlers and protect This will allow us to avoid a restart of a target agent for profiling by always registering the pprof handlers. Given this is a potentially sensitive path, it is protected with an operator:read ACL and enable debug being set to true on the target agent. enable_debug still requires a restart. If ACLs are disabled, enable_debug is sufficient. * command/debug: use trace.out instead of .prof More in line with golang docs. * agent: fix comment wording * agent: wrap table driven tests in t.run()
This commit is contained in:
parent
96a35f8abc
commit
197d62c6ca
|
@ -19,6 +19,7 @@ import (
|
||||||
"github.com/hashicorp/consul/agent/cache-types"
|
"github.com/hashicorp/consul/agent/cache-types"
|
||||||
"github.com/hashicorp/consul/agent/checks"
|
"github.com/hashicorp/consul/agent/checks"
|
||||||
"github.com/hashicorp/consul/agent/config"
|
"github.com/hashicorp/consul/agent/config"
|
||||||
|
"github.com/hashicorp/consul/agent/debug"
|
||||||
"github.com/hashicorp/consul/agent/structs"
|
"github.com/hashicorp/consul/agent/structs"
|
||||||
"github.com/hashicorp/consul/api"
|
"github.com/hashicorp/consul/api"
|
||||||
"github.com/hashicorp/consul/ipaddr"
|
"github.com/hashicorp/consul/ipaddr"
|
||||||
|
@ -1463,3 +1464,27 @@ type connectAuthorizeResp struct {
|
||||||
Authorized bool // True if authorized, false if not
|
Authorized bool // True if authorized, false if not
|
||||||
Reason string // Reason for the Authorized value (whether true or false)
|
Reason string // Reason for the Authorized value (whether true or false)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// AgentHost
|
||||||
|
//
|
||||||
|
// GET /v1/agent/host
|
||||||
|
//
|
||||||
|
// Retrieves information about resources available and in-use for the
|
||||||
|
// host the agent is running on such as CPU, memory, and disk usage. Requires
|
||||||
|
// a operator:read ACL token.
|
||||||
|
func (s *HTTPServer) AgentHost(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||||
|
// Fetch the ACL token, if any, and enforce agent policy.
|
||||||
|
var token string
|
||||||
|
s.parseToken(req, &token)
|
||||||
|
rule, err := s.agent.resolveToken(token)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// TODO(pearkes): Is agent:read appropriate here? There could be relatively
|
||||||
|
// sensitive information made available in this API
|
||||||
|
if rule != nil && !rule.OperatorRead() {
|
||||||
|
return nil, acl.ErrPermissionDenied
|
||||||
|
}
|
||||||
|
|
||||||
|
return debug.CollectHostInfo(), nil
|
||||||
|
}
|
||||||
|
|
|
@ -20,6 +20,7 @@ import (
|
||||||
"github.com/hashicorp/consul/agent/checks"
|
"github.com/hashicorp/consul/agent/checks"
|
||||||
"github.com/hashicorp/consul/agent/config"
|
"github.com/hashicorp/consul/agent/config"
|
||||||
"github.com/hashicorp/consul/agent/connect"
|
"github.com/hashicorp/consul/agent/connect"
|
||||||
|
"github.com/hashicorp/consul/agent/debug"
|
||||||
"github.com/hashicorp/consul/agent/local"
|
"github.com/hashicorp/consul/agent/local"
|
||||||
"github.com/hashicorp/consul/agent/structs"
|
"github.com/hashicorp/consul/agent/structs"
|
||||||
"github.com/hashicorp/consul/api"
|
"github.com/hashicorp/consul/api"
|
||||||
|
@ -1877,9 +1878,9 @@ func TestAgent_RegisterService_TranslateKeys(t *testing.T) {
|
||||||
|
|
||||||
json := `
|
json := `
|
||||||
{
|
{
|
||||||
"name":"test",
|
"name":"test",
|
||||||
"port":8000,
|
"port":8000,
|
||||||
"enable_tag_override": true,
|
"enable_tag_override": true,
|
||||||
"meta": {
|
"meta": {
|
||||||
"some": "meta",
|
"some": "meta",
|
||||||
"enable_tag_override": "meta is 'opaque' so should not get translated"
|
"enable_tag_override": "meta is 'opaque' so should not get translated"
|
||||||
|
@ -1929,9 +1930,9 @@ func TestAgent_RegisterService_TranslateKeys(t *testing.T) {
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"sidecar_service": {
|
"sidecar_service": {
|
||||||
"name":"test-proxy",
|
"name":"test-proxy",
|
||||||
"port":8001,
|
"port":8001,
|
||||||
"enable_tag_override": true,
|
"enable_tag_override": true,
|
||||||
"meta": {
|
"meta": {
|
||||||
"some": "meta",
|
"some": "meta",
|
||||||
"enable_tag_override": "sidecar_service.meta is 'opaque' so should not get translated"
|
"enable_tag_override": "sidecar_service.meta is 'opaque' so should not get translated"
|
||||||
|
@ -2791,7 +2792,7 @@ func TestAgent_RegisterServiceDeregisterService_Sidecar(t *testing.T) {
|
||||||
require := require.New(t)
|
require := require.New(t)
|
||||||
|
|
||||||
// Constrain auto ports to 1 available to make it deterministic
|
// Constrain auto ports to 1 available to make it deterministic
|
||||||
hcl := `ports {
|
hcl := `ports {
|
||||||
sidecar_min_port = 2222
|
sidecar_min_port = 2222
|
||||||
sidecar_max_port = 2222
|
sidecar_max_port = 2222
|
||||||
}
|
}
|
||||||
|
@ -5537,3 +5538,55 @@ func testAllowProxyConfig() string {
|
||||||
}
|
}
|
||||||
`
|
`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAgent_Host(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
assert := assert.New(t)
|
||||||
|
|
||||||
|
dc1 := "dc1"
|
||||||
|
a := NewTestAgent(t.Name(), `
|
||||||
|
acl_datacenter = "`+dc1+`"
|
||||||
|
acl_default_policy = "allow"
|
||||||
|
acl_master_token = "master"
|
||||||
|
acl_agent_token = "agent"
|
||||||
|
acl_agent_master_token = "towel"
|
||||||
|
acl_enforce_version_8 = true
|
||||||
|
`)
|
||||||
|
defer a.Shutdown()
|
||||||
|
|
||||||
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
||||||
|
req, _ := http.NewRequest("GET", "/v1/agent/host?token=master", nil)
|
||||||
|
resp := httptest.NewRecorder()
|
||||||
|
respRaw, err := a.srv.AgentHost(resp, req)
|
||||||
|
assert.Nil(err)
|
||||||
|
assert.Equal(http.StatusOK, resp.Code)
|
||||||
|
assert.NotNil(respRaw)
|
||||||
|
|
||||||
|
obj := respRaw.(*debug.HostInfo)
|
||||||
|
assert.NotNil(obj.CollectionTime)
|
||||||
|
assert.Empty(obj.Errors)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAgent_HostBadACL(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
assert := assert.New(t)
|
||||||
|
|
||||||
|
dc1 := "dc1"
|
||||||
|
a := NewTestAgent(t.Name(), `
|
||||||
|
acl_datacenter = "`+dc1+`"
|
||||||
|
acl_default_policy = "deny"
|
||||||
|
acl_master_token = "root"
|
||||||
|
acl_agent_token = "agent"
|
||||||
|
acl_agent_master_token = "towel"
|
||||||
|
acl_enforce_version_8 = true
|
||||||
|
`)
|
||||||
|
defer a.Shutdown()
|
||||||
|
|
||||||
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
||||||
|
req, _ := http.NewRequest("GET", "/v1/agent/host?token=agent", nil)
|
||||||
|
resp := httptest.NewRecorder()
|
||||||
|
respRaw, err := a.srv.AgentHost(resp, req)
|
||||||
|
assert.EqualError(err, "ACL not found")
|
||||||
|
assert.Equal(http.StatusOK, resp.Code)
|
||||||
|
assert.Nil(respRaw)
|
||||||
|
}
|
||||||
|
|
|
@ -0,0 +1,59 @@
|
||||||
|
package debug
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/shirou/gopsutil/cpu"
|
||||||
|
"github.com/shirou/gopsutil/disk"
|
||||||
|
"github.com/shirou/gopsutil/host"
|
||||||
|
"github.com/shirou/gopsutil/mem"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// DiskUsagePath is the path to check usage of the disk.
|
||||||
|
// Must be a filessytem path such as "/", not device file path like "/dev/vda1"
|
||||||
|
DiskUsagePath = "/"
|
||||||
|
)
|
||||||
|
|
||||||
|
// HostInfo includes information about resources on the host as well as
|
||||||
|
// collection time and
|
||||||
|
type HostInfo struct {
|
||||||
|
Memory *mem.VirtualMemoryStat
|
||||||
|
CPU []cpu.InfoStat
|
||||||
|
Host *host.InfoStat
|
||||||
|
Disk *disk.UsageStat
|
||||||
|
CollectionTime int64
|
||||||
|
Errors []error
|
||||||
|
}
|
||||||
|
|
||||||
|
// CollectHostInfo queries the host system and returns HostInfo. Any
|
||||||
|
// errors encountered will be returned in HostInfo.Errors
|
||||||
|
func CollectHostInfo() *HostInfo {
|
||||||
|
info := &HostInfo{CollectionTime: time.Now().UTC().UnixNano()}
|
||||||
|
|
||||||
|
if h, err := host.Info(); err != nil {
|
||||||
|
info.Errors = append(info.Errors, err)
|
||||||
|
} else {
|
||||||
|
info.Host = h
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, err := mem.VirtualMemory(); err != nil {
|
||||||
|
info.Errors = append(info.Errors, err)
|
||||||
|
} else {
|
||||||
|
info.Memory = v
|
||||||
|
}
|
||||||
|
|
||||||
|
if d, err := disk.Usage(DiskUsagePath); err != nil {
|
||||||
|
info.Errors = append(info.Errors, err)
|
||||||
|
} else {
|
||||||
|
info.Disk = d
|
||||||
|
}
|
||||||
|
|
||||||
|
if c, err := cpu.Info(); err != nil {
|
||||||
|
info.Errors = append(info.Errors, err)
|
||||||
|
} else {
|
||||||
|
info.CPU = c
|
||||||
|
}
|
||||||
|
|
||||||
|
return info
|
||||||
|
}
|
|
@ -0,0 +1,20 @@
|
||||||
|
package debug
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestCollectHostInfo(t *testing.T) {
|
||||||
|
assert := assert.New(t)
|
||||||
|
|
||||||
|
host := CollectHostInfo()
|
||||||
|
|
||||||
|
assert.Nil(host.Errors)
|
||||||
|
|
||||||
|
assert.NotNil(host.CollectionTime)
|
||||||
|
assert.NotNil(host.Host)
|
||||||
|
assert.NotNil(host.Disk)
|
||||||
|
assert.NotNil(host.Memory)
|
||||||
|
}
|
|
@ -142,6 +142,41 @@ func (s *HTTPServer) handler(enableDebug bool) http.Handler {
|
||||||
mux.Handle(pattern, gzipHandler)
|
mux.Handle(pattern, gzipHandler)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// handlePProf takes the given pattern and pprof handler
|
||||||
|
// and wraps it to add authorization and metrics
|
||||||
|
handlePProf := func(pattern string, handler http.HandlerFunc) {
|
||||||
|
wrapper := func(resp http.ResponseWriter, req *http.Request) {
|
||||||
|
var token string
|
||||||
|
s.parseToken(req, &token)
|
||||||
|
|
||||||
|
rule, err := s.agent.resolveToken(token)
|
||||||
|
if err != nil {
|
||||||
|
resp.WriteHeader(http.StatusForbidden)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// If enableDebug is not set, and ACLs are disabled, write
|
||||||
|
// an unauthorized response
|
||||||
|
if !enableDebug {
|
||||||
|
if s.checkACLDisabled(resp, req) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the token provided does not have the necessary permissions,
|
||||||
|
// write a forbidden response
|
||||||
|
if rule != nil && !rule.OperatorRead() {
|
||||||
|
resp.WriteHeader(http.StatusForbidden)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Call the pprof handler
|
||||||
|
handler(resp, req)
|
||||||
|
}
|
||||||
|
|
||||||
|
handleFuncMetrics(pattern, http.HandlerFunc(wrapper))
|
||||||
|
}
|
||||||
|
|
||||||
mux.HandleFunc("/", s.Index)
|
mux.HandleFunc("/", s.Index)
|
||||||
for pattern, fn := range endpoints {
|
for pattern, fn := range endpoints {
|
||||||
thisFn := fn
|
thisFn := fn
|
||||||
|
@ -151,12 +186,13 @@ func (s *HTTPServer) handler(enableDebug bool) http.Handler {
|
||||||
}
|
}
|
||||||
handleFuncMetrics(pattern, s.wrap(bound, methods))
|
handleFuncMetrics(pattern, s.wrap(bound, methods))
|
||||||
}
|
}
|
||||||
if enableDebug {
|
|
||||||
handleFuncMetrics("/debug/pprof/", pprof.Index)
|
// Register wrapped pprof handlers
|
||||||
handleFuncMetrics("/debug/pprof/cmdline", pprof.Cmdline)
|
handlePProf("/debug/pprof/", pprof.Index)
|
||||||
handleFuncMetrics("/debug/pprof/profile", pprof.Profile)
|
handlePProf("/debug/pprof/cmdline", pprof.Cmdline)
|
||||||
handleFuncMetrics("/debug/pprof/symbol", pprof.Symbol)
|
handlePProf("/debug/pprof/profile", pprof.Profile)
|
||||||
}
|
handlePProf("/debug/pprof/symbol", pprof.Symbol)
|
||||||
|
handlePProf("/debug/pprof/trace", pprof.Trace)
|
||||||
|
|
||||||
if s.IsUIEnabled() {
|
if s.IsUIEnabled() {
|
||||||
legacy_ui, err := strconv.ParseBool(os.Getenv("CONSUL_UI_LEGACY"))
|
legacy_ui, err := strconv.ParseBool(os.Getenv("CONSUL_UI_LEGACY"))
|
||||||
|
|
|
@ -13,6 +13,7 @@ func init() {
|
||||||
registerEndpoint("/v1/acl/replication", []string{"GET"}, (*HTTPServer).ACLReplicationStatus)
|
registerEndpoint("/v1/acl/replication", []string{"GET"}, (*HTTPServer).ACLReplicationStatus)
|
||||||
registerEndpoint("/v1/agent/token/", []string{"PUT"}, (*HTTPServer).AgentToken)
|
registerEndpoint("/v1/agent/token/", []string{"PUT"}, (*HTTPServer).AgentToken)
|
||||||
registerEndpoint("/v1/agent/self", []string{"GET"}, (*HTTPServer).AgentSelf)
|
registerEndpoint("/v1/agent/self", []string{"GET"}, (*HTTPServer).AgentSelf)
|
||||||
|
registerEndpoint("/v1/agent/host", []string{"GET"}, (*HTTPServer).AgentHost)
|
||||||
registerEndpoint("/v1/agent/maintenance", []string{"PUT"}, (*HTTPServer).AgentNodeMaintenance)
|
registerEndpoint("/v1/agent/maintenance", []string{"PUT"}, (*HTTPServer).AgentNodeMaintenance)
|
||||||
registerEndpoint("/v1/agent/reload", []string{"PUT"}, (*HTTPServer).AgentReload)
|
registerEndpoint("/v1/agent/reload", []string{"PUT"}, (*HTTPServer).AgentReload)
|
||||||
registerEndpoint("/v1/agent/monitor", []string{"GET"}, (*HTTPServer).AgentMonitor)
|
registerEndpoint("/v1/agent/monitor", []string{"GET"}, (*HTTPServer).AgentMonitor)
|
||||||
|
|
|
@ -20,8 +20,10 @@ import (
|
||||||
|
|
||||||
"github.com/hashicorp/consul/agent/structs"
|
"github.com/hashicorp/consul/agent/structs"
|
||||||
"github.com/hashicorp/consul/api"
|
"github.com/hashicorp/consul/api"
|
||||||
|
"github.com/hashicorp/consul/testrpc"
|
||||||
"github.com/hashicorp/consul/testutil"
|
"github.com/hashicorp/consul/testutil"
|
||||||
"github.com/hashicorp/go-cleanhttp"
|
"github.com/hashicorp/go-cleanhttp"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
"golang.org/x/net/http2"
|
"golang.org/x/net/http2"
|
||||||
)
|
)
|
||||||
|
@ -724,6 +726,104 @@ func TestParseWait(t *testing.T) {
|
||||||
t.Fatalf("Bad: %v", b)
|
t.Fatalf("Bad: %v", b)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
func TestPProfHandlers_EnableDebug(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
require := require.New(t)
|
||||||
|
a := NewTestAgent(t.Name(), "enable_debug = true")
|
||||||
|
defer a.Shutdown()
|
||||||
|
|
||||||
|
resp := httptest.NewRecorder()
|
||||||
|
req, _ := http.NewRequest("GET", "/debug/pprof/profile", nil)
|
||||||
|
|
||||||
|
a.srv.Handler.ServeHTTP(resp, req)
|
||||||
|
|
||||||
|
require.Equal(http.StatusOK, resp.Code)
|
||||||
|
}
|
||||||
|
func TestPProfHandlers_DisableDebugNoACLs(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
require := require.New(t)
|
||||||
|
a := NewTestAgent(t.Name(), "enable_debug = false")
|
||||||
|
defer a.Shutdown()
|
||||||
|
|
||||||
|
resp := httptest.NewRecorder()
|
||||||
|
req, _ := http.NewRequest("GET", "/debug/pprof/profile", nil)
|
||||||
|
|
||||||
|
a.srv.Handler.ServeHTTP(resp, req)
|
||||||
|
|
||||||
|
require.Equal(http.StatusUnauthorized, resp.Code)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPProfHandlers_ACLs(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
assert := assert.New(t)
|
||||||
|
dc1 := "dc1"
|
||||||
|
|
||||||
|
a := NewTestAgent(t.Name(), `
|
||||||
|
acl_datacenter = "`+dc1+`"
|
||||||
|
acl_default_policy = "deny"
|
||||||
|
acl_master_token = "master"
|
||||||
|
acl_agent_token = "agent"
|
||||||
|
acl_agent_master_token = "towel"
|
||||||
|
acl_enforce_version_8 = true
|
||||||
|
enable_debug = false
|
||||||
|
`)
|
||||||
|
|
||||||
|
cases := []struct {
|
||||||
|
code int
|
||||||
|
token string
|
||||||
|
endpoint string
|
||||||
|
nilResponse bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
code: http.StatusOK,
|
||||||
|
token: "master",
|
||||||
|
endpoint: "/debug/pprof/heap",
|
||||||
|
nilResponse: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
code: http.StatusForbidden,
|
||||||
|
token: "agent",
|
||||||
|
endpoint: "/debug/pprof/heap",
|
||||||
|
nilResponse: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
code: http.StatusForbidden,
|
||||||
|
token: "agent",
|
||||||
|
endpoint: "/debug/pprof/",
|
||||||
|
nilResponse: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
code: http.StatusForbidden,
|
||||||
|
token: "",
|
||||||
|
endpoint: "/debug/pprof/",
|
||||||
|
nilResponse: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
code: http.StatusOK,
|
||||||
|
token: "master",
|
||||||
|
endpoint: "/debug/pprof/heap",
|
||||||
|
nilResponse: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
code: http.StatusForbidden,
|
||||||
|
token: "towel",
|
||||||
|
endpoint: "/debug/pprof/heap",
|
||||||
|
nilResponse: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
defer a.Shutdown()
|
||||||
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
||||||
|
|
||||||
|
for i, c := range cases {
|
||||||
|
t.Run(fmt.Sprintf("case %d (%#v)", i, c), func(t *testing.T) {
|
||||||
|
req, _ := http.NewRequest("GET", fmt.Sprintf("%s?token=%s", c.endpoint, c.token), nil)
|
||||||
|
resp := httptest.NewRecorder()
|
||||||
|
a.srv.Handler.ServeHTTP(resp, req)
|
||||||
|
assert.Equal(c.code, resp.Code)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestParseWait_InvalidTime(t *testing.T) {
|
func TestParseWait_InvalidTime(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
18
api/agent.go
18
api/agent.go
|
@ -314,6 +314,24 @@ func (a *Agent) Self() (map[string]map[string]interface{}, error) {
|
||||||
return out, nil
|
return out, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Host is used to retrieve information about the host the
|
||||||
|
// agent is running on such as CPU, memory, and disk. Requires
|
||||||
|
// a operator:read ACL token.
|
||||||
|
func (a *Agent) Host() (map[string]interface{}, error) {
|
||||||
|
r := a.c.newRequest("GET", "/v1/agent/host")
|
||||||
|
_, resp, err := requireOK(a.c.doRequest(r))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
var out map[string]interface{}
|
||||||
|
if err := decodeBody(resp, &out); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return out, nil
|
||||||
|
}
|
||||||
|
|
||||||
// Metrics is used to query the agent we are speaking to for
|
// Metrics is used to query the agent we are speaking to for
|
||||||
// its current internal metric data
|
// its current internal metric data
|
||||||
func (a *Agent) Metrics() (*MetricsInfo, error) {
|
func (a *Agent) Metrics() (*MetricsInfo, error) {
|
||||||
|
|
|
@ -53,6 +53,26 @@ func TestAPI_AgentMetrics(t *testing.T) {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestAPI_AgentHost(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClient(t)
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
agent := c.Agent()
|
||||||
|
timer := &retry.Timer{}
|
||||||
|
retry.RunWith(timer, t, func(r *retry.R) {
|
||||||
|
host, err := agent.Host()
|
||||||
|
if err != nil {
|
||||||
|
r.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CollectionTime should exist on all responses
|
||||||
|
if host["CollectionTime"] == nil {
|
||||||
|
r.Fatalf("missing host response")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func TestAPI_AgentReload(t *testing.T) {
|
func TestAPI_AgentReload(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
|
|
|
@ -0,0 +1,106 @@
|
||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"strconv"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Debug can be used to query the /debug/pprof endpoints to gather
|
||||||
|
// profiling information about the target agent.Debug
|
||||||
|
//
|
||||||
|
// The agent must have enable_debug set to true for profiling to be enabled
|
||||||
|
// and for these endpoints to function.
|
||||||
|
type Debug struct {
|
||||||
|
c *Client
|
||||||
|
}
|
||||||
|
|
||||||
|
// Debug returns a handle that exposes the internal debug endpoints.
|
||||||
|
func (c *Client) Debug() *Debug {
|
||||||
|
return &Debug{c}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Heap returns a pprof heap dump
|
||||||
|
func (d *Debug) Heap() ([]byte, error) {
|
||||||
|
r := d.c.newRequest("GET", "/debug/pprof/heap")
|
||||||
|
_, resp, err := d.c.doRequest(r)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error making request: %s", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
// We return a raw response because we're just passing through a response
|
||||||
|
// from the pprof handlers
|
||||||
|
body, err := ioutil.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error decoding body: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return body, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Profile returns a pprof CPU profile for the specified number of seconds
|
||||||
|
func (d *Debug) Profile(seconds int) ([]byte, error) {
|
||||||
|
r := d.c.newRequest("GET", "/debug/pprof/profile")
|
||||||
|
|
||||||
|
// Capture a profile for the specified number of seconds
|
||||||
|
r.params.Set("seconds", strconv.Itoa(seconds))
|
||||||
|
|
||||||
|
_, resp, err := d.c.doRequest(r)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error making request: %s", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
// We return a raw response because we're just passing through a response
|
||||||
|
// from the pprof handlers
|
||||||
|
body, err := ioutil.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error decoding body: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return body, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Trace returns an execution trace
|
||||||
|
func (d *Debug) Trace(seconds int) ([]byte, error) {
|
||||||
|
r := d.c.newRequest("GET", "/debug/pprof/trace")
|
||||||
|
|
||||||
|
// Capture a trace for the specified number of seconds
|
||||||
|
r.params.Set("seconds", strconv.Itoa(seconds))
|
||||||
|
|
||||||
|
_, resp, err := d.c.doRequest(r)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error making request: %s", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
// We return a raw response because we're just passing through a response
|
||||||
|
// from the pprof handlers
|
||||||
|
body, err := ioutil.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error decoding body: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return body, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Goroutine returns a pprof goroutine profile
|
||||||
|
func (d *Debug) Goroutine() ([]byte, error) {
|
||||||
|
r := d.c.newRequest("GET", "/debug/pprof/goroutine")
|
||||||
|
|
||||||
|
_, resp, err := d.c.doRequest(r)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error making request: %s", err)
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
|
||||||
|
// We return a raw response because we're just passing through a response
|
||||||
|
// from the pprof handlers
|
||||||
|
body, err := ioutil.ReadAll(resp.Body)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error decoding body: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return body, nil
|
||||||
|
}
|
|
@ -0,0 +1,83 @@
|
||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/hashicorp/consul/testutil"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAPI_DebugHeap(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClientWithConfig(t, nil, func(conf *testutil.TestServerConfig) {
|
||||||
|
conf.EnableDebug = true
|
||||||
|
})
|
||||||
|
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
debug := c.Debug()
|
||||||
|
raw, err := debug.Heap()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(raw) <= 0 {
|
||||||
|
t.Fatalf("no response: %#v", raw)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAPI_DebugProfile(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClientWithConfig(t, nil, func(conf *testutil.TestServerConfig) {
|
||||||
|
conf.EnableDebug = true
|
||||||
|
})
|
||||||
|
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
debug := c.Debug()
|
||||||
|
raw, err := debug.Profile(1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(raw) <= 0 {
|
||||||
|
t.Fatalf("no response: %#v", raw)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAPI_DebugGoroutine(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClientWithConfig(t, nil, func(conf *testutil.TestServerConfig) {
|
||||||
|
conf.EnableDebug = true
|
||||||
|
})
|
||||||
|
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
debug := c.Debug()
|
||||||
|
raw, err := debug.Goroutine()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(raw) <= 0 {
|
||||||
|
t.Fatalf("no response: %#v", raw)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAPI_DebugTrace(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
c, s := makeClientWithConfig(t, nil, func(conf *testutil.TestServerConfig) {
|
||||||
|
conf.EnableDebug = true
|
||||||
|
})
|
||||||
|
|
||||||
|
defer s.Stop()
|
||||||
|
|
||||||
|
debug := c.Debug()
|
||||||
|
raw, err := debug.Trace(1)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(raw) <= 0 {
|
||||||
|
t.Fatalf("no response: %#v", raw)
|
||||||
|
}
|
||||||
|
}
|
|
@ -12,6 +12,7 @@ import (
|
||||||
caset "github.com/hashicorp/consul/command/connect/ca/set"
|
caset "github.com/hashicorp/consul/command/connect/ca/set"
|
||||||
"github.com/hashicorp/consul/command/connect/envoy"
|
"github.com/hashicorp/consul/command/connect/envoy"
|
||||||
"github.com/hashicorp/consul/command/connect/proxy"
|
"github.com/hashicorp/consul/command/connect/proxy"
|
||||||
|
"github.com/hashicorp/consul/command/debug"
|
||||||
"github.com/hashicorp/consul/command/event"
|
"github.com/hashicorp/consul/command/event"
|
||||||
"github.com/hashicorp/consul/command/exec"
|
"github.com/hashicorp/consul/command/exec"
|
||||||
"github.com/hashicorp/consul/command/forceleave"
|
"github.com/hashicorp/consul/command/forceleave"
|
||||||
|
@ -79,6 +80,7 @@ func init() {
|
||||||
Register("connect ca set-config", func(ui cli.Ui) (cli.Command, error) { return caset.New(ui), nil })
|
Register("connect ca set-config", func(ui cli.Ui) (cli.Command, error) { return caset.New(ui), nil })
|
||||||
Register("connect proxy", func(ui cli.Ui) (cli.Command, error) { return proxy.New(ui, MakeShutdownCh()), nil })
|
Register("connect proxy", func(ui cli.Ui) (cli.Command, error) { return proxy.New(ui, MakeShutdownCh()), nil })
|
||||||
Register("connect envoy", func(ui cli.Ui) (cli.Command, error) { return envoy.New(ui), nil })
|
Register("connect envoy", func(ui cli.Ui) (cli.Command, error) { return envoy.New(ui), nil })
|
||||||
|
Register("debug", func(ui cli.Ui) (cli.Command, error) { return debug.New(ui, MakeShutdownCh()), nil })
|
||||||
Register("event", func(ui cli.Ui) (cli.Command, error) { return event.New(ui), nil })
|
Register("event", func(ui cli.Ui) (cli.Command, error) { return event.New(ui), nil })
|
||||||
Register("exec", func(ui cli.Ui) (cli.Command, error) { return exec.New(ui, MakeShutdownCh()), nil })
|
Register("exec", func(ui cli.Ui) (cli.Command, error) { return exec.New(ui, MakeShutdownCh()), nil })
|
||||||
Register("force-leave", func(ui cli.Ui) (cli.Command, error) { return forceleave.New(ui), nil })
|
Register("force-leave", func(ui cli.Ui) (cli.Command, error) { return forceleave.New(ui), nil })
|
||||||
|
|
|
@ -0,0 +1,702 @@
|
||||||
|
package debug
|
||||||
|
|
||||||
|
import (
|
||||||
|
"archive/tar"
|
||||||
|
"compress/gzip"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/hashicorp/consul/api"
|
||||||
|
"github.com/hashicorp/consul/command/flags"
|
||||||
|
multierror "github.com/hashicorp/go-multierror"
|
||||||
|
"github.com/mitchellh/cli"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// debugInterval is the interval in which to capture dynamic information
|
||||||
|
// when running debug
|
||||||
|
debugInterval = 30 * time.Second
|
||||||
|
|
||||||
|
// debugDuration is the total duration that debug runs before being
|
||||||
|
// shut down
|
||||||
|
debugDuration = 2 * time.Minute
|
||||||
|
|
||||||
|
// debugDurationGrace is a period of time added to the specified
|
||||||
|
// duration to allow intervals to capture within that time
|
||||||
|
debugDurationGrace = 2 * time.Second
|
||||||
|
|
||||||
|
// debugMinInterval is the minimum a user can configure the interval
|
||||||
|
// to prevent accidental DOS
|
||||||
|
debugMinInterval = 5 * time.Second
|
||||||
|
|
||||||
|
// debugMinDuration is the minimum a user can configure the duration
|
||||||
|
// to ensure that all information can be collected in time
|
||||||
|
debugMinDuration = 10 * time.Second
|
||||||
|
|
||||||
|
// debugArchiveExtension is the extension for archive files
|
||||||
|
debugArchiveExtension = ".tar.gz"
|
||||||
|
|
||||||
|
// debugProtocolVersion is the version of the package that is
|
||||||
|
// generated. If this format changes interface, this version
|
||||||
|
// can be incremented so clients can selectively support packages
|
||||||
|
debugProtocolVersion = 1
|
||||||
|
)
|
||||||
|
|
||||||
|
func New(ui cli.Ui, shutdownCh <-chan struct{}) *cmd {
|
||||||
|
ui = &cli.PrefixedUi{
|
||||||
|
OutputPrefix: "==> ",
|
||||||
|
InfoPrefix: " ",
|
||||||
|
ErrorPrefix: "==> ",
|
||||||
|
Ui: ui,
|
||||||
|
}
|
||||||
|
|
||||||
|
c := &cmd{UI: ui, shutdownCh: shutdownCh}
|
||||||
|
c.init()
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
type cmd struct {
|
||||||
|
UI cli.Ui
|
||||||
|
flags *flag.FlagSet
|
||||||
|
http *flags.HTTPFlags
|
||||||
|
help string
|
||||||
|
|
||||||
|
shutdownCh <-chan struct{}
|
||||||
|
|
||||||
|
// flags
|
||||||
|
interval time.Duration
|
||||||
|
duration time.Duration
|
||||||
|
output string
|
||||||
|
archive bool
|
||||||
|
capture []string
|
||||||
|
client *api.Client
|
||||||
|
// validateTiming can be used to skip validation of interval, duration. This
|
||||||
|
// is primarily useful for testing
|
||||||
|
validateTiming bool
|
||||||
|
|
||||||
|
index *debugIndex
|
||||||
|
}
|
||||||
|
|
||||||
|
// debugIndex is used to manage the summary of all data recorded
// during the debug, to be written to json at the end of the run
// and stored at the root. Each attribute corresponds to a file or files.
type debugIndex struct {
	// Version of the debug package
	Version int
	// Version of the target Consul agent
	AgentVersion string

	// Interval and Duration are the string forms of the timing flags
	// this run was executed with.
	Interval string
	Duration string

	// Targets is the list of capture targets recorded during the run.
	Targets []string
}
|
||||||
|
|
||||||
|
func (c *cmd) init() {
|
||||||
|
c.flags = flag.NewFlagSet("", flag.ContinueOnError)
|
||||||
|
|
||||||
|
defaultFilename := fmt.Sprintf("consul-debug-%d", time.Now().Unix())
|
||||||
|
|
||||||
|
c.flags.Var((*flags.AppendSliceValue)(&c.capture), "capture",
|
||||||
|
fmt.Sprintf("One or more types of information to capture. This can be used "+
|
||||||
|
"to capture a subset of information, and defaults to capturing "+
|
||||||
|
"everything available. Possible information for capture: %s. "+
|
||||||
|
"This can be repeated multiple times.", strings.Join(c.defaultTargets(), ", ")))
|
||||||
|
c.flags.DurationVar(&c.interval, "interval", debugInterval,
|
||||||
|
fmt.Sprintf("The interval in which to capture dynamic information such as "+
|
||||||
|
"telemetry, and profiling. Defaults to %s.", debugInterval))
|
||||||
|
c.flags.DurationVar(&c.duration, "duration", debugDuration,
|
||||||
|
fmt.Sprintf("The total time to record information. "+
|
||||||
|
"Defaults to %s.", debugDuration))
|
||||||
|
c.flags.BoolVar(&c.archive, "archive", true, "Boolean value for if the files "+
|
||||||
|
"should be archived and compressed. Setting this to false will skip the "+
|
||||||
|
"archive step and leave the directory of information on the current path.")
|
||||||
|
c.flags.StringVar(&c.output, "output", defaultFilename, "The path "+
|
||||||
|
"to the compressed archive that will be created with the "+
|
||||||
|
"information after collection.")
|
||||||
|
|
||||||
|
c.http = &flags.HTTPFlags{}
|
||||||
|
flags.Merge(c.flags, c.http.ClientFlags())
|
||||||
|
c.help = flags.Usage(help, c.flags)
|
||||||
|
|
||||||
|
c.validateTiming = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run executes the debug command: it parses flags, validates the
// capture configuration against the target agent, captures static and
// dynamic data, writes an index document, and optionally archives the
// results. Returns a CLI exit code (0 on success).
func (c *cmd) Run(args []string) int {
	if err := c.flags.Parse(args); err != nil {
		c.UI.Error(fmt.Sprintf("Error parsing flags: %s", err))
		return 1
	}

	// Positional arguments are not accepted.
	if len(c.flags.Args()) > 0 {
		c.UI.Error("debug: Too many arguments provided, expected 0")
		return 1
	}

	// Connect to the agent
	client, err := c.http.APIClient()
	if err != nil {
		c.UI.Error(fmt.Sprintf("Error connecting to Consul agent: %s", err))
		return 1
	}
	c.client = client

	// Validate timing/targets and create the output directory.
	version, err := c.prepare()
	if err != nil {
		c.UI.Error(fmt.Sprintf("Capture validation failed: %v", err))
		return 1
	}

	archiveName := c.output
	// Show the user the final file path if archiving
	if c.archive {
		archiveName = archiveName + debugArchiveExtension
	}

	c.UI.Output("Starting debugger and capturing static information...")

	// Output metadata about target agent
	c.UI.Info(fmt.Sprintf(" Agent Version: '%s'", version))
	c.UI.Info(fmt.Sprintf(" Interval: '%s'", c.interval))
	c.UI.Info(fmt.Sprintf(" Duration: '%s'", c.duration))
	c.UI.Info(fmt.Sprintf(" Output: '%s'", archiveName))
	c.UI.Info(fmt.Sprintf(" Capture: '%s'", strings.Join(c.capture, ", ")))

	// Record some information for the index at the root of the archive
	index := &debugIndex{
		Version:      debugProtocolVersion,
		AgentVersion: version,
		Interval:     c.interval.String(),
		Duration:     c.duration.String(),
		Targets:      c.capture,
	}

	// Add the extra grace period to ensure
	// all intervals will be captured within the time allotted
	c.duration = c.duration + debugDurationGrace

	// Capture static information from the target agent
	err = c.captureStatic()
	if err != nil {
		// Static capture failures are non-fatal; warn and continue.
		c.UI.Warn(fmt.Sprintf("Static capture failed: %v", err))
	}

	// Capture dynamic information from the target agent, blocking for duration
	if c.configuredTarget("metrics") || c.configuredTarget("logs") || c.configuredTarget("pprof") {
		err = c.captureDynamic()
		if err != nil {
			c.UI.Error(fmt.Sprintf("Error encountered during collection: %v", err))
		}
	}

	// Write the index document
	idxMarshalled, err := json.MarshalIndent(index, "", "\t")
	if err != nil {
		c.UI.Error(fmt.Sprintf("Error marshalling index document: %v", err))
		return 1
	}

	err = ioutil.WriteFile(fmt.Sprintf("%s/index.json", c.output), idxMarshalled, 0644)
	if err != nil {
		c.UI.Error(fmt.Sprintf("Error creating index document: %v", err))
		return 1
	}

	// Archive the data if configured to
	if c.archive {
		err = c.createArchive()

		if err != nil {
			c.UI.Warn(fmt.Sprintf("Archive creation failed: %v", err))
			return 1
		}
	}

	c.UI.Info(fmt.Sprintf("Saved debug archive: %s", archiveName))

	return 0
}
|
||||||
|
|
||||||
|
// prepare validates agent settings against targets and prepares the environment for capturing
|
||||||
|
func (c *cmd) prepare() (version string, err error) {
|
||||||
|
// Ensure realistic duration and intervals exists
|
||||||
|
if c.validateTiming {
|
||||||
|
if c.duration < debugMinDuration {
|
||||||
|
return "", fmt.Errorf("duration must be longer than %s", debugMinDuration)
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.interval < debugMinInterval {
|
||||||
|
return "", fmt.Errorf("interval must be longer than %s", debugMinDuration)
|
||||||
|
}
|
||||||
|
|
||||||
|
if c.duration < c.interval {
|
||||||
|
return "", fmt.Errorf("duration (%s) must be longer than interval (%s)", c.duration, c.interval)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Retrieve and process agent information necessary to validate
|
||||||
|
self, err := c.client.Agent().Self()
|
||||||
|
if err != nil {
|
||||||
|
return "", fmt.Errorf("error querying target agent: %s. verify connectivity and agent address", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
version, ok := self["Config"]["Version"].(string)
|
||||||
|
if !ok {
|
||||||
|
return "", fmt.Errorf("agent response did not contain version key")
|
||||||
|
}
|
||||||
|
|
||||||
|
debugEnabled, ok := self["DebugConfig"]["EnableDebug"].(bool)
|
||||||
|
if !ok {
|
||||||
|
return version, fmt.Errorf("agent response did not contain debug key")
|
||||||
|
}
|
||||||
|
|
||||||
|
// If none are specified we will collect information from
|
||||||
|
// all by default
|
||||||
|
if len(c.capture) == 0 {
|
||||||
|
c.capture = c.defaultTargets()
|
||||||
|
}
|
||||||
|
|
||||||
|
if !debugEnabled && c.configuredTarget("pprof") {
|
||||||
|
cs := c.capture
|
||||||
|
for i := 0; i < len(cs); i++ {
|
||||||
|
if cs[i] == "pprof" {
|
||||||
|
c.capture = append(cs[:i], cs[i+1:]...)
|
||||||
|
i--
|
||||||
|
}
|
||||||
|
}
|
||||||
|
c.UI.Warn("[WARN] Unable to capture pprof. Set enable_debug to true on target agent to enable profiling.")
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, t := range c.capture {
|
||||||
|
if !c.allowedTarget(t) {
|
||||||
|
return version, fmt.Errorf("target not found: %s", t)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := os.Stat(c.output); os.IsNotExist(err) {
|
||||||
|
err := os.MkdirAll(c.output, 0755)
|
||||||
|
if err != nil {
|
||||||
|
return version, fmt.Errorf("could not create output directory: %s", err)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
return version, fmt.Errorf("output directory already exists: %s", c.output)
|
||||||
|
}
|
||||||
|
|
||||||
|
return version, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// captureStatic captures static target information and writes it
|
||||||
|
// to the output path
|
||||||
|
func (c *cmd) captureStatic() error {
|
||||||
|
// Collect errors via multierror as we want to gracefully
|
||||||
|
// fail if an API is inacessible
|
||||||
|
var errors error
|
||||||
|
|
||||||
|
// Collect the named outputs here
|
||||||
|
outputs := make(map[string]interface{}, 0)
|
||||||
|
|
||||||
|
// Capture host information
|
||||||
|
if c.configuredTarget("host") {
|
||||||
|
host, err := c.client.Agent().Host()
|
||||||
|
if err != nil {
|
||||||
|
errors = multierror.Append(errors, err)
|
||||||
|
}
|
||||||
|
outputs["host"] = host
|
||||||
|
}
|
||||||
|
|
||||||
|
// Capture agent information
|
||||||
|
if c.configuredTarget("agent") {
|
||||||
|
agent, err := c.client.Agent().Self()
|
||||||
|
if err != nil {
|
||||||
|
errors = multierror.Append(errors, err)
|
||||||
|
}
|
||||||
|
outputs["agent"] = agent
|
||||||
|
}
|
||||||
|
|
||||||
|
// Capture cluster members information, including WAN
|
||||||
|
if c.configuredTarget("cluster") {
|
||||||
|
members, err := c.client.Agent().Members(true)
|
||||||
|
if err != nil {
|
||||||
|
errors = multierror.Append(errors, err)
|
||||||
|
}
|
||||||
|
outputs["cluster"] = members
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write all outputs to disk as JSON
|
||||||
|
for output, v := range outputs {
|
||||||
|
marshaled, err := json.MarshalIndent(v, "", "\t")
|
||||||
|
if err != nil {
|
||||||
|
errors = multierror.Append(errors, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = ioutil.WriteFile(fmt.Sprintf("%s/%s.json", c.output, output), marshaled, 0644)
|
||||||
|
if err != nil {
|
||||||
|
errors = multierror.Append(errors, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return errors
|
||||||
|
}
|
||||||
|
|
||||||
|
// captureDynamic blocks for the duration of the command
|
||||||
|
// specified by the duration flag, capturing the dynamic
|
||||||
|
// targets at the interval specified
|
||||||
|
func (c *cmd) captureDynamic() error {
|
||||||
|
successChan := make(chan int64)
|
||||||
|
errCh := make(chan error)
|
||||||
|
durationChn := time.After(c.duration)
|
||||||
|
intervalCount := 0
|
||||||
|
|
||||||
|
c.UI.Output(fmt.Sprintf("Beginning capture interval %s (%d)", time.Now().Local().String(), intervalCount))
|
||||||
|
|
||||||
|
// We'll wait for all of the targets configured to be
|
||||||
|
// captured before continuing
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
|
||||||
|
capture := func() {
|
||||||
|
timestamp := time.Now().Local().Unix()
|
||||||
|
|
||||||
|
// Make the directory that will store all captured data
|
||||||
|
// for this interval
|
||||||
|
timestampDir := fmt.Sprintf("%s/%d", c.output, timestamp)
|
||||||
|
err := os.MkdirAll(timestampDir, 0755)
|
||||||
|
if err != nil {
|
||||||
|
errCh <- err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Capture metrics
|
||||||
|
if c.configuredTarget("metrics") {
|
||||||
|
wg.Add(1)
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
metrics, err := c.client.Agent().Metrics()
|
||||||
|
if err != nil {
|
||||||
|
errCh <- err
|
||||||
|
}
|
||||||
|
|
||||||
|
marshaled, err := json.MarshalIndent(metrics, "", "\t")
|
||||||
|
if err != nil {
|
||||||
|
errCh <- err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = ioutil.WriteFile(fmt.Sprintf("%s/%s.json", timestampDir, "metrics"), marshaled, 0644)
|
||||||
|
if err != nil {
|
||||||
|
errCh <- err
|
||||||
|
}
|
||||||
|
|
||||||
|
// We need to sleep for the configured interval in the case
|
||||||
|
// of metrics being the only target captured. When it is,
|
||||||
|
// the waitgroup would return on Wait() and repeat without
|
||||||
|
// waiting for the interval.
|
||||||
|
time.Sleep(c.interval)
|
||||||
|
|
||||||
|
wg.Done()
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Capture pprof
|
||||||
|
if c.configuredTarget("pprof") {
|
||||||
|
wg.Add(1)
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
// We need to capture profiles and traces at the same time
|
||||||
|
// and block for both of them
|
||||||
|
var wgProf sync.WaitGroup
|
||||||
|
|
||||||
|
heap, err := c.client.Debug().Heap()
|
||||||
|
if err != nil {
|
||||||
|
errCh <- err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = ioutil.WriteFile(fmt.Sprintf("%s/heap.prof", timestampDir), heap, 0644)
|
||||||
|
if err != nil {
|
||||||
|
errCh <- err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Capture a profile/trace with a minimum of 1s
|
||||||
|
s := c.interval.Seconds()
|
||||||
|
if s < 1 {
|
||||||
|
s = 1
|
||||||
|
}
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
wgProf.Add(1)
|
||||||
|
|
||||||
|
prof, err := c.client.Debug().Profile(int(s))
|
||||||
|
if err != nil {
|
||||||
|
errCh <- err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = ioutil.WriteFile(fmt.Sprintf("%s/profile.prof", timestampDir), prof, 0644)
|
||||||
|
if err != nil {
|
||||||
|
errCh <- err
|
||||||
|
}
|
||||||
|
|
||||||
|
wgProf.Done()
|
||||||
|
}()
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
wgProf.Add(1)
|
||||||
|
|
||||||
|
trace, err := c.client.Debug().Trace(int(s))
|
||||||
|
if err != nil {
|
||||||
|
errCh <- err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = ioutil.WriteFile(fmt.Sprintf("%s/trace.out", timestampDir), trace, 0644)
|
||||||
|
if err != nil {
|
||||||
|
errCh <- err
|
||||||
|
}
|
||||||
|
|
||||||
|
wgProf.Done()
|
||||||
|
}()
|
||||||
|
|
||||||
|
gr, err := c.client.Debug().Goroutine()
|
||||||
|
if err != nil {
|
||||||
|
errCh <- err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = ioutil.WriteFile(fmt.Sprintf("%s/goroutine.prof", timestampDir), gr, 0644)
|
||||||
|
if err != nil {
|
||||||
|
errCh <- err
|
||||||
|
}
|
||||||
|
|
||||||
|
wgProf.Wait()
|
||||||
|
|
||||||
|
wg.Done()
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Capture logs
|
||||||
|
if c.configuredTarget("logs") {
|
||||||
|
wg.Add(1)
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
endLogChn := make(chan struct{})
|
||||||
|
logCh, err := c.client.Agent().Monitor("DEBUG", endLogChn, nil)
|
||||||
|
if err != nil {
|
||||||
|
errCh <- err
|
||||||
|
}
|
||||||
|
// Close the log stream
|
||||||
|
defer close(endLogChn)
|
||||||
|
|
||||||
|
// Create the log file for writing
|
||||||
|
f, err := os.Create(fmt.Sprintf("%s/%s", timestampDir, "consul.log"))
|
||||||
|
if err != nil {
|
||||||
|
errCh <- err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
intervalChn := time.After(c.interval)
|
||||||
|
|
||||||
|
OUTER:
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case log := <-logCh:
|
||||||
|
// Append the line to the file
|
||||||
|
if _, err = f.WriteString(log + "\n"); err != nil {
|
||||||
|
errCh <- err
|
||||||
|
break OUTER
|
||||||
|
}
|
||||||
|
// Stop collecting the logs after the interval specified
|
||||||
|
case <-intervalChn:
|
||||||
|
break OUTER
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
wg.Done()
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait for all captures to complete
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
// Send down the timestamp for UI output
|
||||||
|
successChan <- timestamp
|
||||||
|
}
|
||||||
|
|
||||||
|
go capture()
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case t := <-successChan:
|
||||||
|
intervalCount++
|
||||||
|
c.UI.Output(fmt.Sprintf("Capture successful %s (%d)", time.Unix(t, 0).Local().String(), intervalCount))
|
||||||
|
go capture()
|
||||||
|
case e := <-errCh:
|
||||||
|
c.UI.Error(fmt.Sprintf("Capture failure %s", e))
|
||||||
|
case <-durationChn:
|
||||||
|
return nil
|
||||||
|
case <-c.shutdownCh:
|
||||||
|
return errors.New("stopping collection due to shutdown signal")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// allowedTarget returns a boolean if the target is able to be captured
|
||||||
|
func (c *cmd) allowedTarget(target string) bool {
|
||||||
|
for _, dt := range c.defaultTargets() {
|
||||||
|
if dt == target {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// configuredTarget returns a boolean if the target is configured to be
|
||||||
|
// captured in the command
|
||||||
|
func (c *cmd) configuredTarget(target string) bool {
|
||||||
|
for _, dt := range c.capture {
|
||||||
|
if dt == target {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// createArchive walks the files in the temporary directory
|
||||||
|
// and creates a tar file that is gzipped with the contents
|
||||||
|
func (c *cmd) createArchive() error {
|
||||||
|
f, err := os.Create(c.output + debugArchiveExtension)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to create compressed archive: %s", err)
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
|
||||||
|
g := gzip.NewWriter(f)
|
||||||
|
defer g.Close()
|
||||||
|
t := tar.NewWriter(f)
|
||||||
|
defer t.Close()
|
||||||
|
|
||||||
|
err = filepath.Walk(c.output, func(file string, fi os.FileInfo, err error) error {
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to walk filepath for archive: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
header, err := tar.FileInfoHeader(fi, fi.Name())
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to create compressed archive header: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
header.Name = filepath.Join(filepath.Base(c.output), strings.TrimPrefix(file, c.output))
|
||||||
|
|
||||||
|
if err := t.WriteHeader(header); err != nil {
|
||||||
|
return fmt.Errorf("failed to write compressed archive header: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Only copy files
|
||||||
|
if !fi.Mode().IsRegular() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
f, err := os.Open(file)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to open target files for archive: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := io.Copy(t, f); err != nil {
|
||||||
|
return fmt.Errorf("failed to copy files for archive: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
f.Close()
|
||||||
|
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to walk output path for archive: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove directory that has been archived
|
||||||
|
err = os.RemoveAll(c.output)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to remove archived directory: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// defaultTargets specifies the list of all targets that
|
||||||
|
// will be captured by default
|
||||||
|
func (c *cmd) defaultTargets() []string {
|
||||||
|
return append(c.dynamicTargets(), c.staticTargets()...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// dynamicTargets returns all the supported targets
|
||||||
|
// that are retrieved at the interval specified
|
||||||
|
func (c *cmd) dynamicTargets() []string {
|
||||||
|
return []string{"metrics", "logs", "pprof"}
|
||||||
|
}
|
||||||
|
|
||||||
|
// staticTargets returns all the supported targets
|
||||||
|
// that are retrieved at the start of the command execution
|
||||||
|
func (c *cmd) staticTargets() []string {
|
||||||
|
return []string{"host", "agent", "cluster"}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Synopsis returns the one-line description shown in the CLI command list.
func (c *cmd) Synopsis() string {
	return synopsis
}
|
||||||
|
|
||||||
|
// Help returns the full usage text built by init from the help
// constant and the registered flags.
func (c *cmd) Help() string {
	return c.help
}
|
||||||
|
|
||||||
|
const synopsis = "Records a debugging archive for operators"
|
||||||
|
const help = `
|
||||||
|
Usage: consul debug [options]
|
||||||
|
|
||||||
|
Monitors a Consul agent for the specified period of time, recording
|
||||||
|
information about the agent, cluster, and environment to an archive
|
||||||
|
written to the specified path.
|
||||||
|
|
||||||
|
If ACLs are enabled, an 'operator:read' token must be supplied in order
|
||||||
|
to perform this operation.
|
||||||
|
|
||||||
|
To create a debug archive in the current directory for the default
|
||||||
|
duration and interval, capturing all information available:
|
||||||
|
|
||||||
|
$ consul debug
|
||||||
|
|
||||||
|
The command stores captured data at the configured output path
|
||||||
|
through the duration, and will archive the data at the same
|
||||||
|
path if interrupted.
|
||||||
|
|
||||||
|
Flags can be used to customize the duration and interval of the
|
||||||
|
operation. Duration is the total time to capture data for from the target
|
||||||
|
agent and interval controls how often dynamic data such as metrics
|
||||||
|
are scraped.
|
||||||
|
|
||||||
|
$ consul debug -interval=20s -duration=1m
|
||||||
|
|
||||||
|
The capture flag can be specified multiple times to limit information
|
||||||
|
retrieved.
|
||||||
|
|
||||||
|
$ consul debug -capture metrics -capture agent
|
||||||
|
|
||||||
|
By default, the archive containing the debugging information is
|
||||||
|
saved to the current directory as a .tar.gz file. The
|
||||||
|
output path can be specified, as well as an option to disable
|
||||||
|
archiving, leaving the directory intact.
|
||||||
|
|
||||||
|
$ consul debug -output=/foo/bar/my-debugging -archive=false
|
||||||
|
|
||||||
|
Note: Information collected by this command has the potential
|
||||||
|
to be highly sensitive. Sensitive material such as ACL tokens and
|
||||||
|
other commonly secret material are redacted automatically, but we
|
||||||
|
strongly recommend review of the data within the archive prior to
|
||||||
|
transmitting it.
|
||||||
|
|
||||||
|
For a full list of options and examples, please see the Consul
|
||||||
|
documentation.
|
||||||
|
`
|
|
@ -0,0 +1,482 @@
|
||||||
|
package debug
|
||||||
|
|
||||||
|
import (
	"archive/tar"
	"compress/gzip"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
	"testing"

	"github.com/hashicorp/consul/agent"
	"github.com/hashicorp/consul/logger"
	"github.com/hashicorp/consul/testrpc"
	"github.com/hashicorp/consul/testutil"
	"github.com/mitchellh/cli"
)
|
||||||
|
|
||||||
|
func TestDebugCommand_noTabs(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
if strings.ContainsRune(New(cli.NewMockUi(), nil).Help(), '\t') {
|
||||||
|
t.Fatal("help has tabs")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDebugCommand(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
testDir := testutil.TempDir(t, "debug")
|
||||||
|
defer os.RemoveAll(testDir)
|
||||||
|
|
||||||
|
a := agent.NewTestAgent(t.Name(), `
|
||||||
|
enable_debug = true
|
||||||
|
`)
|
||||||
|
a.Agent.LogWriter = logger.NewLogWriter(512)
|
||||||
|
|
||||||
|
defer a.Shutdown()
|
||||||
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
||||||
|
|
||||||
|
ui := cli.NewMockUi()
|
||||||
|
cmd := New(ui, nil)
|
||||||
|
cmd.validateTiming = false
|
||||||
|
|
||||||
|
outputPath := fmt.Sprintf("%s/debug", testDir)
|
||||||
|
args := []string{
|
||||||
|
"-http-addr=" + a.HTTPAddr(),
|
||||||
|
"-output=" + outputPath,
|
||||||
|
"-duration=100ms",
|
||||||
|
"-interval=50ms",
|
||||||
|
}
|
||||||
|
|
||||||
|
code := cmd.Run(args)
|
||||||
|
|
||||||
|
if code != 0 {
|
||||||
|
t.Errorf("should exit 0, got code: %d", code)
|
||||||
|
}
|
||||||
|
|
||||||
|
errOutput := ui.ErrorWriter.String()
|
||||||
|
if errOutput != "" {
|
||||||
|
t.Errorf("expected no error output, got %q", errOutput)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDebugCommand_Archive(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
testDir := testutil.TempDir(t, "debug")
|
||||||
|
defer os.RemoveAll(testDir)
|
||||||
|
|
||||||
|
a := agent.NewTestAgent(t.Name(), `
|
||||||
|
enable_debug = true
|
||||||
|
`)
|
||||||
|
defer a.Shutdown()
|
||||||
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
||||||
|
|
||||||
|
ui := cli.NewMockUi()
|
||||||
|
cmd := New(ui, nil)
|
||||||
|
cmd.validateTiming = false
|
||||||
|
|
||||||
|
outputPath := fmt.Sprintf("%s/debug", testDir)
|
||||||
|
args := []string{
|
||||||
|
"-http-addr=" + a.HTTPAddr(),
|
||||||
|
"-output=" + outputPath,
|
||||||
|
"-capture=agent",
|
||||||
|
}
|
||||||
|
|
||||||
|
if code := cmd.Run(args); code != 0 {
|
||||||
|
t.Fatalf("should exit 0, got code: %d", code)
|
||||||
|
}
|
||||||
|
|
||||||
|
archivePath := fmt.Sprintf("%s%s", outputPath, debugArchiveExtension)
|
||||||
|
file, err := os.Open(archivePath)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to open archive: %s", err)
|
||||||
|
}
|
||||||
|
tr := tar.NewReader(file)
|
||||||
|
|
||||||
|
for {
|
||||||
|
h, err := tr.Next()
|
||||||
|
|
||||||
|
if err == io.EOF {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("failed to read file in archive: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ignore the outer directory
|
||||||
|
if h.Name == "debug" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// should only contain this one capture target
|
||||||
|
if h.Name != "debug/agent.json" && h.Name != "debug/index.json" {
|
||||||
|
t.Fatalf("archive contents do not match: %s", h.Name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDebugCommand_ArgsBad(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
testDir := testutil.TempDir(t, "debug")
|
||||||
|
defer os.RemoveAll(testDir)
|
||||||
|
|
||||||
|
ui := cli.NewMockUi()
|
||||||
|
cmd := New(ui, nil)
|
||||||
|
|
||||||
|
args := []string{
|
||||||
|
"foo",
|
||||||
|
"bad",
|
||||||
|
}
|
||||||
|
|
||||||
|
if code := cmd.Run(args); code == 0 {
|
||||||
|
t.Fatalf("should exit non-zero, got code: %d", code)
|
||||||
|
}
|
||||||
|
|
||||||
|
errOutput := ui.ErrorWriter.String()
|
||||||
|
if !strings.Contains(errOutput, "Too many arguments") {
|
||||||
|
t.Errorf("expected error output, got %q", errOutput)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDebugCommand_OutputPathBad(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
testDir := testutil.TempDir(t, "debug")
|
||||||
|
defer os.RemoveAll(testDir)
|
||||||
|
|
||||||
|
a := agent.NewTestAgent(t.Name(), "")
|
||||||
|
defer a.Shutdown()
|
||||||
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
||||||
|
|
||||||
|
ui := cli.NewMockUi()
|
||||||
|
cmd := New(ui, nil)
|
||||||
|
cmd.validateTiming = false
|
||||||
|
|
||||||
|
outputPath := ""
|
||||||
|
args := []string{
|
||||||
|
"-http-addr=" + a.HTTPAddr(),
|
||||||
|
"-output=" + outputPath,
|
||||||
|
"-duration=100ms",
|
||||||
|
"-interval=50ms",
|
||||||
|
}
|
||||||
|
|
||||||
|
if code := cmd.Run(args); code == 0 {
|
||||||
|
t.Fatalf("should exit non-zero, got code: %d", code)
|
||||||
|
}
|
||||||
|
|
||||||
|
errOutput := ui.ErrorWriter.String()
|
||||||
|
if !strings.Contains(errOutput, "no such file or directory") {
|
||||||
|
t.Errorf("expected error output, got %q", errOutput)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDebugCommand_OutputPathExists(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
testDir := testutil.TempDir(t, "debug")
|
||||||
|
defer os.RemoveAll(testDir)
|
||||||
|
|
||||||
|
a := agent.NewTestAgent(t.Name(), "")
|
||||||
|
a.Agent.LogWriter = logger.NewLogWriter(512)
|
||||||
|
defer a.Shutdown()
|
||||||
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
||||||
|
|
||||||
|
ui := cli.NewMockUi()
|
||||||
|
cmd := New(ui, nil)
|
||||||
|
cmd.validateTiming = false
|
||||||
|
|
||||||
|
outputPath := fmt.Sprintf("%s/debug", testDir)
|
||||||
|
args := []string{
|
||||||
|
"-http-addr=" + a.HTTPAddr(),
|
||||||
|
"-output=" + outputPath,
|
||||||
|
"-duration=100ms",
|
||||||
|
"-interval=50ms",
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make a directory that conflicts with the output path
|
||||||
|
err := os.Mkdir(outputPath, 0755)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("duplicate test directory creation failed: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if code := cmd.Run(args); code == 0 {
|
||||||
|
t.Fatalf("should exit non-zero, got code: %d", code)
|
||||||
|
}
|
||||||
|
|
||||||
|
errOutput := ui.ErrorWriter.String()
|
||||||
|
if !strings.Contains(errOutput, "directory already exists") {
|
||||||
|
t.Errorf("expected error output, got %q", errOutput)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDebugCommand_CaptureTargets(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
cases := map[string]struct {
|
||||||
|
// used in -target param
|
||||||
|
targets []string
|
||||||
|
// existence verified after execution
|
||||||
|
files []string
|
||||||
|
// non-existence verified after execution
|
||||||
|
excludedFiles []string
|
||||||
|
}{
|
||||||
|
"single": {
|
||||||
|
[]string{"agent"},
|
||||||
|
[]string{"agent.json"},
|
||||||
|
[]string{"host.json", "cluster.json"},
|
||||||
|
},
|
||||||
|
"static": {
|
||||||
|
[]string{"agent", "host", "cluster"},
|
||||||
|
[]string{"agent.json", "host.json", "cluster.json"},
|
||||||
|
[]string{"*/metrics.json"},
|
||||||
|
},
|
||||||
|
"metrics-only": {
|
||||||
|
[]string{"metrics"},
|
||||||
|
[]string{"*/metrics.json"},
|
||||||
|
[]string{"agent.json", "host.json", "cluster.json"},
|
||||||
|
},
|
||||||
|
"all-but-pprof": {
|
||||||
|
[]string{
|
||||||
|
"metrics",
|
||||||
|
"logs",
|
||||||
|
"host",
|
||||||
|
"agent",
|
||||||
|
"cluster",
|
||||||
|
},
|
||||||
|
[]string{
|
||||||
|
"host.json",
|
||||||
|
"agent.json",
|
||||||
|
"cluster.json",
|
||||||
|
"*/metrics.json",
|
||||||
|
"*/consul.log",
|
||||||
|
},
|
||||||
|
[]string{},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for name, tc := range cases {
|
||||||
|
testDir := testutil.TempDir(t, "debug")
|
||||||
|
defer os.RemoveAll(testDir)
|
||||||
|
|
||||||
|
a := agent.NewTestAgent(t.Name(), `
|
||||||
|
enable_debug = true
|
||||||
|
`)
|
||||||
|
a.Agent.LogWriter = logger.NewLogWriter(512)
|
||||||
|
|
||||||
|
defer a.Shutdown()
|
||||||
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
||||||
|
|
||||||
|
ui := cli.NewMockUi()
|
||||||
|
cmd := New(ui, nil)
|
||||||
|
cmd.validateTiming = false
|
||||||
|
|
||||||
|
outputPath := fmt.Sprintf("%s/debug-%s", testDir, name)
|
||||||
|
args := []string{
|
||||||
|
"-http-addr=" + a.HTTPAddr(),
|
||||||
|
"-output=" + outputPath,
|
||||||
|
"-archive=false",
|
||||||
|
"-duration=100ms",
|
||||||
|
"-interval=50ms",
|
||||||
|
}
|
||||||
|
for _, t := range tc.targets {
|
||||||
|
args = append(args, "-capture="+t)
|
||||||
|
}
|
||||||
|
|
||||||
|
if code := cmd.Run(args); code != 0 {
|
||||||
|
t.Fatalf("should exit 0, got code: %d", code)
|
||||||
|
}
|
||||||
|
|
||||||
|
errOutput := ui.ErrorWriter.String()
|
||||||
|
if errOutput != "" {
|
||||||
|
t.Errorf("expected no error output, got %q", errOutput)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure the debug data was written
|
||||||
|
_, err := os.Stat(outputPath)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("output path should exist: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure the captured static files exist
|
||||||
|
for _, f := range tc.files {
|
||||||
|
path := fmt.Sprintf("%s/%s", outputPath, f)
|
||||||
|
// Glob ignores file system errors
|
||||||
|
fs, _ := filepath.Glob(path)
|
||||||
|
if len(fs) <= 0 {
|
||||||
|
t.Fatalf("%s: output data should exist for %s", name, f)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure any excluded files do not exist
|
||||||
|
for _, f := range tc.excludedFiles {
|
||||||
|
path := fmt.Sprintf("%s/%s", outputPath, f)
|
||||||
|
// Glob ignores file system errors
|
||||||
|
fs, _ := filepath.Glob(path)
|
||||||
|
if len(fs) > 0 {
|
||||||
|
t.Fatalf("%s: output data should not exist for %s", name, f)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDebugCommand_ProfilesExist(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
testDir := testutil.TempDir(t, "debug")
|
||||||
|
defer os.RemoveAll(testDir)
|
||||||
|
|
||||||
|
a := agent.NewTestAgent(t.Name(), `
|
||||||
|
enable_debug = true
|
||||||
|
`)
|
||||||
|
a.Agent.LogWriter = logger.NewLogWriter(512)
|
||||||
|
defer a.Shutdown()
|
||||||
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
||||||
|
|
||||||
|
ui := cli.NewMockUi()
|
||||||
|
cmd := New(ui, nil)
|
||||||
|
cmd.validateTiming = false
|
||||||
|
|
||||||
|
outputPath := fmt.Sprintf("%s/debug", testDir)
|
||||||
|
println(outputPath)
|
||||||
|
args := []string{
|
||||||
|
"-http-addr=" + a.HTTPAddr(),
|
||||||
|
"-output=" + outputPath,
|
||||||
|
// CPU profile has a minimum of 1s
|
||||||
|
"-archive=false",
|
||||||
|
"-duration=1s",
|
||||||
|
"-interval=1s",
|
||||||
|
"-capture=pprof",
|
||||||
|
}
|
||||||
|
|
||||||
|
if code := cmd.Run(args); code != 0 {
|
||||||
|
t.Fatalf("should exit 0, got code: %d", code)
|
||||||
|
}
|
||||||
|
|
||||||
|
profiles := []string{"heap.prof", "profile.prof", "goroutine.prof", "trace.out"}
|
||||||
|
// Glob ignores file system errors
|
||||||
|
for _, v := range profiles {
|
||||||
|
fs, _ := filepath.Glob(fmt.Sprintf("%s/*/%s", outputPath, v))
|
||||||
|
if len(fs) == 0 {
|
||||||
|
t.Errorf("output data should exist for %s", v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
errOutput := ui.ErrorWriter.String()
|
||||||
|
if errOutput != "" {
|
||||||
|
t.Errorf("expected no error output, got %s", errOutput)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDebugCommand_ValidateTiming(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
cases := map[string]struct {
|
||||||
|
duration string
|
||||||
|
interval string
|
||||||
|
output string
|
||||||
|
code int
|
||||||
|
}{
|
||||||
|
"both": {
|
||||||
|
"20ms",
|
||||||
|
"10ms",
|
||||||
|
"duration must be longer",
|
||||||
|
1,
|
||||||
|
},
|
||||||
|
"short interval": {
|
||||||
|
"10s",
|
||||||
|
"10ms",
|
||||||
|
"interval must be longer",
|
||||||
|
1,
|
||||||
|
},
|
||||||
|
"lower duration": {
|
||||||
|
"20s",
|
||||||
|
"30s",
|
||||||
|
"must be longer than interval",
|
||||||
|
1,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for name, tc := range cases {
|
||||||
|
// Because we're only testng validation, we want to shut down
|
||||||
|
// the valid duration test to avoid hanging
|
||||||
|
shutdownCh := make(chan struct{})
|
||||||
|
|
||||||
|
testDir := testutil.TempDir(t, "debug")
|
||||||
|
defer os.RemoveAll(testDir)
|
||||||
|
|
||||||
|
a := agent.NewTestAgent(t.Name(), "")
|
||||||
|
defer a.Shutdown()
|
||||||
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
||||||
|
|
||||||
|
ui := cli.NewMockUi()
|
||||||
|
cmd := New(ui, shutdownCh)
|
||||||
|
|
||||||
|
args := []string{
|
||||||
|
"-http-addr=" + a.HTTPAddr(),
|
||||||
|
"-duration=" + tc.duration,
|
||||||
|
"-interval=" + tc.interval,
|
||||||
|
"-capture=agent",
|
||||||
|
}
|
||||||
|
code := cmd.Run(args)
|
||||||
|
|
||||||
|
if code != tc.code {
|
||||||
|
t.Errorf("%s: should exit %d, got code: %d", name, tc.code, code)
|
||||||
|
}
|
||||||
|
|
||||||
|
errOutput := ui.ErrorWriter.String()
|
||||||
|
if !strings.Contains(errOutput, tc.output) {
|
||||||
|
t.Errorf("%s: expected error output '%s', got '%q'", name, tc.output, errOutput)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDebugCommand_DebugDisabled(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
testDir := testutil.TempDir(t, "debug")
|
||||||
|
defer os.RemoveAll(testDir)
|
||||||
|
|
||||||
|
a := agent.NewTestAgent(t.Name(), `
|
||||||
|
enable_debug = false
|
||||||
|
`)
|
||||||
|
a.Agent.LogWriter = logger.NewLogWriter(512)
|
||||||
|
defer a.Shutdown()
|
||||||
|
testrpc.WaitForLeader(t, a.RPC, "dc1")
|
||||||
|
|
||||||
|
ui := cli.NewMockUi()
|
||||||
|
cmd := New(ui, nil)
|
||||||
|
cmd.validateTiming = false
|
||||||
|
|
||||||
|
outputPath := fmt.Sprintf("%s/debug", testDir)
|
||||||
|
args := []string{
|
||||||
|
"-http-addr=" + a.HTTPAddr(),
|
||||||
|
"-output=" + outputPath,
|
||||||
|
"-archive=false",
|
||||||
|
// CPU profile has a minimum of 1s
|
||||||
|
"-duration=1s",
|
||||||
|
"-interval=1s",
|
||||||
|
}
|
||||||
|
|
||||||
|
if code := cmd.Run(args); code != 0 {
|
||||||
|
t.Fatalf("should exit 0, got code: %d", code)
|
||||||
|
}
|
||||||
|
|
||||||
|
profiles := []string{"heap.prof", "profile.prof", "goroutine.prof", "trace.out"}
|
||||||
|
// Glob ignores file system errors
|
||||||
|
for _, v := range profiles {
|
||||||
|
fs, _ := filepath.Glob(fmt.Sprintf("%s/*/%s", outputPath, v))
|
||||||
|
if len(fs) > 0 {
|
||||||
|
t.Errorf("output data should not exist for %s", v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
errOutput := ui.ErrorWriter.String()
|
||||||
|
if !strings.Contains(errOutput, "Unable to capture pprof") {
|
||||||
|
t.Errorf("expected warn output, got %s", errOutput)
|
||||||
|
}
|
||||||
|
}
|
|
@ -98,6 +98,7 @@ type TestServerConfig struct {
|
||||||
VerifyOutgoing bool `json:"verify_outgoing,omitempty"`
|
VerifyOutgoing bool `json:"verify_outgoing,omitempty"`
|
||||||
EnableScriptChecks bool `json:"enable_script_checks,omitempty"`
|
EnableScriptChecks bool `json:"enable_script_checks,omitempty"`
|
||||||
Connect map[string]interface{} `json:"connect,omitempty"`
|
Connect map[string]interface{} `json:"connect,omitempty"`
|
||||||
|
EnableDebug bool `json:"enable_debug,omitempty"`
|
||||||
ReadyTimeout time.Duration `json:"-"`
|
ReadyTimeout time.Duration `json:"-"`
|
||||||
Stdout, Stderr io.Writer `json:"-"`
|
Stdout, Stderr io.Writer `json:"-"`
|
||||||
Args []string `json:"-"`
|
Args []string `json:"-"`
|
||||||
|
|
|
@ -0,0 +1,61 @@
|
||||||
|
gopsutil is distributed under BSD license reproduced below.
|
||||||
|
|
||||||
|
Copyright (c) 2014, WAKAYAMA Shirou
|
||||||
|
All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without modification,
|
||||||
|
are permitted provided that the following conditions are met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright notice, this
|
||||||
|
list of conditions and the following disclaimer.
|
||||||
|
* Redistributions in binary form must reproduce the above copyright notice,
|
||||||
|
this list of conditions and the following disclaimer in the documentation
|
||||||
|
and/or other materials provided with the distribution.
|
||||||
|
* Neither the name of the gopsutil authors nor the names of its contributors
|
||||||
|
may be used to endorse or promote products derived from this software without
|
||||||
|
specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||||
|
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||||
|
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||||
|
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||||
|
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||||
|
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||||
|
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
||||||
|
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||||
|
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
|
||||||
|
-------
|
||||||
|
internal/common/binary.go in the gopsutil is copied and modifid from golang/encoding/binary.go.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without
|
||||||
|
modification, are permitted provided that the following conditions are
|
||||||
|
met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright
|
||||||
|
notice, this list of conditions and the following disclaimer.
|
||||||
|
* Redistributions in binary form must reproduce the above
|
||||||
|
copyright notice, this list of conditions and the following disclaimer
|
||||||
|
in the documentation and/or other materials provided with the
|
||||||
|
distribution.
|
||||||
|
* Neither the name of Google Inc. nor the names of its
|
||||||
|
contributors may be used to endorse or promote products derived from
|
||||||
|
this software without specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -0,0 +1,64 @@
|
||||||
|
package disk
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
|
||||||
|
"github.com/shirou/gopsutil/internal/common"
|
||||||
|
)
|
||||||
|
|
||||||
|
var invoke common.Invoker
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
invoke = common.Invoke{}
|
||||||
|
}
|
||||||
|
|
||||||
|
type UsageStat struct {
|
||||||
|
Path string `json:"path"`
|
||||||
|
Fstype string `json:"fstype"`
|
||||||
|
Total uint64 `json:"total"`
|
||||||
|
Free uint64 `json:"free"`
|
||||||
|
Used uint64 `json:"used"`
|
||||||
|
UsedPercent float64 `json:"usedPercent"`
|
||||||
|
InodesTotal uint64 `json:"inodesTotal"`
|
||||||
|
InodesUsed uint64 `json:"inodesUsed"`
|
||||||
|
InodesFree uint64 `json:"inodesFree"`
|
||||||
|
InodesUsedPercent float64 `json:"inodesUsedPercent"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type PartitionStat struct {
|
||||||
|
Device string `json:"device"`
|
||||||
|
Mountpoint string `json:"mountpoint"`
|
||||||
|
Fstype string `json:"fstype"`
|
||||||
|
Opts string `json:"opts"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type IOCountersStat struct {
|
||||||
|
ReadCount uint64 `json:"readCount"`
|
||||||
|
MergedReadCount uint64 `json:"mergedReadCount"`
|
||||||
|
WriteCount uint64 `json:"writeCount"`
|
||||||
|
MergedWriteCount uint64 `json:"mergedWriteCount"`
|
||||||
|
ReadBytes uint64 `json:"readBytes"`
|
||||||
|
WriteBytes uint64 `json:"writeBytes"`
|
||||||
|
ReadTime uint64 `json:"readTime"`
|
||||||
|
WriteTime uint64 `json:"writeTime"`
|
||||||
|
IopsInProgress uint64 `json:"iopsInProgress"`
|
||||||
|
IoTime uint64 `json:"ioTime"`
|
||||||
|
WeightedIO uint64 `json:"weightedIO"`
|
||||||
|
Name string `json:"name"`
|
||||||
|
SerialNumber string `json:"serialNumber"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d UsageStat) String() string {
|
||||||
|
s, _ := json.Marshal(d)
|
||||||
|
return string(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d PartitionStat) String() string {
|
||||||
|
s, _ := json.Marshal(d)
|
||||||
|
return string(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d IOCountersStat) String() string {
|
||||||
|
s, _ := json.Marshal(d)
|
||||||
|
return string(s)
|
||||||
|
}
|
|
@ -0,0 +1,111 @@
|
||||||
|
// +build darwin
|
||||||
|
|
||||||
|
package disk
|
||||||
|
|
||||||
|
import (
|
||||||
|
"path"
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"github.com/shirou/gopsutil/internal/common"
|
||||||
|
)
|
||||||
|
|
||||||
|
func Partitions(all bool) ([]PartitionStat, error) {
|
||||||
|
var ret []PartitionStat
|
||||||
|
|
||||||
|
count, err := Getfsstat(nil, MntWait)
|
||||||
|
if err != nil {
|
||||||
|
return ret, err
|
||||||
|
}
|
||||||
|
fs := make([]Statfs_t, count)
|
||||||
|
_, err = Getfsstat(fs, MntWait)
|
||||||
|
for _, stat := range fs {
|
||||||
|
opts := "rw"
|
||||||
|
if stat.Flags&MntReadOnly != 0 {
|
||||||
|
opts = "ro"
|
||||||
|
}
|
||||||
|
if stat.Flags&MntSynchronous != 0 {
|
||||||
|
opts += ",sync"
|
||||||
|
}
|
||||||
|
if stat.Flags&MntNoExec != 0 {
|
||||||
|
opts += ",noexec"
|
||||||
|
}
|
||||||
|
if stat.Flags&MntNoSuid != 0 {
|
||||||
|
opts += ",nosuid"
|
||||||
|
}
|
||||||
|
if stat.Flags&MntUnion != 0 {
|
||||||
|
opts += ",union"
|
||||||
|
}
|
||||||
|
if stat.Flags&MntAsync != 0 {
|
||||||
|
opts += ",async"
|
||||||
|
}
|
||||||
|
if stat.Flags&MntSuidDir != 0 {
|
||||||
|
opts += ",suiddir"
|
||||||
|
}
|
||||||
|
if stat.Flags&MntSoftDep != 0 {
|
||||||
|
opts += ",softdep"
|
||||||
|
}
|
||||||
|
if stat.Flags&MntNoSymFollow != 0 {
|
||||||
|
opts += ",nosymfollow"
|
||||||
|
}
|
||||||
|
if stat.Flags&MntGEOMJournal != 0 {
|
||||||
|
opts += ",gjounalc"
|
||||||
|
}
|
||||||
|
if stat.Flags&MntMultilabel != 0 {
|
||||||
|
opts += ",multilabel"
|
||||||
|
}
|
||||||
|
if stat.Flags&MntACLs != 0 {
|
||||||
|
opts += ",acls"
|
||||||
|
}
|
||||||
|
if stat.Flags&MntNoATime != 0 {
|
||||||
|
opts += ",noattime"
|
||||||
|
}
|
||||||
|
if stat.Flags&MntClusterRead != 0 {
|
||||||
|
opts += ",nocluster"
|
||||||
|
}
|
||||||
|
if stat.Flags&MntClusterWrite != 0 {
|
||||||
|
opts += ",noclusterw"
|
||||||
|
}
|
||||||
|
if stat.Flags&MntNFS4ACLs != 0 {
|
||||||
|
opts += ",nfs4acls"
|
||||||
|
}
|
||||||
|
d := PartitionStat{
|
||||||
|
Device: common.IntToString(stat.Mntfromname[:]),
|
||||||
|
Mountpoint: common.IntToString(stat.Mntonname[:]),
|
||||||
|
Fstype: common.IntToString(stat.Fstypename[:]),
|
||||||
|
Opts: opts,
|
||||||
|
}
|
||||||
|
if all == false {
|
||||||
|
if !path.IsAbs(d.Device) || !common.PathExists(d.Device) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ret = append(ret, d)
|
||||||
|
}
|
||||||
|
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func IOCounters() (map[string]IOCountersStat, error) {
|
||||||
|
return nil, common.ErrNotImplementedError
|
||||||
|
}
|
||||||
|
|
||||||
|
func Getfsstat(buf []Statfs_t, flags int) (n int, err error) {
|
||||||
|
var _p0 unsafe.Pointer
|
||||||
|
var bufsize uintptr
|
||||||
|
if len(buf) > 0 {
|
||||||
|
_p0 = unsafe.Pointer(&buf[0])
|
||||||
|
bufsize = unsafe.Sizeof(Statfs_t{}) * uintptr(len(buf))
|
||||||
|
}
|
||||||
|
r0, _, e1 := syscall.Syscall(SYS_GETFSSTAT64, uintptr(_p0), bufsize, uintptr(flags))
|
||||||
|
n = int(r0)
|
||||||
|
if e1 != 0 {
|
||||||
|
err = e1
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func getFsType(stat syscall.Statfs_t) string {
|
||||||
|
return common.IntToString(stat.Fstypename[:])
|
||||||
|
}
|
|
@ -0,0 +1,58 @@
|
||||||
|
// +build darwin
|
||||||
|
// +build amd64
|
||||||
|
|
||||||
|
package disk
|
||||||
|
|
||||||
|
const (
|
||||||
|
MntWait = 1
|
||||||
|
MfsNameLen = 15 /* length of fs type name, not inc. nul */
|
||||||
|
MNameLen = 90 /* length of buffer for returned name */
|
||||||
|
|
||||||
|
MFSTYPENAMELEN = 16 /* length of fs type name including null */
|
||||||
|
MAXPATHLEN = 1024
|
||||||
|
MNAMELEN = MAXPATHLEN
|
||||||
|
|
||||||
|
SYS_GETFSSTAT64 = 347
|
||||||
|
)
|
||||||
|
|
||||||
|
type Fsid struct{ val [2]int32 } /* file system id type */
|
||||||
|
type uid_t int32
|
||||||
|
|
||||||
|
// sys/mount.h
|
||||||
|
const (
|
||||||
|
MntReadOnly = 0x00000001 /* read only filesystem */
|
||||||
|
MntSynchronous = 0x00000002 /* filesystem written synchronously */
|
||||||
|
MntNoExec = 0x00000004 /* can't exec from filesystem */
|
||||||
|
MntNoSuid = 0x00000008 /* don't honor setuid bits on fs */
|
||||||
|
MntUnion = 0x00000020 /* union with underlying filesystem */
|
||||||
|
MntAsync = 0x00000040 /* filesystem written asynchronously */
|
||||||
|
MntSuidDir = 0x00100000 /* special handling of SUID on dirs */
|
||||||
|
MntSoftDep = 0x00200000 /* soft updates being done */
|
||||||
|
MntNoSymFollow = 0x00400000 /* do not follow symlinks */
|
||||||
|
MntGEOMJournal = 0x02000000 /* GEOM journal support enabled */
|
||||||
|
MntMultilabel = 0x04000000 /* MAC support for individual objects */
|
||||||
|
MntACLs = 0x08000000 /* ACL support enabled */
|
||||||
|
MntNoATime = 0x10000000 /* disable update of file access time */
|
||||||
|
MntClusterRead = 0x40000000 /* disable cluster read */
|
||||||
|
MntClusterWrite = 0x80000000 /* disable cluster write */
|
||||||
|
MntNFS4ACLs = 0x00000010
|
||||||
|
)
|
||||||
|
|
||||||
|
type Statfs_t struct {
|
||||||
|
Bsize uint32
|
||||||
|
Iosize int32
|
||||||
|
Blocks uint64
|
||||||
|
Bfree uint64
|
||||||
|
Bavail uint64
|
||||||
|
Files uint64
|
||||||
|
Ffree uint64
|
||||||
|
Fsid Fsid
|
||||||
|
Owner uint32
|
||||||
|
Type uint32
|
||||||
|
Flags uint32
|
||||||
|
Fssubtype uint32
|
||||||
|
Fstypename [16]int8
|
||||||
|
Mntonname [1024]int8
|
||||||
|
Mntfromname [1024]int8
|
||||||
|
Reserved [8]uint32
|
||||||
|
}
|
|
@ -0,0 +1,58 @@
|
||||||
|
// +build darwin
|
||||||
|
// +build arm64
|
||||||
|
|
||||||
|
package disk
|
||||||
|
|
||||||
|
const (
|
||||||
|
MntWait = 1
|
||||||
|
MfsNameLen = 15 /* length of fs type name, not inc. nul */
|
||||||
|
MNameLen = 90 /* length of buffer for returned name */
|
||||||
|
|
||||||
|
MFSTYPENAMELEN = 16 /* length of fs type name including null */
|
||||||
|
MAXPATHLEN = 1024
|
||||||
|
MNAMELEN = MAXPATHLEN
|
||||||
|
|
||||||
|
SYS_GETFSSTAT64 = 347
|
||||||
|
)
|
||||||
|
|
||||||
|
type Fsid struct{ val [2]int32 } /* file system id type */
|
||||||
|
type uid_t int32
|
||||||
|
|
||||||
|
// sys/mount.h
|
||||||
|
const (
|
||||||
|
MntReadOnly = 0x00000001 /* read only filesystem */
|
||||||
|
MntSynchronous = 0x00000002 /* filesystem written synchronously */
|
||||||
|
MntNoExec = 0x00000004 /* can't exec from filesystem */
|
||||||
|
MntNoSuid = 0x00000008 /* don't honor setuid bits on fs */
|
||||||
|
MntUnion = 0x00000020 /* union with underlying filesystem */
|
||||||
|
MntAsync = 0x00000040 /* filesystem written asynchronously */
|
||||||
|
MntSuidDir = 0x00100000 /* special handling of SUID on dirs */
|
||||||
|
MntSoftDep = 0x00200000 /* soft updates being done */
|
||||||
|
MntNoSymFollow = 0x00400000 /* do not follow symlinks */
|
||||||
|
MntGEOMJournal = 0x02000000 /* GEOM journal support enabled */
|
||||||
|
MntMultilabel = 0x04000000 /* MAC support for individual objects */
|
||||||
|
MntACLs = 0x08000000 /* ACL support enabled */
|
||||||
|
MntNoATime = 0x10000000 /* disable update of file access time */
|
||||||
|
MntClusterRead = 0x40000000 /* disable cluster read */
|
||||||
|
MntClusterWrite = 0x80000000 /* disable cluster write */
|
||||||
|
MntNFS4ACLs = 0x00000010
|
||||||
|
)
|
||||||
|
|
||||||
|
type Statfs_t struct {
|
||||||
|
Bsize uint32
|
||||||
|
Iosize int32
|
||||||
|
Blocks uint64
|
||||||
|
Bfree uint64
|
||||||
|
Bavail uint64
|
||||||
|
Files uint64
|
||||||
|
Ffree uint64
|
||||||
|
Fsid Fsid
|
||||||
|
Owner uint32
|
||||||
|
Type uint32
|
||||||
|
Flags uint32
|
||||||
|
Fssubtype uint32
|
||||||
|
Fstypename [16]int8
|
||||||
|
Mntonname [1024]int8
|
||||||
|
Mntfromname [1024]int8
|
||||||
|
Reserved [8]uint32
|
||||||
|
}
|
|
@ -0,0 +1,17 @@
|
||||||
|
// +build !darwin,!linux,!freebsd,!openbsd,!windows
|
||||||
|
|
||||||
|
package disk
|
||||||
|
|
||||||
|
import "github.com/shirou/gopsutil/internal/common"
|
||||||
|
|
||||||
|
func IOCounters() (map[string]IOCountersStat, error) {
|
||||||
|
return nil, common.ErrNotImplementedError
|
||||||
|
}
|
||||||
|
|
||||||
|
func Partitions(all bool) ([]PartitionStat, error) {
|
||||||
|
return []PartitionStat{}, common.ErrNotImplementedError
|
||||||
|
}
|
||||||
|
|
||||||
|
func Usage(path string) (*UsageStat, error) {
|
||||||
|
return nil, common.ErrNotImplementedError
|
||||||
|
}
|
|
@ -0,0 +1,176 @@
|
||||||
|
// +build freebsd
|
||||||
|
|
||||||
|
package disk
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/binary"
|
||||||
|
"path"
|
||||||
|
"strconv"
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"github.com/shirou/gopsutil/internal/common"
|
||||||
|
)
|
||||||
|
|
||||||
|
func Partitions(all bool) ([]PartitionStat, error) {
|
||||||
|
var ret []PartitionStat
|
||||||
|
|
||||||
|
// get length
|
||||||
|
count, err := syscall.Getfsstat(nil, MNT_WAIT)
|
||||||
|
if err != nil {
|
||||||
|
return ret, err
|
||||||
|
}
|
||||||
|
|
||||||
|
fs := make([]Statfs, count)
|
||||||
|
_, err = Getfsstat(fs, MNT_WAIT)
|
||||||
|
|
||||||
|
for _, stat := range fs {
|
||||||
|
opts := "rw"
|
||||||
|
if stat.Flags&MNT_RDONLY != 0 {
|
||||||
|
opts = "ro"
|
||||||
|
}
|
||||||
|
if stat.Flags&MNT_SYNCHRONOUS != 0 {
|
||||||
|
opts += ",sync"
|
||||||
|
}
|
||||||
|
if stat.Flags&MNT_NOEXEC != 0 {
|
||||||
|
opts += ",noexec"
|
||||||
|
}
|
||||||
|
if stat.Flags&MNT_NOSUID != 0 {
|
||||||
|
opts += ",nosuid"
|
||||||
|
}
|
||||||
|
if stat.Flags&MNT_UNION != 0 {
|
||||||
|
opts += ",union"
|
||||||
|
}
|
||||||
|
if stat.Flags&MNT_ASYNC != 0 {
|
||||||
|
opts += ",async"
|
||||||
|
}
|
||||||
|
if stat.Flags&MNT_SUIDDIR != 0 {
|
||||||
|
opts += ",suiddir"
|
||||||
|
}
|
||||||
|
if stat.Flags&MNT_SOFTDEP != 0 {
|
||||||
|
opts += ",softdep"
|
||||||
|
}
|
||||||
|
if stat.Flags&MNT_NOSYMFOLLOW != 0 {
|
||||||
|
opts += ",nosymfollow"
|
||||||
|
}
|
||||||
|
if stat.Flags&MNT_GJOURNAL != 0 {
|
||||||
|
opts += ",gjounalc"
|
||||||
|
}
|
||||||
|
if stat.Flags&MNT_MULTILABEL != 0 {
|
||||||
|
opts += ",multilabel"
|
||||||
|
}
|
||||||
|
if stat.Flags&MNT_ACLS != 0 {
|
||||||
|
opts += ",acls"
|
||||||
|
}
|
||||||
|
if stat.Flags&MNT_NOATIME != 0 {
|
||||||
|
opts += ",noattime"
|
||||||
|
}
|
||||||
|
if stat.Flags&MNT_NOCLUSTERR != 0 {
|
||||||
|
opts += ",nocluster"
|
||||||
|
}
|
||||||
|
if stat.Flags&MNT_NOCLUSTERW != 0 {
|
||||||
|
opts += ",noclusterw"
|
||||||
|
}
|
||||||
|
if stat.Flags&MNT_NFS4ACLS != 0 {
|
||||||
|
opts += ",nfs4acls"
|
||||||
|
}
|
||||||
|
|
||||||
|
d := PartitionStat{
|
||||||
|
Device: common.IntToString(stat.Mntfromname[:]),
|
||||||
|
Mountpoint: common.IntToString(stat.Mntonname[:]),
|
||||||
|
Fstype: common.IntToString(stat.Fstypename[:]),
|
||||||
|
Opts: opts,
|
||||||
|
}
|
||||||
|
if all == false {
|
||||||
|
if !path.IsAbs(d.Device) || !common.PathExists(d.Device) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ret = append(ret, d)
|
||||||
|
}
|
||||||
|
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func IOCounters() (map[string]IOCountersStat, error) {
|
||||||
|
// statinfo->devinfo->devstat
|
||||||
|
// /usr/include/devinfo.h
|
||||||
|
ret := make(map[string]IOCountersStat)
|
||||||
|
|
||||||
|
r, err := syscall.Sysctl("kern.devstat.all")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
buf := []byte(r)
|
||||||
|
length := len(buf)
|
||||||
|
|
||||||
|
count := int(uint64(length) / uint64(sizeOfDevstat))
|
||||||
|
|
||||||
|
buf = buf[8:] // devstat.all has version in the head.
|
||||||
|
// parse buf to Devstat
|
||||||
|
for i := 0; i < count; i++ {
|
||||||
|
b := buf[i*sizeOfDevstat : i*sizeOfDevstat+sizeOfDevstat]
|
||||||
|
d, err := parseDevstat(b)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
un := strconv.Itoa(int(d.Unit_number))
|
||||||
|
name := common.IntToString(d.Device_name[:]) + un
|
||||||
|
|
||||||
|
ds := IOCountersStat{
|
||||||
|
ReadCount: d.Operations[DEVSTAT_READ],
|
||||||
|
WriteCount: d.Operations[DEVSTAT_WRITE],
|
||||||
|
ReadBytes: d.Bytes[DEVSTAT_READ],
|
||||||
|
WriteBytes: d.Bytes[DEVSTAT_WRITE],
|
||||||
|
ReadTime: uint64(d.Duration[DEVSTAT_READ].Compute() * 1000),
|
||||||
|
WriteTime: uint64(d.Duration[DEVSTAT_WRITE].Compute() * 1000),
|
||||||
|
IoTime: uint64(d.Busy_time.Compute() * 1000),
|
||||||
|
Name: name,
|
||||||
|
}
|
||||||
|
ret[name] = ds
|
||||||
|
}
|
||||||
|
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (b Bintime) Compute() float64 {
|
||||||
|
BINTIME_SCALE := 5.42101086242752217003726400434970855712890625e-20
|
||||||
|
return float64(b.Sec) + float64(b.Frac)*BINTIME_SCALE
|
||||||
|
}
|
||||||
|
|
||||||
|
// BT2LD(time) ((long double)(time).sec + (time).frac * BINTIME_SCALE)
|
||||||
|
|
||||||
|
// Getfsstat is borrowed from pkg/syscall/syscall_freebsd.go
|
||||||
|
// change Statfs_t to Statfs in order to get more information
|
||||||
|
func Getfsstat(buf []Statfs, flags int) (n int, err error) {
|
||||||
|
var _p0 unsafe.Pointer
|
||||||
|
var bufsize uintptr
|
||||||
|
if len(buf) > 0 {
|
||||||
|
_p0 = unsafe.Pointer(&buf[0])
|
||||||
|
bufsize = unsafe.Sizeof(Statfs{}) * uintptr(len(buf))
|
||||||
|
}
|
||||||
|
r0, _, e1 := syscall.Syscall(syscall.SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags))
|
||||||
|
n = int(r0)
|
||||||
|
if e1 != 0 {
|
||||||
|
err = e1
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseDevstat(buf []byte) (Devstat, error) {
|
||||||
|
var ds Devstat
|
||||||
|
br := bytes.NewReader(buf)
|
||||||
|
// err := binary.Read(br, binary.LittleEndian, &ds)
|
||||||
|
err := common.Read(br, binary.LittleEndian, &ds)
|
||||||
|
if err != nil {
|
||||||
|
return ds, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return ds, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getFsType(stat syscall.Statfs_t) string {
|
||||||
|
return common.IntToString(stat.Fstypename[:])
|
||||||
|
}
|
|
@ -0,0 +1,112 @@
|
||||||
|
// Created by cgo -godefs - DO NOT EDIT
|
||||||
|
// cgo -godefs types_freebsd.go
|
||||||
|
|
||||||
|
package disk
|
||||||
|
|
||||||
|
const (
|
||||||
|
sizeofPtr = 0x4
|
||||||
|
sizeofShort = 0x2
|
||||||
|
sizeofInt = 0x4
|
||||||
|
sizeofLong = 0x4
|
||||||
|
sizeofLongLong = 0x8
|
||||||
|
sizeofLongDouble = 0x8
|
||||||
|
|
||||||
|
DEVSTAT_NO_DATA = 0x00
|
||||||
|
DEVSTAT_READ = 0x01
|
||||||
|
DEVSTAT_WRITE = 0x02
|
||||||
|
DEVSTAT_FREE = 0x03
|
||||||
|
|
||||||
|
MNT_RDONLY = 0x00000001
|
||||||
|
MNT_SYNCHRONOUS = 0x00000002
|
||||||
|
MNT_NOEXEC = 0x00000004
|
||||||
|
MNT_NOSUID = 0x00000008
|
||||||
|
MNT_UNION = 0x00000020
|
||||||
|
MNT_ASYNC = 0x00000040
|
||||||
|
MNT_SUIDDIR = 0x00100000
|
||||||
|
MNT_SOFTDEP = 0x00200000
|
||||||
|
MNT_NOSYMFOLLOW = 0x00400000
|
||||||
|
MNT_GJOURNAL = 0x02000000
|
||||||
|
MNT_MULTILABEL = 0x04000000
|
||||||
|
MNT_ACLS = 0x08000000
|
||||||
|
MNT_NOATIME = 0x10000000
|
||||||
|
MNT_NOCLUSTERR = 0x40000000
|
||||||
|
MNT_NOCLUSTERW = 0x80000000
|
||||||
|
MNT_NFS4ACLS = 0x00000010
|
||||||
|
|
||||||
|
MNT_WAIT = 1
|
||||||
|
MNT_NOWAIT = 2
|
||||||
|
MNT_LAZY = 3
|
||||||
|
MNT_SUSPEND = 4
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
sizeOfDevstat = 0xf0
|
||||||
|
)
|
||||||
|
|
||||||
|
type (
|
||||||
|
_C_short int16
|
||||||
|
_C_int int32
|
||||||
|
_C_long int32
|
||||||
|
_C_long_long int64
|
||||||
|
_C_long_double int64
|
||||||
|
)
|
||||||
|
|
||||||
|
type Statfs struct {
|
||||||
|
Version uint32
|
||||||
|
Type uint32
|
||||||
|
Flags uint64
|
||||||
|
Bsize uint64
|
||||||
|
Iosize uint64
|
||||||
|
Blocks uint64
|
||||||
|
Bfree uint64
|
||||||
|
Bavail int64
|
||||||
|
Files uint64
|
||||||
|
Ffree int64
|
||||||
|
Syncwrites uint64
|
||||||
|
Asyncwrites uint64
|
||||||
|
Syncreads uint64
|
||||||
|
Asyncreads uint64
|
||||||
|
Spare [10]uint64
|
||||||
|
Namemax uint32
|
||||||
|
Owner uint32
|
||||||
|
Fsid Fsid
|
||||||
|
Charspare [80]int8
|
||||||
|
Fstypename [16]int8
|
||||||
|
Mntfromname [88]int8
|
||||||
|
Mntonname [88]int8
|
||||||
|
}
|
||||||
|
type Fsid struct {
|
||||||
|
Val [2]int32
|
||||||
|
}
|
||||||
|
|
||||||
|
type Devstat struct {
|
||||||
|
Sequence0 uint32
|
||||||
|
Allocated int32
|
||||||
|
Start_count uint32
|
||||||
|
End_count uint32
|
||||||
|
Busy_from Bintime
|
||||||
|
Dev_links _Ctype_struct___0
|
||||||
|
Device_number uint32
|
||||||
|
Device_name [16]int8
|
||||||
|
Unit_number int32
|
||||||
|
Bytes [4]uint64
|
||||||
|
Operations [4]uint64
|
||||||
|
Duration [4]Bintime
|
||||||
|
Busy_time Bintime
|
||||||
|
Creation_time Bintime
|
||||||
|
Block_size uint32
|
||||||
|
Tag_types [3]uint64
|
||||||
|
Flags uint32
|
||||||
|
Device_type uint32
|
||||||
|
Priority uint32
|
||||||
|
Id *byte
|
||||||
|
Sequence1 uint32
|
||||||
|
}
|
||||||
|
type Bintime struct {
|
||||||
|
Sec int32
|
||||||
|
Frac uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
type _Ctype_struct___0 struct {
|
||||||
|
Empty uint32
|
||||||
|
}
|
|
@ -0,0 +1,115 @@
|
||||||
|
// Created by cgo -godefs - DO NOT EDIT
|
||||||
|
// cgo -godefs types_freebsd.go
|
||||||
|
|
||||||
|
package disk
|
||||||
|
|
||||||
|
const (
|
||||||
|
sizeofPtr = 0x8
|
||||||
|
sizeofShort = 0x2
|
||||||
|
sizeofInt = 0x4
|
||||||
|
sizeofLong = 0x8
|
||||||
|
sizeofLongLong = 0x8
|
||||||
|
sizeofLongDouble = 0x8
|
||||||
|
|
||||||
|
DEVSTAT_NO_DATA = 0x00
|
||||||
|
DEVSTAT_READ = 0x01
|
||||||
|
DEVSTAT_WRITE = 0x02
|
||||||
|
DEVSTAT_FREE = 0x03
|
||||||
|
|
||||||
|
MNT_RDONLY = 0x00000001
|
||||||
|
MNT_SYNCHRONOUS = 0x00000002
|
||||||
|
MNT_NOEXEC = 0x00000004
|
||||||
|
MNT_NOSUID = 0x00000008
|
||||||
|
MNT_UNION = 0x00000020
|
||||||
|
MNT_ASYNC = 0x00000040
|
||||||
|
MNT_SUIDDIR = 0x00100000
|
||||||
|
MNT_SOFTDEP = 0x00200000
|
||||||
|
MNT_NOSYMFOLLOW = 0x00400000
|
||||||
|
MNT_GJOURNAL = 0x02000000
|
||||||
|
MNT_MULTILABEL = 0x04000000
|
||||||
|
MNT_ACLS = 0x08000000
|
||||||
|
MNT_NOATIME = 0x10000000
|
||||||
|
MNT_NOCLUSTERR = 0x40000000
|
||||||
|
MNT_NOCLUSTERW = 0x80000000
|
||||||
|
MNT_NFS4ACLS = 0x00000010
|
||||||
|
|
||||||
|
MNT_WAIT = 1
|
||||||
|
MNT_NOWAIT = 2
|
||||||
|
MNT_LAZY = 3
|
||||||
|
MNT_SUSPEND = 4
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
sizeOfDevstat = 0x120
|
||||||
|
)
|
||||||
|
|
||||||
|
type (
|
||||||
|
_C_short int16
|
||||||
|
_C_int int32
|
||||||
|
_C_long int64
|
||||||
|
_C_long_long int64
|
||||||
|
_C_long_double int64
|
||||||
|
)
|
||||||
|
|
||||||
|
type Statfs struct {
|
||||||
|
Version uint32
|
||||||
|
Type uint32
|
||||||
|
Flags uint64
|
||||||
|
Bsize uint64
|
||||||
|
Iosize uint64
|
||||||
|
Blocks uint64
|
||||||
|
Bfree uint64
|
||||||
|
Bavail int64
|
||||||
|
Files uint64
|
||||||
|
Ffree int64
|
||||||
|
Syncwrites uint64
|
||||||
|
Asyncwrites uint64
|
||||||
|
Syncreads uint64
|
||||||
|
Asyncreads uint64
|
||||||
|
Spare [10]uint64
|
||||||
|
Namemax uint32
|
||||||
|
Owner uint32
|
||||||
|
Fsid Fsid
|
||||||
|
Charspare [80]int8
|
||||||
|
Fstypename [16]int8
|
||||||
|
Mntfromname [88]int8
|
||||||
|
Mntonname [88]int8
|
||||||
|
}
|
||||||
|
type Fsid struct {
|
||||||
|
Val [2]int32
|
||||||
|
}
|
||||||
|
|
||||||
|
type Devstat struct {
|
||||||
|
Sequence0 uint32
|
||||||
|
Allocated int32
|
||||||
|
Start_count uint32
|
||||||
|
End_count uint32
|
||||||
|
Busy_from Bintime
|
||||||
|
Dev_links _Ctype_struct___0
|
||||||
|
Device_number uint32
|
||||||
|
Device_name [16]int8
|
||||||
|
Unit_number int32
|
||||||
|
Bytes [4]uint64
|
||||||
|
Operations [4]uint64
|
||||||
|
Duration [4]Bintime
|
||||||
|
Busy_time Bintime
|
||||||
|
Creation_time Bintime
|
||||||
|
Block_size uint32
|
||||||
|
Pad_cgo_0 [4]byte
|
||||||
|
Tag_types [3]uint64
|
||||||
|
Flags uint32
|
||||||
|
Device_type uint32
|
||||||
|
Priority uint32
|
||||||
|
Pad_cgo_1 [4]byte
|
||||||
|
ID *byte
|
||||||
|
Sequence1 uint32
|
||||||
|
Pad_cgo_2 [4]byte
|
||||||
|
}
|
||||||
|
type Bintime struct {
|
||||||
|
Sec int64
|
||||||
|
Frac uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
type _Ctype_struct___0 struct {
|
||||||
|
Empty uint64
|
||||||
|
}
|
|
@ -0,0 +1,393 @@
|
||||||
|
// +build linux
|
||||||
|
|
||||||
|
package disk
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os/exec"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
"github.com/shirou/gopsutil/internal/common"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SectorSize is the fixed sector size (bytes) used to convert the sector
// counts in /proc/diskstats into byte counts. The kernel always reports
// these counters in 512-byte units regardless of the device's real sector
// size.
const (
	SectorSize = 512
)

// Filesystem superblock magic numbers as returned in the Type field of
// statfs(2); used as keys of fsTypeMap below.
const (
	// man statfs
	ADFS_SUPER_MAGIC      = 0xadf5
	AFFS_SUPER_MAGIC      = 0xADFF
	BDEVFS_MAGIC          = 0x62646576
	BEFS_SUPER_MAGIC      = 0x42465331
	BFS_MAGIC             = 0x1BADFACE
	BINFMTFS_MAGIC        = 0x42494e4d
	BTRFS_SUPER_MAGIC     = 0x9123683E
	CGROUP_SUPER_MAGIC    = 0x27e0eb
	CIFS_MAGIC_NUMBER     = 0xFF534D42
	CODA_SUPER_MAGIC      = 0x73757245
	COH_SUPER_MAGIC       = 0x012FF7B7
	CRAMFS_MAGIC          = 0x28cd3d45
	DEBUGFS_MAGIC         = 0x64626720
	DEVFS_SUPER_MAGIC     = 0x1373
	DEVPTS_SUPER_MAGIC    = 0x1cd1
	EFIVARFS_MAGIC        = 0xde5e81e4
	EFS_SUPER_MAGIC       = 0x00414A53
	EXT_SUPER_MAGIC       = 0x137D
	EXT2_OLD_SUPER_MAGIC  = 0xEF51
	// ext2/3/4 share one magic; fsTypeMap keys on EXT2_SUPER_MAGIC only.
	EXT2_SUPER_MAGIC      = 0xEF53
	EXT3_SUPER_MAGIC      = 0xEF53
	EXT4_SUPER_MAGIC      = 0xEF53
	FUSE_SUPER_MAGIC      = 0x65735546
	FUTEXFS_SUPER_MAGIC   = 0xBAD1DEA
	HFS_SUPER_MAGIC       = 0x4244
	HOSTFS_SUPER_MAGIC    = 0x00c0ffee
	HPFS_SUPER_MAGIC      = 0xF995E849
	HUGETLBFS_MAGIC       = 0x958458f6
	ISOFS_SUPER_MAGIC     = 0x9660
	JFFS2_SUPER_MAGIC     = 0x72b6
	JFS_SUPER_MAGIC       = 0x3153464a
	MINIX_SUPER_MAGIC     = 0x137F /* orig. minix */
	MINIX_SUPER_MAGIC2    = 0x138F /* 30 char minix */
	MINIX2_SUPER_MAGIC    = 0x2468 /* minix V2 */
	MINIX2_SUPER_MAGIC2   = 0x2478 /* minix V2, 30 char names */
	MINIX3_SUPER_MAGIC    = 0x4d5a /* minix V3 fs, 60 char names */
	MQUEUE_MAGIC          = 0x19800202
	MSDOS_SUPER_MAGIC     = 0x4d44
	NCP_SUPER_MAGIC       = 0x564c
	NFS_SUPER_MAGIC       = 0x6969
	NILFS_SUPER_MAGIC     = 0x3434
	NTFS_SB_MAGIC         = 0x5346544e
	OCFS2_SUPER_MAGIC     = 0x7461636f
	OPENPROM_SUPER_MAGIC  = 0x9fa1
	PIPEFS_MAGIC          = 0x50495045
	PROC_SUPER_MAGIC      = 0x9fa0
	PSTOREFS_MAGIC        = 0x6165676C
	QNX4_SUPER_MAGIC      = 0x002f
	QNX6_SUPER_MAGIC      = 0x68191122
	RAMFS_MAGIC           = 0x858458f6
	REISERFS_SUPER_MAGIC  = 0x52654973
	ROMFS_MAGIC           = 0x7275
	SELINUX_MAGIC         = 0xf97cff8c
	SMACK_MAGIC           = 0x43415d53
	SMB_SUPER_MAGIC       = 0x517B
	SOCKFS_MAGIC          = 0x534F434B
	SQUASHFS_MAGIC        = 0x73717368
	SYSFS_MAGIC           = 0x62656572
	SYSV2_SUPER_MAGIC     = 0x012FF7B6
	SYSV4_SUPER_MAGIC     = 0x012FF7B5
	TMPFS_MAGIC           = 0x01021994
	UDF_SUPER_MAGIC       = 0x15013346
	UFS_MAGIC             = 0x00011954
	USBDEVICE_SUPER_MAGIC = 0x9fa2
	V9FS_MAGIC            = 0x01021997
	VXFS_SUPER_MAGIC      = 0xa501FCF5
	XENFS_SUPER_MAGIC     = 0xabba1974
	XENIX_SUPER_MAGIC     = 0x012FF7B4
	XFS_SUPER_MAGIC       = 0x58465342
	_XIAFS_SUPER_MAGIC    = 0x012FD16D

	// Additional magics not listed in statfs(2) (out-of-tree and network
	// filesystems).
	AFS_SUPER_MAGIC             = 0x5346414F
	AUFS_SUPER_MAGIC            = 0x61756673
	ANON_INODE_FS_SUPER_MAGIC   = 0x09041934
	CEPH_SUPER_MAGIC            = 0x00C36400
	ECRYPTFS_SUPER_MAGIC        = 0xF15F
	FAT_SUPER_MAGIC             = 0x4006
	FHGFS_SUPER_MAGIC           = 0x19830326
	FUSEBLK_SUPER_MAGIC         = 0x65735546
	FUSECTL_SUPER_MAGIC         = 0x65735543
	GFS_SUPER_MAGIC             = 0x1161970
	GPFS_SUPER_MAGIC            = 0x47504653
	MTD_INODE_FS_SUPER_MAGIC    = 0x11307854
	INOTIFYFS_SUPER_MAGIC       = 0x2BAD1DEA
	ISOFS_R_WIN_SUPER_MAGIC     = 0x4004
	ISOFS_WIN_SUPER_MAGIC       = 0x4000
	JFFS_SUPER_MAGIC            = 0x07C0
	KAFS_SUPER_MAGIC            = 0x6B414653
	LUSTRE_SUPER_MAGIC          = 0x0BD00BD0
	NFSD_SUPER_MAGIC            = 0x6E667364
	PANFS_SUPER_MAGIC           = 0xAAD7AAEA
	RPC_PIPEFS_SUPER_MAGIC      = 0x67596969
	SECURITYFS_SUPER_MAGIC      = 0x73636673
	UFS_BYTESWAPPED_SUPER_MAGIC = 0x54190100
	VMHGFS_SUPER_MAGIC          = 0xBACBACBC
	VZFS_SUPER_MAGIC            = 0x565A4653
	ZFS_SUPER_MAGIC             = 0x2FC12FC1
)
|
||||||
|
|
||||||
|
// fsTypeMap maps statfs(2) superblock magic numbers to human-readable
// filesystem names; consumed by getFsType. The table (and the local/remote
// annotations) follows coreutils/src/stat.c.
// coreutils/src/stat.c
var fsTypeMap = map[int64]string{
	ADFS_SUPER_MAGIC:          "adfs",          /* 0xADF5 local */
	AFFS_SUPER_MAGIC:          "affs",          /* 0xADFF local */
	AFS_SUPER_MAGIC:           "afs",           /* 0x5346414F remote */
	ANON_INODE_FS_SUPER_MAGIC: "anon-inode FS", /* 0x09041934 local */
	AUFS_SUPER_MAGIC:          "aufs",          /* 0x61756673 remote */
	// AUTOFS_SUPER_MAGIC: "autofs", /* 0x0187 local */
	BEFS_SUPER_MAGIC:            "befs",                /* 0x42465331 local */
	BDEVFS_MAGIC:                "bdevfs",              /* 0x62646576 local */
	BFS_MAGIC:                   "bfs",                 /* 0x1BADFACE local */
	BINFMTFS_MAGIC:              "binfmt_misc",         /* 0x42494E4D local */
	BTRFS_SUPER_MAGIC:           "btrfs",               /* 0x9123683E local */
	CEPH_SUPER_MAGIC:            "ceph",                /* 0x00C36400 remote */
	CGROUP_SUPER_MAGIC:          "cgroupfs",            /* 0x0027E0EB local */
	CIFS_MAGIC_NUMBER:           "cifs",                /* 0xFF534D42 remote */
	CODA_SUPER_MAGIC:            "coda",                /* 0x73757245 remote */
	COH_SUPER_MAGIC:             "coh",                 /* 0x012FF7B7 local */
	CRAMFS_MAGIC:                "cramfs",              /* 0x28CD3D45 local */
	DEBUGFS_MAGIC:               "debugfs",             /* 0x64626720 local */
	DEVFS_SUPER_MAGIC:           "devfs",               /* 0x1373 local */
	DEVPTS_SUPER_MAGIC:          "devpts",              /* 0x1CD1 local */
	ECRYPTFS_SUPER_MAGIC:        "ecryptfs",            /* 0xF15F local */
	EFS_SUPER_MAGIC:             "efs",                 /* 0x00414A53 local */
	EXT_SUPER_MAGIC:             "ext",                 /* 0x137D local */
	EXT2_SUPER_MAGIC:            "ext2/ext3",           /* 0xEF53 local */
	EXT2_OLD_SUPER_MAGIC:        "ext2",                /* 0xEF51 local */
	FAT_SUPER_MAGIC:             "fat",                 /* 0x4006 local */
	FHGFS_SUPER_MAGIC:           "fhgfs",               /* 0x19830326 remote */
	FUSEBLK_SUPER_MAGIC:         "fuseblk",             /* 0x65735546 remote */
	FUSECTL_SUPER_MAGIC:         "fusectl",             /* 0x65735543 remote */
	FUTEXFS_SUPER_MAGIC:         "futexfs",             /* 0x0BAD1DEA local */
	GFS_SUPER_MAGIC:             "gfs/gfs2",            /* 0x1161970 remote */
	GPFS_SUPER_MAGIC:            "gpfs",                /* 0x47504653 remote */
	HFS_SUPER_MAGIC:             "hfs",                 /* 0x4244 local */
	HPFS_SUPER_MAGIC:            "hpfs",                /* 0xF995E849 local */
	HUGETLBFS_MAGIC:             "hugetlbfs",           /* 0x958458F6 local */
	MTD_INODE_FS_SUPER_MAGIC:    "inodefs",             /* 0x11307854 local */
	INOTIFYFS_SUPER_MAGIC:       "inotifyfs",           /* 0x2BAD1DEA local */
	ISOFS_SUPER_MAGIC:           "isofs",               /* 0x9660 local */
	ISOFS_R_WIN_SUPER_MAGIC:     "isofs",               /* 0x4004 local */
	ISOFS_WIN_SUPER_MAGIC:       "isofs",               /* 0x4000 local */
	JFFS_SUPER_MAGIC:            "jffs",                /* 0x07C0 local */
	JFFS2_SUPER_MAGIC:           "jffs2",               /* 0x72B6 local */
	JFS_SUPER_MAGIC:             "jfs",                 /* 0x3153464A local */
	KAFS_SUPER_MAGIC:            "k-afs",               /* 0x6B414653 remote */
	LUSTRE_SUPER_MAGIC:          "lustre",              /* 0x0BD00BD0 remote */
	MINIX_SUPER_MAGIC:           "minix",               /* 0x137F local */
	MINIX_SUPER_MAGIC2:          "minix (30 char.)",    /* 0x138F local */
	MINIX2_SUPER_MAGIC:          "minix v2",            /* 0x2468 local */
	MINIX2_SUPER_MAGIC2:         "minix v2 (30 char.)", /* 0x2478 local */
	MINIX3_SUPER_MAGIC:          "minix3",              /* 0x4D5A local */
	MQUEUE_MAGIC:                "mqueue",              /* 0x19800202 local */
	MSDOS_SUPER_MAGIC:           "msdos",               /* 0x4D44 local */
	NCP_SUPER_MAGIC:             "novell",              /* 0x564C remote */
	NFS_SUPER_MAGIC:             "nfs",                 /* 0x6969 remote */
	NFSD_SUPER_MAGIC:            "nfsd",                /* 0x6E667364 remote */
	NILFS_SUPER_MAGIC:           "nilfs",               /* 0x3434 local */
	NTFS_SB_MAGIC:               "ntfs",                /* 0x5346544E local */
	OPENPROM_SUPER_MAGIC:        "openprom",            /* 0x9FA1 local */
	OCFS2_SUPER_MAGIC:           "ocfs2",               /* 0x7461636f remote */
	PANFS_SUPER_MAGIC:           "panfs",               /* 0xAAD7AAEA remote */
	PIPEFS_MAGIC:                "pipefs",              /* 0x50495045 remote */
	PROC_SUPER_MAGIC:            "proc",                /* 0x9FA0 local */
	PSTOREFS_MAGIC:              "pstorefs",            /* 0x6165676C local */
	QNX4_SUPER_MAGIC:            "qnx4",                /* 0x002F local */
	QNX6_SUPER_MAGIC:            "qnx6",                /* 0x68191122 local */
	RAMFS_MAGIC:                 "ramfs",               /* 0x858458F6 local */
	REISERFS_SUPER_MAGIC:        "reiserfs",            /* 0x52654973 local */
	ROMFS_MAGIC:                 "romfs",               /* 0x7275 local */
	RPC_PIPEFS_SUPER_MAGIC:      "rpc_pipefs",          /* 0x67596969 local */
	SECURITYFS_SUPER_MAGIC:      "securityfs",          /* 0x73636673 local */
	SELINUX_MAGIC:               "selinux",             /* 0xF97CFF8C local */
	SMB_SUPER_MAGIC:             "smb",                 /* 0x517B remote */
	SOCKFS_MAGIC:                "sockfs",              /* 0x534F434B local */
	SQUASHFS_MAGIC:              "squashfs",            /* 0x73717368 local */
	SYSFS_MAGIC:                 "sysfs",               /* 0x62656572 local */
	SYSV2_SUPER_MAGIC:           "sysv2",               /* 0x012FF7B6 local */
	SYSV4_SUPER_MAGIC:           "sysv4",               /* 0x012FF7B5 local */
	TMPFS_MAGIC:                 "tmpfs",               /* 0x01021994 local */
	UDF_SUPER_MAGIC:             "udf",                 /* 0x15013346 local */
	UFS_MAGIC:                   "ufs",                 /* 0x00011954 local */
	UFS_BYTESWAPPED_SUPER_MAGIC: "ufs",                 /* 0x54190100 local */
	USBDEVICE_SUPER_MAGIC:       "usbdevfs",            /* 0x9FA2 local */
	V9FS_MAGIC:                  "v9fs",                /* 0x01021997 local */
	VMHGFS_SUPER_MAGIC:          "vmhgfs",              /* 0xBACBACBC remote */
	VXFS_SUPER_MAGIC:            "vxfs",                /* 0xA501FCF5 local */
	VZFS_SUPER_MAGIC:            "vzfs",                /* 0x565A4653 local */
	XENFS_SUPER_MAGIC:           "xenfs",               /* 0xABBA1974 local */
	XENIX_SUPER_MAGIC:           "xenix",               /* 0x012FF7B4 local */
	XFS_SUPER_MAGIC:             "xfs",                 /* 0x58465342 local */
	_XIAFS_SUPER_MAGIC:          "xia",                 /* 0x012FD16D local */
	ZFS_SUPER_MAGIC:             "zfs",                 /* 0x2FC12FC1 local */
}
|
||||||
|
|
||||||
|
// Partitions returns disk partitions. If all is false, returns
|
||||||
|
// physical devices only (e.g. hard disks, cd-rom drives, USB keys)
|
||||||
|
// and ignore all others (e.g. memory partitions such as /dev/shm)
|
||||||
|
//
|
||||||
|
// should use setmntent(3) but this implement use /etc/mtab file
|
||||||
|
func Partitions(all bool) ([]PartitionStat, error) {
|
||||||
|
filename := common.HostEtc("mtab")
|
||||||
|
lines, err := common.ReadLines(filename)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
fs, err := getFileSystems()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
ret := make([]PartitionStat, 0, len(lines))
|
||||||
|
|
||||||
|
for _, line := range lines {
|
||||||
|
fields := strings.Fields(line)
|
||||||
|
d := PartitionStat{
|
||||||
|
Device: fields[0],
|
||||||
|
Mountpoint: fields[1],
|
||||||
|
Fstype: fields[2],
|
||||||
|
Opts: fields[3],
|
||||||
|
}
|
||||||
|
if all == false {
|
||||||
|
if d.Device == "none" || !common.StringsHas(fs, d.Fstype) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ret = append(ret, d)
|
||||||
|
}
|
||||||
|
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getFileSystems returns supported filesystems from /proc/filesystems
|
||||||
|
func getFileSystems() ([]string, error) {
|
||||||
|
filename := common.HostProc("filesystems")
|
||||||
|
lines, err := common.ReadLines(filename)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var ret []string
|
||||||
|
for _, line := range lines {
|
||||||
|
if !strings.HasPrefix(line, "nodev") {
|
||||||
|
ret = append(ret, strings.TrimSpace(line))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
t := strings.Split(line, "\t")
|
||||||
|
if len(t) != 2 || t[1] != "zfs" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
ret = append(ret, strings.TrimSpace(t[1]))
|
||||||
|
}
|
||||||
|
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func IOCounters() (map[string]IOCountersStat, error) {
|
||||||
|
filename := common.HostProc("diskstats")
|
||||||
|
lines, err := common.ReadLines(filename)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
ret := make(map[string]IOCountersStat, 0)
|
||||||
|
empty := IOCountersStat{}
|
||||||
|
|
||||||
|
for _, line := range lines {
|
||||||
|
fields := strings.Fields(line)
|
||||||
|
if len(fields) < 14 {
|
||||||
|
// malformed line in /proc/diskstats, avoid panic by ignoring.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
name := fields[2]
|
||||||
|
reads, err := strconv.ParseUint((fields[3]), 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return ret, err
|
||||||
|
}
|
||||||
|
mergedReads, err := strconv.ParseUint((fields[4]), 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return ret, err
|
||||||
|
}
|
||||||
|
rbytes, err := strconv.ParseUint((fields[5]), 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return ret, err
|
||||||
|
}
|
||||||
|
rtime, err := strconv.ParseUint((fields[6]), 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return ret, err
|
||||||
|
}
|
||||||
|
writes, err := strconv.ParseUint((fields[7]), 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return ret, err
|
||||||
|
}
|
||||||
|
mergedWrites, err := strconv.ParseUint((fields[8]), 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return ret, err
|
||||||
|
}
|
||||||
|
wbytes, err := strconv.ParseUint((fields[9]), 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return ret, err
|
||||||
|
}
|
||||||
|
wtime, err := strconv.ParseUint((fields[10]), 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return ret, err
|
||||||
|
}
|
||||||
|
iopsInProgress, err := strconv.ParseUint((fields[11]), 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return ret, err
|
||||||
|
}
|
||||||
|
iotime, err := strconv.ParseUint((fields[12]), 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return ret, err
|
||||||
|
}
|
||||||
|
weightedIO, err := strconv.ParseUint((fields[13]), 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return ret, err
|
||||||
|
}
|
||||||
|
d := IOCountersStat{
|
||||||
|
ReadBytes: rbytes * SectorSize,
|
||||||
|
WriteBytes: wbytes * SectorSize,
|
||||||
|
ReadCount: reads,
|
||||||
|
WriteCount: writes,
|
||||||
|
MergedReadCount: mergedReads,
|
||||||
|
MergedWriteCount: mergedWrites,
|
||||||
|
ReadTime: rtime,
|
||||||
|
WriteTime: wtime,
|
||||||
|
IopsInProgress: iopsInProgress,
|
||||||
|
IoTime: iotime,
|
||||||
|
WeightedIO: weightedIO,
|
||||||
|
}
|
||||||
|
if d == empty {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
d.Name = name
|
||||||
|
|
||||||
|
d.SerialNumber = GetDiskSerialNumber(name)
|
||||||
|
ret[name] = d
|
||||||
|
}
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetDiskSerialNumber returns Serial Number of given device or empty string
|
||||||
|
// on error. Name of device is expected, eg. /dev/sda
|
||||||
|
func GetDiskSerialNumber(name string) string {
|
||||||
|
n := fmt.Sprintf("--name=%s", name)
|
||||||
|
udevadm, err := exec.LookPath("/sbin/udevadm")
|
||||||
|
if err != nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
out, err := invoke.Command(udevadm, "info", "--query=property", n)
|
||||||
|
|
||||||
|
// does not return error, just an empty string
|
||||||
|
if err != nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
lines := strings.Split(string(out), "\n")
|
||||||
|
for _, line := range lines {
|
||||||
|
values := strings.Split(line, "=")
|
||||||
|
if len(values) < 2 || values[0] != "ID_SERIAL" {
|
||||||
|
// only get ID_SERIAL, not ID_SERIAL_SHORT
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return values[1]
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func getFsType(stat syscall.Statfs_t) string {
|
||||||
|
t := int64(stat.Type)
|
||||||
|
ret, ok := fsTypeMap[t]
|
||||||
|
if !ok {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return ret
|
||||||
|
}
|
|
@ -0,0 +1,158 @@
|
||||||
|
// +build openbsd
|
||||||
|
|
||||||
|
package disk
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/binary"
|
||||||
|
"path"
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"github.com/shirou/gopsutil/internal/common"
|
||||||
|
)
|
||||||
|
|
||||||
|
func Partitions(all bool) ([]PartitionStat, error) {
|
||||||
|
var ret []PartitionStat
|
||||||
|
|
||||||
|
// get length
|
||||||
|
count, err := syscall.Getfsstat(nil, MNT_WAIT)
|
||||||
|
if err != nil {
|
||||||
|
return ret, err
|
||||||
|
}
|
||||||
|
|
||||||
|
fs := make([]Statfs, count)
|
||||||
|
_, err = Getfsstat(fs, MNT_WAIT)
|
||||||
|
|
||||||
|
for _, stat := range fs {
|
||||||
|
opts := "rw"
|
||||||
|
if stat.F_flags&MNT_RDONLY != 0 {
|
||||||
|
opts = "ro"
|
||||||
|
}
|
||||||
|
if stat.F_flags&MNT_SYNCHRONOUS != 0 {
|
||||||
|
opts += ",sync"
|
||||||
|
}
|
||||||
|
if stat.F_flags&MNT_NOEXEC != 0 {
|
||||||
|
opts += ",noexec"
|
||||||
|
}
|
||||||
|
if stat.F_flags&MNT_NOSUID != 0 {
|
||||||
|
opts += ",nosuid"
|
||||||
|
}
|
||||||
|
if stat.F_flags&MNT_NODEV != 0 {
|
||||||
|
opts += ",nodev"
|
||||||
|
}
|
||||||
|
if stat.F_flags&MNT_ASYNC != 0 {
|
||||||
|
opts += ",async"
|
||||||
|
}
|
||||||
|
|
||||||
|
d := PartitionStat{
|
||||||
|
Device: common.IntToString(stat.F_mntfromname[:]),
|
||||||
|
Mountpoint: common.IntToString(stat.F_mntonname[:]),
|
||||||
|
Fstype: common.IntToString(stat.F_fstypename[:]),
|
||||||
|
Opts: opts,
|
||||||
|
}
|
||||||
|
if all == false {
|
||||||
|
if !path.IsAbs(d.Device) || !common.PathExists(d.Device) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
ret = append(ret, d)
|
||||||
|
}
|
||||||
|
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func IOCounters() (map[string]IOCountersStat, error) {
|
||||||
|
ret := make(map[string]IOCountersStat)
|
||||||
|
|
||||||
|
r, err := syscall.Sysctl("hw.diskstats")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
buf := []byte(r)
|
||||||
|
length := len(buf)
|
||||||
|
|
||||||
|
count := int(uint64(length) / uint64(sizeOfDiskstats))
|
||||||
|
|
||||||
|
// parse buf to Diskstats
|
||||||
|
for i := 0; i < count; i++ {
|
||||||
|
b := buf[i*sizeOfDiskstats : i*sizeOfDiskstats+sizeOfDiskstats]
|
||||||
|
d, err := parseDiskstats(b)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
name := common.IntToString(d.Name[:])
|
||||||
|
|
||||||
|
ds := IOCountersStat{
|
||||||
|
ReadCount: d.Rxfer,
|
||||||
|
WriteCount: d.Wxfer,
|
||||||
|
ReadBytes: d.Rbytes,
|
||||||
|
WriteBytes: d.Wbytes,
|
||||||
|
Name: name,
|
||||||
|
}
|
||||||
|
ret[name] = ds
|
||||||
|
}
|
||||||
|
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// BT2LD(time) ((long double)(time).sec + (time).frac * BINTIME_SCALE)
|
||||||
|
|
||||||
|
// Getfsstat is borrowed from pkg/syscall/syscall_freebsd.go
// change Statfs_t to Statfs in order to get more information
//
// It invokes getfsstat(2) directly so the kernel fills the caller's
// []Statfs (this package's richer mirror of the C struct) instead of the
// standard library's Statfs_t. With a nil/empty buf the kernel only
// returns the number of mounted filesystems.
func Getfsstat(buf []Statfs, flags int) (n int, err error) {
	var _p0 unsafe.Pointer
	var bufsize uintptr
	if len(buf) > 0 {
		// Pass the base address and total byte size of the slice's
		// backing array to the kernel.
		_p0 = unsafe.Pointer(&buf[0])
		bufsize = unsafe.Sizeof(Statfs{}) * uintptr(len(buf))
	}
	r0, _, e1 := syscall.Syscall(syscall.SYS_GETFSSTAT, uintptr(_p0), bufsize, uintptr(flags))
	n = int(r0)
	if e1 != 0 {
		// e1 is a syscall.Errno; non-zero means the call failed.
		err = e1
	}
	return
}
|
||||||
|
|
||||||
|
func parseDiskstats(buf []byte) (Diskstats, error) {
|
||||||
|
var ds Diskstats
|
||||||
|
br := bytes.NewReader(buf)
|
||||||
|
// err := binary.Read(br, binary.LittleEndian, &ds)
|
||||||
|
err := common.Read(br, binary.LittleEndian, &ds)
|
||||||
|
if err != nil {
|
||||||
|
return ds, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return ds, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func Usage(path string) (*UsageStat, error) {
|
||||||
|
stat := syscall.Statfs_t{}
|
||||||
|
err := syscall.Statfs(path, &stat)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
bsize := stat.F_bsize
|
||||||
|
|
||||||
|
ret := &UsageStat{
|
||||||
|
Path: path,
|
||||||
|
Fstype: getFsType(stat),
|
||||||
|
Total: (uint64(stat.F_blocks) * uint64(bsize)),
|
||||||
|
Free: (uint64(stat.F_bavail) * uint64(bsize)),
|
||||||
|
InodesTotal: (uint64(stat.F_files)),
|
||||||
|
InodesFree: (uint64(stat.F_ffree)),
|
||||||
|
}
|
||||||
|
|
||||||
|
ret.InodesUsed = (ret.InodesTotal - ret.InodesFree)
|
||||||
|
ret.InodesUsedPercent = (float64(ret.InodesUsed) / float64(ret.InodesTotal)) * 100.0
|
||||||
|
ret.Used = (uint64(stat.F_blocks) - uint64(stat.F_bfree)) * uint64(bsize)
|
||||||
|
ret.UsedPercent = (float64(ret.Used) / float64(ret.Total)) * 100.0
|
||||||
|
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getFsType returns the filesystem name reported by statfs(2), converted
// from the kernel's NUL-padded int8 array.
func getFsType(stat syscall.Statfs_t) string {
	return common.IntToString(stat.F_fstypename[:])
}
|
|
@ -0,0 +1,91 @@
|
||||||
|
// Created by cgo -godefs - DO NOT EDIT
|
||||||
|
// cgo -godefs types_openbsd.go
|
||||||
|
|
||||||
|
package disk
|
||||||
|
|
||||||
|
// Constants emitted by cgo -godefs for OpenBSD: C type sizes, devstat
// operation codes, mount flags (statfs F_flags bits), and getfsstat(2)
// wait modes.
const (
	sizeofPtr        = 0x8
	sizeofShort      = 0x2
	sizeofInt        = 0x4
	sizeofLong       = 0x8
	sizeofLongLong   = 0x8
	sizeofLongDouble = 0x8

	DEVSTAT_NO_DATA = 0x00
	DEVSTAT_READ    = 0x01
	DEVSTAT_WRITE   = 0x02
	DEVSTAT_FREE    = 0x03

	// Mount flags tested against Statfs.F_flags in Partitions.
	MNT_RDONLY      = 0x00000001
	MNT_SYNCHRONOUS = 0x00000002
	MNT_NOEXEC      = 0x00000004
	MNT_NOSUID      = 0x00000008
	MNT_NODEV       = 0x00000010
	MNT_ASYNC       = 0x00000040

	// getfsstat(2) modes.
	MNT_WAIT   = 1
	MNT_NOWAIT = 2
	MNT_LAZY   = 3
)

// sizeOfDiskstats is the byte size of one C diskstats record in the
// hw.diskstats sysctl buffer.
const (
	sizeOfDiskstats = 0x70
)

// C primitive type aliases emitted by cgo -godefs (64-bit long sizes).
type (
	_C_short       int16
	_C_int         int32
	_C_long        int64
	_C_long_long   int64
	_C_long_double int64
)
|
||||||
|
|
||||||
|
// Statfs mirrors OpenBSD's struct statfs (see statfs(2)). Field layout
// must match the kernel ABI exactly; Pad_cgo_* fields are alignment
// padding inserted by cgo -godefs. Do not reorder.
type Statfs struct {
	F_flags       uint32
	F_bsize       uint32
	F_iosize      uint32
	Pad_cgo_0     [4]byte
	F_blocks      uint64
	F_bfree       uint64
	F_bavail      int64
	F_files       uint64
	F_ffree       uint64
	F_favail      int64
	F_syncwrites  uint64
	F_syncreads   uint64
	F_asyncwrites uint64
	F_asyncreads  uint64
	F_fsid        Fsid
	F_namemax     uint32
	F_owner       uint32
	F_ctime       uint64
	F_fstypename  [16]int8
	F_mntonname   [90]int8
	F_mntfromname [90]int8
	F_mntfromspec [90]int8
	Pad_cgo_1     [2]byte
	Mount_info    [160]byte
}

// Diskstats mirrors the kernel record returned by the hw.diskstats
// sysctl; decoded by parseDiskstats.
type Diskstats struct {
	Name       [16]int8
	Busy       int32
	Pad_cgo_0  [4]byte
	Rxfer      uint64
	Wxfer      uint64
	Seek       uint64
	Rbytes     uint64
	Wbytes     uint64
	Attachtime Timeval
	Timestamp  Timeval
	Time       Timeval
}

// Fsid is the kernel filesystem identifier embedded in Statfs.
type Fsid struct {
	Val [2]int32
}

// Timeval is the kernel's seconds/microseconds time pair.
type Timeval struct {
	Sec  int64
	Usec int64
}

// Diskstat and Bintime are empty placeholders on OpenBSD so that code
// shared with other BSDs still compiles.
type Diskstat struct{}
type Bintime struct{}
|
|
@ -0,0 +1,45 @@
|
||||||
|
// +build freebsd linux darwin
|
||||||
|
|
||||||
|
package disk
|
||||||
|
|
||||||
|
import "syscall"
|
||||||
|
|
||||||
|
func Usage(path string) (*UsageStat, error) {
|
||||||
|
stat := syscall.Statfs_t{}
|
||||||
|
err := syscall.Statfs(path, &stat)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
bsize := stat.Bsize
|
||||||
|
|
||||||
|
ret := &UsageStat{
|
||||||
|
Path: path,
|
||||||
|
Fstype: getFsType(stat),
|
||||||
|
Total: (uint64(stat.Blocks) * uint64(bsize)),
|
||||||
|
Free: (uint64(stat.Bavail) * uint64(bsize)),
|
||||||
|
InodesTotal: (uint64(stat.Files)),
|
||||||
|
InodesFree: (uint64(stat.Ffree)),
|
||||||
|
}
|
||||||
|
|
||||||
|
// if could not get InodesTotal, return empty
|
||||||
|
if ret.InodesTotal < ret.InodesFree {
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
ret.InodesUsed = (ret.InodesTotal - ret.InodesFree)
|
||||||
|
ret.Used = (uint64(stat.Blocks) - uint64(stat.Bfree)) * uint64(bsize)
|
||||||
|
|
||||||
|
if ret.InodesTotal == 0 {
|
||||||
|
ret.InodesUsedPercent = 0
|
||||||
|
} else {
|
||||||
|
ret.InodesUsedPercent = (float64(ret.InodesUsed) / float64(ret.InodesTotal)) * 100.0
|
||||||
|
}
|
||||||
|
|
||||||
|
if ret.Total == 0 {
|
||||||
|
ret.UsedPercent = 0
|
||||||
|
} else {
|
||||||
|
ret.UsedPercent = (float64(ret.Used) / float64(ret.Total)) * 100.0
|
||||||
|
}
|
||||||
|
|
||||||
|
return ret, nil
|
||||||
|
}
|
|
@ -0,0 +1,155 @@
|
||||||
|
// +build windows
|
||||||
|
|
||||||
|
package disk
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"github.com/StackExchange/wmi"
|
||||||
|
|
||||||
|
"github.com/shirou/gopsutil/internal/common"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Lazily-resolved kernel32.dll entry points used below.
// NOTE(review): "provGetVolumeInformation" looks like a typo for
// "procGetVolumeInformation"; left as-is because Partitions references it.
var (
	procGetDiskFreeSpaceExW     = common.Modkernel32.NewProc("GetDiskFreeSpaceExW")
	procGetLogicalDriveStringsW = common.Modkernel32.NewProc("GetLogicalDriveStringsW")
	procGetDriveType            = common.Modkernel32.NewProc("GetDriveTypeW")
	provGetVolumeInformation    = common.Modkernel32.NewProc("GetVolumeInformationW")
)

// File-system flag bits returned by GetVolumeInformationW.
var (
	FileFileCompression = int64(16)     // 0x00000010
	FileReadOnlyVolume  = int64(524288) // 0x00080000
)
|
||||||
|
|
||||||
|
// Win32_PerfFormattedData receives rows from the WMI class
// Win32_PerfFormattedData_PerfDisk_LogicalDisk queried in IOCounters.
// Field names must match the WMI property names exactly.
type Win32_PerfFormattedData struct {
	Name                    string
	AvgDiskBytesPerRead     uint64
	AvgDiskBytesPerWrite    uint64
	AvgDiskReadQueueLength  uint64
	AvgDiskWriteQueueLength uint64
	AvgDisksecPerRead       uint64
	AvgDisksecPerWrite      uint64
}

// WaitMSec is a wait interval in milliseconds.
// NOTE(review): not referenced anywhere in this file's visible code —
// presumably kept for callers elsewhere; confirm before removing.
const WaitMSec = 500
|
||||||
|
func Usage(path string) (*UsageStat, error) {
|
||||||
|
ret := &UsageStat{}
|
||||||
|
|
||||||
|
lpFreeBytesAvailable := int64(0)
|
||||||
|
lpTotalNumberOfBytes := int64(0)
|
||||||
|
lpTotalNumberOfFreeBytes := int64(0)
|
||||||
|
diskret, _, err := procGetDiskFreeSpaceExW.Call(
|
||||||
|
uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(path))),
|
||||||
|
uintptr(unsafe.Pointer(&lpFreeBytesAvailable)),
|
||||||
|
uintptr(unsafe.Pointer(&lpTotalNumberOfBytes)),
|
||||||
|
uintptr(unsafe.Pointer(&lpTotalNumberOfFreeBytes)))
|
||||||
|
if diskret == 0 {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
ret = &UsageStat{
|
||||||
|
Path: path,
|
||||||
|
Total: uint64(lpTotalNumberOfBytes),
|
||||||
|
Free: uint64(lpTotalNumberOfFreeBytes),
|
||||||
|
Used: uint64(lpTotalNumberOfBytes) - uint64(lpTotalNumberOfFreeBytes),
|
||||||
|
UsedPercent: (float64(lpTotalNumberOfBytes) - float64(lpTotalNumberOfFreeBytes)) / float64(lpTotalNumberOfBytes) * 100,
|
||||||
|
// InodesTotal: 0,
|
||||||
|
// InodesFree: 0,
|
||||||
|
// InodesUsed: 0,
|
||||||
|
// InodesUsedPercent: 0,
|
||||||
|
}
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Partitions returns the logical drives of the host, enumerated via
// GetLogicalDriveStringsW. Floppy drives (A:, B:) are always skipped, and
// only removable, fixed, and CD-ROM drive types are reported.
// NOTE(review): the `all` parameter is accepted for cross-platform API
// parity but is not referenced in this body.
func Partitions(all bool) ([]PartitionStat, error) {
	var ret []PartitionStat
	lpBuffer := make([]byte, 254)
	diskret, _, err := procGetLogicalDriveStringsW.Call(
		uintptr(len(lpBuffer)),
		uintptr(unsafe.Pointer(&lpBuffer[0])))
	if diskret == 0 {
		return ret, err
	}
	for _, v := range lpBuffer {
		// Bytes in the ASCII range 'A'..'Z' are drive letters.
		if v >= 65 && v <= 90 {
			path := string(v) + ":"
			if path == "A:" || path == "B:" { // skip floppy drives
				continue
			}
			typepath, _ := syscall.UTF16PtrFromString(path)
			typeret, _, _ := procGetDriveType.Call(uintptr(unsafe.Pointer(typepath)))
			if typeret == 0 {
				return ret, syscall.GetLastError()
			}
			// 2: DRIVE_REMOVABLE 3: DRIVE_FIXED 5: DRIVE_CDROM

			if typeret == 2 || typeret == 3 || typeret == 5 {
				// Out-buffers for GetVolumeInformationW.
				lpVolumeNameBuffer := make([]byte, 256)
				lpVolumeSerialNumber := int64(0)
				lpMaximumComponentLength := int64(0)
				lpFileSystemFlags := int64(0)
				lpFileSystemNameBuffer := make([]byte, 256)
				volpath, _ := syscall.UTF16PtrFromString(string(v) + ":/")
				driveret, _, err := provGetVolumeInformation.Call(
					uintptr(unsafe.Pointer(volpath)),
					uintptr(unsafe.Pointer(&lpVolumeNameBuffer[0])),
					uintptr(len(lpVolumeNameBuffer)),
					uintptr(unsafe.Pointer(&lpVolumeSerialNumber)),
					uintptr(unsafe.Pointer(&lpMaximumComponentLength)),
					uintptr(unsafe.Pointer(&lpFileSystemFlags)),
					uintptr(unsafe.Pointer(&lpFileSystemNameBuffer[0])),
					uintptr(len(lpFileSystemNameBuffer)))
				if driveret == 0 {
					if typeret == 5 {
						continue //device is not ready will happen if there is no disk in the drive
					}
					return ret, err
				}
				opts := "rw"
				if lpFileSystemFlags&FileReadOnlyVolume != 0 {
					opts = "ro"
				}
				if lpFileSystemFlags&FileFileCompression != 0 {
					opts += ".compress"
				}

				d := PartitionStat{
					Mountpoint: path,
					Device:     path,
					// Strip the NUL padding from the fixed-size name buffer.
					Fstype: string(bytes.Replace(lpFileSystemNameBuffer, []byte("\x00"), []byte(""), -1)),
					Opts:   opts,
				}
				ret = append(ret, d)
			}
		}
	}
	return ret, nil
}
|
||||||
|
|
||||||
|
// IOCounters returns per-logical-disk counters from the WMI class
// Win32_PerfFormattedData_PerfDisk_LogicalDisk, keyed by the short drive
// name (e.g. "C:").
// NOTE(review): the values mapped into ReadCount/ReadBytes/ReadTime are
// WMI *average* metrics (queue length, bytes per transfer, sec per
// transfer), not cumulative counters like on other platforms — this
// mirrors the vendored upstream behavior; confirm against gopsutil before
// relying on cross-platform comparability.
func IOCounters() (map[string]IOCountersStat, error) {
	ret := make(map[string]IOCountersStat, 0)
	var dst []Win32_PerfFormattedData

	err := wmi.Query("SELECT * FROM Win32_PerfFormattedData_PerfDisk_LogicalDisk ", &dst)
	if err != nil {
		return ret, err
	}
	for _, d := range dst {
		// Names longer than "X:" style entries are aggregates such as
		// "_Total" or "HarddiskVolume…" — skip them.
		if len(d.Name) > 3 { // not get _Total or Harddrive
			continue
		}
		ret[d.Name] = IOCountersStat{
			Name:       d.Name,
			ReadCount:  uint64(d.AvgDiskReadQueueLength),
			WriteCount: d.AvgDiskWriteQueueLength,
			ReadBytes:  uint64(d.AvgDiskBytesPerRead),
			WriteBytes: uint64(d.AvgDiskBytesPerWrite),
			ReadTime:   d.AvgDisksecPerRead,
			WriteTime:  d.AvgDisksecPerWrite,
		}
	}
	return ret, nil
}
|
|
@ -258,6 +258,7 @@
|
||||||
{"path":"github.com/ryanuber/go-glob","checksumSHA1":"6JP37UqrI0H80Gpk0Y2P+KXgn5M=","revision":"256dc444b735e061061cf46c809487313d5b0065","revisionTime":"2017-01-28T01:21:29Z"},
|
{"path":"github.com/ryanuber/go-glob","checksumSHA1":"6JP37UqrI0H80Gpk0Y2P+KXgn5M=","revision":"256dc444b735e061061cf46c809487313d5b0065","revisionTime":"2017-01-28T01:21:29Z"},
|
||||||
{"path":"github.com/sean-/seed","checksumSHA1":"A/YUMbGg1LHIeK2+NLZBt+MIAao=","revision":"3c72d44db0c567f7c901f9c5da5fe68392227750","revisionTime":"2017-02-08T16:47:21Z"},
|
{"path":"github.com/sean-/seed","checksumSHA1":"A/YUMbGg1LHIeK2+NLZBt+MIAao=","revision":"3c72d44db0c567f7c901f9c5da5fe68392227750","revisionTime":"2017-02-08T16:47:21Z"},
|
||||||
{"path":"github.com/shirou/gopsutil/cpu","checksumSHA1":"zW2k8E1gkuySzTz2eXuSEDhpffY=","revision":"32b6636de04b303274daac3ca2b10d3b0e4afc35","revisionTime":"2017-02-04T05:36:48Z"},
|
{"path":"github.com/shirou/gopsutil/cpu","checksumSHA1":"zW2k8E1gkuySzTz2eXuSEDhpffY=","revision":"32b6636de04b303274daac3ca2b10d3b0e4afc35","revisionTime":"2017-02-04T05:36:48Z"},
|
||||||
|
{"path":"github.com/shirou/gopsutil/disk","checksumSHA1":"wxkkOLGCVJ/qrh+eSSFyIW2kTd8=","revision":"b62e301a8b9958eebb7299683eb57fab229a9501","revisionTime":"2017-02-08T02:55:55Z"},
|
||||||
{"path":"github.com/shirou/gopsutil/host","checksumSHA1":"GsqEEmGv6sj8DreS2SYXRkoZ9NI=","revision":"b62e301a8b9958eebb7299683eb57fab229a9501","revisionTime":"2017-02-08T02:55:55Z"},
|
{"path":"github.com/shirou/gopsutil/host","checksumSHA1":"GsqEEmGv6sj8DreS2SYXRkoZ9NI=","revision":"b62e301a8b9958eebb7299683eb57fab229a9501","revisionTime":"2017-02-08T02:55:55Z"},
|
||||||
{"path":"github.com/shirou/gopsutil/internal/common","checksumSHA1":"hz9RxkaV3Tnju2eiHBWO/Yv7n5c=","revision":"32b6636de04b303274daac3ca2b10d3b0e4afc35","revisionTime":"2017-02-04T05:36:48Z"},
|
{"path":"github.com/shirou/gopsutil/internal/common","checksumSHA1":"hz9RxkaV3Tnju2eiHBWO/Yv7n5c=","revision":"32b6636de04b303274daac3ca2b10d3b0e4afc35","revisionTime":"2017-02-04T05:36:48Z"},
|
||||||
{"path":"github.com/shirou/gopsutil/mem","checksumSHA1":"XQwjGKI51Y3aQ3/jNyRh9Gnprgg=","revision":"32b6636de04b303274daac3ca2b10d3b0e4afc35","revisionTime":"2017-02-04T05:36:48Z"},
|
{"path":"github.com/shirou/gopsutil/mem","checksumSHA1":"XQwjGKI51Y3aQ3/jNyRh9Gnprgg=","revision":"32b6636de04b303274daac3ca2b10d3b0e4afc35","revisionTime":"2017-02-04T05:36:48Z"},
|
||||||
|
|
|
@ -936,7 +936,8 @@ default will automatically work with some tooling.
|
||||||
be checked using the agent's credentials. This was added in Consul 1.0.1 and defaults to false.
|
be checked using the agent's credentials. This was added in Consul 1.0.1 and defaults to false.
|
||||||
|
|
||||||
* <a name="enable_debug"></a><a href="#enable_debug">`enable_debug`</a> When set, enables some
|
* <a name="enable_debug"></a><a href="#enable_debug">`enable_debug`</a> When set, enables some
|
||||||
additional debugging features. Currently, this is only used to set the runtime profiling HTTP endpoints.
|
additional debugging features. Currently, this is only used to access runtime profiling HTTP endpoints, which
|
||||||
|
are available with an `operator:read` ACL regardless of the value of `enable_debug`.
|
||||||
|
|
||||||
* <a name="enable_script_checks"></a><a href="#enable_script_checks">`enable_script_checks`</a> Equivalent to the
|
* <a name="enable_script_checks"></a><a href="#enable_script_checks">`enable_script_checks`</a> Equivalent to the
|
||||||
[`-enable-script-checks` command-line flag](#_enable_script_checks).
|
[`-enable-script-checks` command-line flag](#_enable_script_checks).
|
||||||
|
|
|
@ -0,0 +1,122 @@
|
||||||
|
---
|
||||||
|
layout: "docs"
|
||||||
|
page_title: "Commands: Debug"
|
||||||
|
sidebar_current: "docs-commands-debug"
|
||||||
|
---
|
||||||
|
|
||||||
|
# Consul Debug
|
||||||
|
|
||||||
|
Command: `consul debug`
|
||||||
|
|
||||||
|
The `consul debug` command monitors a Consul agent for the specified period of
|
||||||
|
time, recording information about the agent, cluster, and environment to an archive
|
||||||
|
written to the current directory.
|
||||||
|
|
||||||
|
Providing support for complex issues encountered by Consul operators often
|
||||||
|
requires a large amount of debugging information to be retrieved. This command
|
||||||
|
aims to shortcut that coordination and provide a simple workflow for accessing
|
||||||
|
data about Consul agent, cluster, and environment to enable faster
|
||||||
|
isolation and debugging of issues.
|
||||||
|
|
||||||
|
This command requires an `operator:read` ACL token in order to retrieve the
|
||||||
|
data from the target agent, if ACLs are enabled.
|
||||||
|
|
||||||
|
If the command is interrupted, as it could be given a long duration but
|
||||||
|
require less time than expected, it will attempt to archive the current
|
||||||
|
captured data.
|
||||||
|
|
||||||
|
## Security and Privacy
|
||||||
|
|
||||||
|
By default, ACL tokens, private keys, and other sensitive material related
|
||||||
|
to Consul is sanitized and not available in this archive. However, other
|
||||||
|
information about the environment the target agent is running in is available
|
||||||
|
in plain text within the archive.
|
||||||
|
|
||||||
|
It is recommended to validate the contents of the archive and redact any
|
||||||
|
material classified as sensitive to the target environment, or use the `-capture`
|
||||||
|
flag to not retrieve it initially.
|
||||||
|
|
||||||
|
Additionally, we recommend securely transmitting this archive via encryption
|
||||||
|
or otherwise.
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
`Usage: consul debug [options]`
|
||||||
|
|
||||||
|
By default, the debug command will capture an archive at the current path for
|
||||||
|
all targets for 2 minutes.
|
||||||
|
|
||||||
|
#### API Options
|
||||||
|
|
||||||
|
<%= partial "docs/commands/http_api_options_client" %>
|
||||||
|
|
||||||
|
#### Command Options
|
||||||
|
|
||||||
|
* `-duration` - Optional, the total time to capture data for from the target agent. Must
|
||||||
|
be greater than the interval and longer than 10 seconds. Defaults to 2 minutes.
|
||||||
|
|
||||||
|
* `-interval` - Optional, the interval at which to capture dynamic data, such as logs
|
||||||
|
and metrics. Must be longer than 5 seconds. Defaults to 30 seconds.
|
||||||
|
|
||||||
|
* `-capture` - Optional, can be specified multiple times for each [capture target](#capture-targets)
|
||||||
|
and will only record that information in the archive.
|
||||||
|
|
||||||
|
* `-output` - Optional, the full path of where to write the directory of data and
|
||||||
|
resulting archive. Defaults to the current directory.
|
||||||
|
|
||||||
|
* `-archive` - Optional, whether the tool should archive the directory of data into a
|
||||||
|
compressed tar file. Defaults to true.
|
||||||
|
|
||||||
|
## Capture Targets
|
||||||
|
|
||||||
|
The `-capture` flag can be specified multiple times to capture specific
|
||||||
|
information when `debug` is running. By default, it captures all information.
|
||||||
|
|
||||||
|
| Target | Description |
|
||||||
|
| ------ | ---------------------------- |
|
||||||
|
| `agent` | Version and configuration information about the agent. |
|
||||||
|
| `host` | Information about resources on the host running the target agent such as CPU, memory, and disk. |
|
||||||
|
| `cluster` | A list of all the WAN and LAN members in the cluster. |
|
||||||
|
| `metrics` | Metrics from the in-memory metrics endpoint in the target, captured at the interval. |
|
||||||
|
| `logs` | `DEBUG` level logs for the target agent, captured for the interval. |
|
||||||
|
| `pprof` | Golang heap, CPU, goroutine, and trace profiling. This information is not retrieved unless [`enable_debug`](/docs/agent/options.html#enable_debug) is set to `true` on the target agent. |
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
This command can be run from any host with the Consul binary, but requires
|
||||||
|
network access to the target agent in order to retrieve data. Once retrieved,
|
||||||
|
the data is written to the specified path (defaulting to the current
|
||||||
|
directory) on the host where the command runs.
|
||||||
|
|
||||||
|
By default the command will capture all available data from the default
|
||||||
|
agent address on loopback for 2 minutes at 30 second intervals.
|
||||||
|
|
||||||
|
```text
|
||||||
|
$ consul debug
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
|
In this example, the archive is collected from a different agent on the
|
||||||
|
network using the standard Consul CLI flag to change the API address.
|
||||||
|
|
||||||
|
```text
|
||||||
|
$ consul debug -http-addr=10.0.1.10:8500
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
|
The capture flag can be specified to only record a subset of data
|
||||||
|
about the agent and environment.
|
||||||
|
|
||||||
|
```text
|
||||||
|
$ consul debug -capture agent -capture host -capture logs
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
|
The duration of the command and interval of capturing dynamic
|
||||||
|
information (such as metrics) can be specified with the `-interval`
|
||||||
|
and `-duration` flags.
|
||||||
|
|
||||||
|
```text
|
||||||
|
$ consul debug -interval=15s -duration=1m
|
||||||
|
...
|
||||||
|
```
|
|
@ -84,6 +84,9 @@
|
||||||
</li>
|
</li>
|
||||||
</ul>
|
</ul>
|
||||||
</li>
|
</li>
|
||||||
|
<li<%= sidebar_current("docs-commands-debug") %>>
|
||||||
|
<a href="/docs/commands/debug.html">debug</a>
|
||||||
|
</li>
|
||||||
<li<%= sidebar_current("docs-commands-event") %>>
|
<li<%= sidebar_current("docs-commands-event") %>>
|
||||||
<a href="/docs/commands/event.html">event</a>
|
<a href="/docs/commands/event.html">event</a>
|
||||||
</li>
|
</li>
|
||||||
|
|
Loading…
Reference in New Issue