commit 65d2fb5e32

@@ -3,7 +3,7 @@
 package docker
 
 import (
-    "github.com/docker/docker/daemon/caps"
+    "github.com/docker/docker/oci/caps"
    docker "github.com/fsouza/go-dockerclient"
 )
 
@@ -19,7 +19,7 @@ func tweakCapabilities(basics, adds, drops []string) ([]string, error) {
        basics[i] = "CAP_" + cap
    }
 
-   effectiveCaps, err := caps.TweakCapabilities(basics, adds, drops)
+   effectiveCaps, err := caps.TweakCapabilities(basics, adds, drops, nil, false)
    if err != nil {
        return effectiveCaps, err
    }
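Note: the only change above is the call into the vendored caps package, whose newer TweakCapabilities takes two extra trailing arguments (passed here as nil and false). As a rough, self-contained illustration of what the surrounding context lines do — normalize bare capability names to CAP_* and merge add/drop lists — consider the sketch below. The helper names are invented for the example; this is not the vendored implementation.

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// normalizeCaps mirrors the normalization visible in the hunk's context line
// (basics[i] = "CAP_" + cap): bare names like "chown" become "CAP_CHOWN".
func normalizeCaps(names []string) []string {
	out := make([]string, 0, len(names))
	for _, n := range names {
		n = strings.ToUpper(n)
		if !strings.HasPrefix(n, "CAP_") {
			n = "CAP_" + n
		}
		out = append(out, n)
	}
	return out
}

// applyAddDrop merges an allow-list with add/drop sets, roughly the result a
// tweakCapabilities-style helper hands to the container configuration.
func applyAddDrop(basics, adds, drops []string) []string {
	set := map[string]bool{}
	for _, c := range normalizeCaps(basics) {
		set[c] = true
	}
	for _, c := range normalizeCaps(adds) {
		set[c] = true
	}
	for _, c := range normalizeCaps(drops) {
		delete(set, c)
	}
	out := make([]string, 0, len(set))
	for c := range set {
		out = append(out, c)
	}
	sort.Strings(out)
	return out
}

func main() {
	fmt.Println(applyAddDrop(
		[]string{"chown", "net_bind_service"},
		[]string{"sys_time"},
		[]string{"net_bind_service"},
	))
	// Output: [CAP_CHOWN CAP_SYS_TIME]
}
```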
@@ -11,7 +11,9 @@ import (
    "strings"
 
    "github.com/docker/cli/cli/config/configfile"
+   "github.com/docker/cli/cli/config/types"
    "github.com/docker/distribution/reference"
+   registrytypes "github.com/docker/docker/api/types/registry"
    "github.com/docker/docker/registry"
    docker "github.com/fsouza/go-dockerclient"
 )
@@ -117,7 +119,7 @@ func authFromDockerConfig(file string) authBackend {
 
    return firstValidAuth(repo, []authBackend{
        func(string) (*docker.AuthConfiguration, error) {
-           dockerAuthConfig := registry.ResolveAuthConfig(cfile.AuthConfigs, repoInfo.Index)
+           dockerAuthConfig := registryResolveAuthConfig(cfile.AuthConfigs, repoInfo.Index)
            auth := &docker.AuthConfiguration{
                Username: dockerAuthConfig.Username,
                Password: dockerAuthConfig.Password,
@@ -280,3 +282,25 @@ func parseVolumeSpecLinux(volBind string) (hostPath string, containerPath string
 
    return parts[0], parts[1], m, nil
 }
+
+// ResolveAuthConfig matches an auth configuration to a server address or a URL
+// copied from https://github.com/moby/moby/blob/ca20bc4214e6a13a5f134fb0d2f67c38065283a8/registry/auth.go#L217-L235
+// but with the CLI types.AuthConfig type rather than api/types
+func registryResolveAuthConfig(authConfigs map[string]types.AuthConfig, index *registrytypes.IndexInfo) types.AuthConfig {
+   configKey := registry.GetAuthConfigKey(index)
+   // First try the happy case
+   if c, found := authConfigs[configKey]; found || index.Official {
+       return c
+   }
+
+   // Maybe they have a legacy config file, we will iterate the keys converting
+   // them to the new format and testing
+   for r, ac := range authConfigs {
+       if configKey == registry.ConvertToHostname(r) {
+           return ac
+       }
+   }
+
+   // When all else fails, return an empty auth config
+   return types.AuthConfig{}
+}
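Note: the added registryResolveAuthConfig resolves credentials in two passes — an exact match on the key computed by registry.GetAuthConfigKey (or the official index), then a fallback that normalizes legacy config keys down to bare hostnames. A self-contained sketch of that lookup order, using plain strings in place of the registry and types packages; the sample keys are invented.

```go
package main

import (
	"fmt"
	"strings"
)

// convertToHostname strips an http/https scheme and any path, matching the
// behaviour the helper relies on via registry.ConvertToHostname.
func convertToHostname(url string) string {
	stripped := strings.TrimPrefix(strings.TrimPrefix(url, "https://"), "http://")
	return strings.SplitN(stripped, "/", 2)[0]
}

// resolve mimics the two-step lookup: exact key first, then legacy keys
// normalized to hostnames.
func resolve(authConfigs map[string]string, configKey string) string {
	if c, found := authConfigs[configKey]; found {
		return c
	}
	for r, ac := range authConfigs {
		if configKey == convertToHostname(r) {
			return ac
		}
	}
	return "" // empty auth config when nothing matches
}

func main() {
	configs := map[string]string{
		"https://registry.example.com/v1/": "legacy-entry",
	}
	// Matched via the legacy-key fallback, not the exact key.
	fmt.Println(resolve(configs, "registry.example.com"))
}
```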
120  vendor/github.com/docker/cli/cli/config/configfile/file.go  (generated, vendored)

@@ -11,8 +11,7 @@ import (
    "strings"
 
    "github.com/docker/cli/cli/config/credentials"
-   "github.com/docker/cli/opts"
-   "github.com/docker/docker/api/types"
+   "github.com/docker/cli/cli/config/types"
    "github.com/pkg/errors"
 )
 
@@ -25,29 +24,33 @@ const (
 
 // ConfigFile ~/.docker/config.json file info
 type ConfigFile struct {
    AuthConfigs map[string]types.AuthConfig `json:"auths"`
    HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"`
    PsFormat string `json:"psFormat,omitempty"`
    ImagesFormat string `json:"imagesFormat,omitempty"`
    NetworksFormat string `json:"networksFormat,omitempty"`
    PluginsFormat string `json:"pluginsFormat,omitempty"`
    VolumesFormat string `json:"volumesFormat,omitempty"`
    StatsFormat string `json:"statsFormat,omitempty"`
    DetachKeys string `json:"detachKeys,omitempty"`
    CredentialsStore string `json:"credsStore,omitempty"`
    CredentialHelpers map[string]string `json:"credHelpers,omitempty"`
    Filename string `json:"-"` // Note: for internal use only
    ServiceInspectFormat string `json:"serviceInspectFormat,omitempty"`
    ServicesFormat string `json:"servicesFormat,omitempty"`
    TasksFormat string `json:"tasksFormat,omitempty"`
    SecretFormat string `json:"secretFormat,omitempty"`
    ConfigFormat string `json:"configFormat,omitempty"`
    NodesFormat string `json:"nodesFormat,omitempty"`
    PruneFilters []string `json:"pruneFilters,omitempty"`
    Proxies map[string]ProxyConfig `json:"proxies,omitempty"`
    Experimental string `json:"experimental,omitempty"`
    StackOrchestrator string `json:"stackOrchestrator,omitempty"`
    Kubernetes *KubernetesConfig `json:"kubernetes,omitempty"`
+   CurrentContext string `json:"currentContext,omitempty"`
+   CLIPluginsExtraDirs []string `json:"cliPluginsExtraDirs,omitempty"`
+   Plugins map[string]map[string]string `json:"plugins,omitempty"`
+   Aliases map[string]string `json:"aliases,omitempty"`
 }
 
 // ProxyConfig contains proxy configuration settings
@@ -69,6 +72,8 @@ func New(fn string) *ConfigFile {
        AuthConfigs: make(map[string]types.AuthConfig),
        HTTPHeaders: make(map[string]string),
        Filename:    fn,
+       Plugins:     make(map[string]map[string]string),
+       Aliases:     make(map[string]string),
    }
 }
 
@@ -118,9 +123,11 @@ func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error {
    }
    var err error
    for addr, ac := range configFile.AuthConfigs {
-       ac.Username, ac.Password, err = decodeAuth(ac.Auth)
-       if err != nil {
-           return err
+       if ac.Auth != "" {
+           ac.Username, ac.Password, err = decodeAuth(ac.Auth)
+           if err != nil {
+               return err
+           }
        }
        ac.Auth = ""
        ac.ServerAddress = addr
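Note: the new `if ac.Auth != ""` guard skips decoding entries whose credentials live in a credential helper rather than inline. decodeAuth itself is not part of this diff; assuming the usual ~/.docker/config.json convention that the auth field is base64("username:password"), a decoding sketch looks like this (the helper name is invented):

```go
package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

// decodeAuthField splits a base64("user:password") value as stored in the
// "auth" key of ~/.docker/config.json. It mirrors the documented config file
// format, not the vendored decodeAuth implementation.
func decodeAuthField(auth string) (user, pass string, err error) {
	raw, err := base64.StdEncoding.DecodeString(auth)
	if err != nil {
		return "", "", err
	}
	parts := strings.SplitN(string(raw), ":", 2)
	if len(parts) != 2 {
		return "", "", fmt.Errorf("auth field is not in user:password form")
	}
	return parts[0], parts[1], nil
}

func main() {
	u, p, err := decodeAuthField(base64.StdEncoding.EncodeToString([]byte("alice:s3cret")))
	fmt.Println(u, p, err)
}
```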
@@ -175,20 +182,26 @@ func (configFile *ConfigFile) Save() error {
        return errors.Errorf("Can't save config with empty filename")
    }
 
-   if err := os.MkdirAll(filepath.Dir(configFile.Filename), 0700); err != nil {
+   dir := filepath.Dir(configFile.Filename)
+   if err := os.MkdirAll(dir, 0700); err != nil {
        return err
    }
-   f, err := os.OpenFile(configFile.Filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
+   temp, err := ioutil.TempFile(dir, filepath.Base(configFile.Filename))
    if err != nil {
        return err
    }
-   defer f.Close()
-   return configFile.SaveToWriter(f)
+   err = configFile.SaveToWriter(temp)
+   temp.Close()
+   if err != nil {
+       os.Remove(temp.Name())
+       return err
+   }
+   return os.Rename(temp.Name(), configFile.Filename)
 }
 
 // ParseProxyConfig computes proxy configuration by retrieving the config for the provided host and
 // then checking this against any environment variables provided to the container
-func (configFile *ConfigFile) ParseProxyConfig(host string, runOpts []string) map[string]*string {
+func (configFile *ConfigFile) ParseProxyConfig(host string, runOpts map[string]*string) map[string]*string {
    var cfgKey string
 
    if _, ok := configFile.Proxies[host]; !ok {
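Note: Save now writes to a temporary file and renames it into place, so a crash mid-write can no longer truncate an existing config. A minimal, standalone sketch of the same write-temp-then-rename pattern (the path and payload are invented for the example):

```go
package main

import (
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
)

// writeFileAtomic writes data next to path and renames it over path — the
// same ioutil.TempFile + os.Rename pattern the updated Save uses.
func writeFileAtomic(path string, data []byte) error {
	dir := filepath.Dir(path)
	if err := os.MkdirAll(dir, 0700); err != nil {
		return err
	}
	tmp, err := ioutil.TempFile(dir, filepath.Base(path))
	if err != nil {
		return err
	}
	_, werr := tmp.Write(data)
	cerr := tmp.Close()
	if werr != nil || cerr != nil {
		os.Remove(tmp.Name()) // clean up the partial temp file
		if werr != nil {
			return werr
		}
		return cerr
	}
	return os.Rename(tmp.Name(), path)
}

func main() {
	if err := writeFileAtomic("/tmp/example-config.json", []byte(`{"auths":{}}`)); err != nil {
		log.Fatal(err)
	}
}
```

The temp file is created in the same directory as the target so the final os.Rename stays on one filesystem, where the rename is atomic.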
@@ -204,7 +217,10 @@ func (configFile *ConfigFile) ParseProxyConfig(host string, runOpts []string) ma
        "NO_PROXY":  &config.NoProxy,
        "FTP_PROXY": &config.FTPProxy,
    }
-   m := opts.ConvertKVStringsToMapWithNil(runOpts)
+   m := runOpts
+   if m == nil {
+       m = make(map[string]*string)
+   }
    for k := range permitted {
        if *permitted[k] == "" {
            continue
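Note: ParseProxyConfig used to accept raw KEY=VAL strings and convert them with opts.ConvertKVStringsToMapWithNil; since the cli/opts package is dropped from the vendor tree below, callers now pass a map[string]*string directly, where a nil value means "unset this variable in the container". A hedged sketch of the conversion a caller might now perform itself (an illustrative stand-in, not the removed cli/opts function):

```go
package main

import (
	"fmt"
	"strings"
)

// kvStringsToMapWithNil converts ["FOO=bar", "BAZ"] into
// {"FOO": &"bar", "BAZ": nil}, the shape ParseProxyConfig now expects.
func kvStringsToMapWithNil(values []string) map[string]*string {
	result := make(map[string]*string, len(values))
	for _, value := range values {
		kv := strings.SplitN(value, "=", 2)
		if len(kv) == 1 {
			result[kv[0]] = nil // bare key: explicitly unset
		} else {
			v := kv[1]
			result[kv[0]] = &v
		}
	}
	return result
}

func main() {
	m := kvStringsToMapWithNil([]string{"HTTP_PROXY=http://proxy:3128", "NO_PROXY"})
	for k, v := range m {
		if v == nil {
			fmt.Println(k, "=> unset")
		} else {
			fmt.Println(k, "=>", *v)
		}
	}
}
```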
@@ -320,6 +336,42 @@ func (configFile *ConfigFile) GetFilename() string {
    return configFile.Filename
 }
 
+// PluginConfig retrieves the requested option for the given plugin.
+func (configFile *ConfigFile) PluginConfig(pluginname, option string) (string, bool) {
+   if configFile.Plugins == nil {
+       return "", false
+   }
+   pluginConfig, ok := configFile.Plugins[pluginname]
+   if !ok {
+       return "", false
+   }
+   value, ok := pluginConfig[option]
+   return value, ok
+}
+
+// SetPluginConfig sets the option to the given value for the given
+// plugin. Passing a value of "" will remove the option. If removing
+// the final config item for a given plugin then also cleans up the
+// overall plugin entry.
+func (configFile *ConfigFile) SetPluginConfig(pluginname, option, value string) {
+   if configFile.Plugins == nil {
+       configFile.Plugins = make(map[string]map[string]string)
+   }
+   pluginConfig, ok := configFile.Plugins[pluginname]
+   if !ok {
+       pluginConfig = make(map[string]string)
+       configFile.Plugins[pluginname] = pluginConfig
+   }
+   if value != "" {
+       pluginConfig[option] = value
+   } else {
+       delete(pluginConfig, option)
+   }
+   if len(pluginConfig) == 0 {
+       delete(configFile.Plugins, pluginname)
+   }
+}
+
 func checkKubernetesConfiguration(kubeConfig *KubernetesConfig) error {
    if kubeConfig == nil {
        return nil
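Note: the new PluginConfig/SetPluginConfig accessors manage a two-level map (plugin name → option → value); writing an empty value deletes the option, and a plugin with no options left is pruned entirely. A standalone sketch of the same semantics on a plain nested map (the plugin and option names are invented):

```go
package main

import "fmt"

// setPluginOption mirrors SetPluginConfig: "" removes the option, and a
// plugin entry with no options left is deleted entirely.
func setPluginOption(plugins map[string]map[string]string, plugin, option, value string) {
	cfg, ok := plugins[plugin]
	if !ok {
		cfg = make(map[string]string)
		plugins[plugin] = cfg
	}
	if value != "" {
		cfg[option] = value
	} else {
		delete(cfg, option)
	}
	if len(cfg) == 0 {
		delete(plugins, plugin)
	}
}

func main() {
	plugins := map[string]map[string]string{}
	setPluginOption(plugins, "buildx", "experimental", "enabled")
	fmt.Println(plugins) // map[buildx:map[experimental:enabled]]
	setPluginOption(plugins, "buildx", "experimental", "")
	fmt.Println(plugins) // map[] — empty plugin entries are pruned
}
```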
2  vendor/github.com/docker/cli/cli/config/credentials/credentials.go  (generated, vendored)

@@ -1,7 +1,7 @@
 package credentials
 
 import (
-   "github.com/docker/docker/api/types"
+   "github.com/docker/cli/cli/config/types"
 )
 
 // Store is the interface that any credentials store must implement.
23  vendor/github.com/docker/cli/cli/config/credentials/file_store.go  (generated, vendored)

@@ -1,8 +1,9 @@
 package credentials
 
 import (
-   "github.com/docker/docker/api/types"
-   "github.com/docker/docker/registry"
+   "strings"
+
+   "github.com/docker/cli/cli/config/types"
 )
 
 type store interface {
@@ -35,7 +36,7 @@ func (c *fileStore) Get(serverAddress string) (types.AuthConfig, error) {
    // Maybe they have a legacy config file, we will iterate the keys converting
    // them to the new format and testing
    for r, ac := range c.file.GetAuthConfigs() {
-       if serverAddress == registry.ConvertToHostname(r) {
+       if serverAddress == ConvertToHostname(r) {
            return ac, nil
        }
    }
@@ -62,3 +63,19 @@ func (c *fileStore) GetFilename() string {
 func (c *fileStore) IsFileStore() bool {
    return true
 }
+
+// ConvertToHostname converts a registry url which has http|https prepended
+// to just an hostname.
+// Copied from github.com/docker/docker/registry.ConvertToHostname to reduce dependencies.
+func ConvertToHostname(url string) string {
+   stripped := url
+   if strings.HasPrefix(url, "http://") {
+       stripped = strings.TrimPrefix(url, "http://")
+   } else if strings.HasPrefix(url, "https://") {
+       stripped = strings.TrimPrefix(url, "https://")
+   }
+
+   nameParts := strings.SplitN(stripped, "/", 2)
+
+   return nameParts[0]
+}
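Note: ConvertToHostname is copied into the credentials package (rather than imported from github.com/docker/docker/registry) so the file store no longer drags in the full registry dependency. A small test-style sketch of the behaviour of the function added above; the sample URLs are invented:

```go
package credentials

import "testing"

// TestConvertToHostname exercises the helper added above: it strips an
// http/https scheme and any path, keeping only host (and port).
func TestConvertToHostname(t *testing.T) {
	cases := map[string]string{
		"registry.example.com":               "registry.example.com",
		"https://registry.example.com":       "registry.example.com",
		"http://registry.example.com/v1/":    "registry.example.com",
		"https://registry.example.com:5000":  "registry.example.com:5000",
	}
	for in, want := range cases {
		if got := ConvertToHostname(in); got != want {
			t.Errorf("ConvertToHostname(%q) = %q, want %q", in, got, want)
		}
	}
}
```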
2  vendor/github.com/docker/cli/cli/config/credentials/native_store.go  (generated, vendored)

@@ -1,9 +1,9 @@
 package credentials
 
 import (
+   "github.com/docker/cli/cli/config/types"
    "github.com/docker/docker-credential-helpers/client"
    "github.com/docker/docker-credential-helpers/credentials"
-   "github.com/docker/docker/api/types"
 )
 
 const (
22  vendor/github.com/docker/cli/cli/config/types/authconfig.go  (generated, vendored, new file)

@@ -0,0 +1,22 @@
+package types
+
+// AuthConfig contains authorization information for connecting to a Registry
+type AuthConfig struct {
+   Username string `json:"username,omitempty"`
+   Password string `json:"password,omitempty"`
+   Auth     string `json:"auth,omitempty"`
+
+   // Email is an optional value associated with the username.
+   // This field is deprecated and will be removed in a later
+   // version of docker.
+   Email string `json:"email,omitempty"`
+
+   ServerAddress string `json:"serveraddress,omitempty"`
+
+   // IdentityToken is used to authenticate the user and get
+   // an access token for the registry.
+   IdentityToken string `json:"identitytoken,omitempty"`
+
+   // RegistryToken is a bearer token to be sent to a registry
+   RegistryToken string `json:"registrytoken,omitempty"`
+}
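Note: the new types.AuthConfig mirrors the entries under the "auths" key of ~/.docker/config.json. A quick round-trip sketch showing which fields the struct tags expose (the values are invented; the struct is reproduced locally so the example is self-contained — real code would import github.com/docker/cli/cli/config/types):

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// AuthConfig is copied from the new vendored file for a self-contained example.
type AuthConfig struct {
	Username      string `json:"username,omitempty"`
	Password      string `json:"password,omitempty"`
	Auth          string `json:"auth,omitempty"`
	Email         string `json:"email,omitempty"`
	ServerAddress string `json:"serveraddress,omitempty"`
	IdentityToken string `json:"identitytoken,omitempty"`
	RegistryToken string `json:"registrytoken,omitempty"`
}

func main() {
	auths := map[string]AuthConfig{
		"registry.example.com": {Username: "alice", Password: "s3cret", ServerAddress: "registry.example.com"},
	}
	out, err := json.MarshalIndent(map[string]interface{}{"auths": auths}, "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out)) // the shape LoadFromReader parses back into AuthConfigs
}
```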
98  vendor/github.com/docker/cli/opts/config.go  (generated, vendored, deleted)

@@ -1,98 +0,0 @@
[entire vendored file removed — ConfigOpt, a flag Value type for parsing swarm --config values; full listing omitted]
64  vendor/github.com/docker/cli/opts/duration.go  (generated, vendored, deleted)

@@ -1,64 +0,0 @@
[entire vendored file removed — DurationOpt and PositiveDurationOpt, pointer-based time.Duration flag types; full listing omitted]
46  vendor/github.com/docker/cli/opts/env.go  (generated, vendored, deleted)

@@ -1,46 +0,0 @@
[entire vendored file removed — ValidateEnv and the doesEnvExist helper for environment-variable flags; full listing omitted]
22  vendor/github.com/docker/cli/opts/envfile.go  (generated, vendored, deleted)

@@ -1,22 +0,0 @@
[entire vendored file removed — ParseEnvFile, which reads environment variables from a file; full listing omitted]
71  vendor/github.com/docker/cli/opts/file.go  (generated, vendored, deleted)

@@ -1,71 +0,0 @@
[entire vendored file removed — parseKeyValueFile and the ErrBadKey error type used for env-file parsing; full listing omitted]
165  vendor/github.com/docker/cli/opts/hosts.go  (generated, vendored, deleted)

@@ -1,165 +0,0 @@
[entire vendored file removed — default daemon host/port constants plus ValidateHost, ParseHost, parseDockerDaemonHost, parseSimpleProtoAddr, ParseTCPAddr and ValidateExtraHost; full listing omitted]
8  vendor/github.com/docker/cli/opts/hosts_unix.go  (generated, vendored, deleted)

@@ -1,8 +0,0 @@
-// +build !windows
-
-package opts
-
-import "fmt"
-
-// DefaultHost constant defines the default host string used by docker on other hosts than Windows
-var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket)
6  vendor/github.com/docker/cli/opts/hosts_windows.go  (generated, vendored, deleted)

@@ -1,6 +0,0 @@
-// +build windows
-
-package opts
-
-// DefaultHost constant defines the default host string used by docker on Windows
-var DefaultHost = "npipe://" + DefaultNamedPipe
47  vendor/github.com/docker/cli/opts/ip.go  (generated, vendored, deleted)

@@ -1,47 +0,0 @@
[entire vendored file removed — IPOpt, a flag type holding a net.IP; full listing omitted]
174  vendor/github.com/docker/cli/opts/mount.go  (generated, vendored, deleted)

@@ -1,174 +0,0 @@
[entire vendored file removed — MountOpt, the flag type that parses --mount CSV values into mounttypes.Mount; full listing omitted]
106  vendor/github.com/docker/cli/opts/network.go  (generated, vendored, deleted)

@@ -1,106 +0,0 @@
[entire vendored file removed — NetworkOpt and NetworkAttachmentOpts for parsing --network values; full listing omitted]
524  vendor/github.com/docker/cli/opts/opts.go  (generated, vendored, deleted)

@@ -1,524 +0,0 @@
[entire vendored file removed — ListOpts, MapOpts and their named variants, the IP/MAC/DNS-search/label/sysctl validators, and FilterOpt; the capture of this listing is truncated mid-file and the remainder is not reproduced here]
return o.filter
|
|
||||||
}
|
|
||||||
|
|
||||||
// NanoCPUs is a type for fixed point fractional number.
|
|
||||||
type NanoCPUs int64
|
|
||||||
|
|
||||||
// String returns the string format of the number
|
|
||||||
func (c *NanoCPUs) String() string {
|
|
||||||
if *c == 0 {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return big.NewRat(c.Value(), 1e9).FloatString(3)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set sets the value of the NanoCPU by passing a string
|
|
||||||
func (c *NanoCPUs) Set(value string) error {
|
|
||||||
cpus, err := ParseCPUs(value)
|
|
||||||
*c = NanoCPUs(cpus)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Type returns the type
|
|
||||||
func (c *NanoCPUs) Type() string {
|
|
||||||
return "decimal"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Value returns the value in int64
|
|
||||||
func (c *NanoCPUs) Value() int64 {
|
|
||||||
return int64(*c)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseCPUs takes a string ratio and returns an integer value of nano cpus
|
|
||||||
func ParseCPUs(value string) (int64, error) {
|
|
||||||
cpu, ok := new(big.Rat).SetString(value)
|
|
||||||
if !ok {
|
|
||||||
return 0, fmt.Errorf("failed to parse %v as a rational number", value)
|
|
||||||
}
|
|
||||||
nano := cpu.Mul(cpu, big.NewRat(1e9, 1))
|
|
||||||
if !nano.IsInt() {
|
|
||||||
return 0, fmt.Errorf("value is too precise")
|
|
||||||
}
|
|
||||||
return nano.Num().Int64(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseLink parses and validates the specified string as a link format (name:alias)
|
|
||||||
func ParseLink(val string) (string, string, error) {
|
|
||||||
if val == "" {
|
|
||||||
return "", "", fmt.Errorf("empty string specified for links")
|
|
||||||
}
|
|
||||||
arr := strings.Split(val, ":")
|
|
||||||
if len(arr) > 2 {
|
|
||||||
return "", "", fmt.Errorf("bad format for links: %s", val)
|
|
||||||
}
|
|
||||||
if len(arr) == 1 {
|
|
||||||
return val, val, nil
|
|
||||||
}
|
|
||||||
// This is kept because we can actually get a HostConfig with links
|
|
||||||
// from an already created container and the format is not `foo:bar`
|
|
||||||
// but `/foo:/c1/bar`
|
|
||||||
if strings.HasPrefix(arr[0], "/") {
|
|
||||||
_, alias := path.Split(arr[1])
|
|
||||||
return arr[0][1:], alias, nil
|
|
||||||
}
|
|
||||||
return arr[0], arr[1], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ValidateLink validates that the specified string has a valid link format (containerName:alias).
|
|
||||||
func ValidateLink(val string) (string, error) {
|
|
||||||
_, _, err := ParseLink(val)
|
|
||||||
return val, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// MemBytes is a type for human readable memory bytes (like 128M, 2g, etc)
|
|
||||||
type MemBytes int64
|
|
||||||
|
|
||||||
// String returns the string format of the human readable memory bytes
|
|
||||||
func (m *MemBytes) String() string {
|
|
||||||
// NOTE: In spf13/pflag/flag.go, "0" is considered as "zero value" while "0 B" is not.
|
|
||||||
// We return "0" in case value is 0 here so that the default value is hidden.
|
|
||||||
// (Sometimes "default 0 B" is actually misleading)
|
|
||||||
if m.Value() != 0 {
|
|
||||||
return units.BytesSize(float64(m.Value()))
|
|
||||||
}
|
|
||||||
return "0"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set sets the value of the MemBytes by passing a string
|
|
||||||
func (m *MemBytes) Set(value string) error {
|
|
||||||
val, err := units.RAMInBytes(value)
|
|
||||||
*m = MemBytes(val)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Type returns the type
|
|
||||||
func (m *MemBytes) Type() string {
|
|
||||||
return "bytes"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Value returns the value in int64
|
|
||||||
func (m *MemBytes) Value() int64 {
|
|
||||||
return int64(*m)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalJSON is the customized unmarshaler for MemBytes
|
|
||||||
func (m *MemBytes) UnmarshalJSON(s []byte) error {
|
|
||||||
if len(s) <= 2 || s[0] != '"' || s[len(s)-1] != '"' {
|
|
||||||
return fmt.Errorf("invalid size: %q", s)
|
|
||||||
}
|
|
||||||
val, err := units.RAMInBytes(string(s[1 : len(s)-1]))
|
|
||||||
*m = MemBytes(val)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// MemSwapBytes is a type for human readable memory bytes (like 128M, 2g, etc).
|
|
||||||
// It differs from MemBytes in that -1 is valid and the default.
|
|
||||||
type MemSwapBytes int64
|
|
||||||
|
|
||||||
// Set sets the value of the MemSwapBytes by passing a string
|
|
||||||
func (m *MemSwapBytes) Set(value string) error {
|
|
||||||
if value == "-1" {
|
|
||||||
*m = MemSwapBytes(-1)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
val, err := units.RAMInBytes(value)
|
|
||||||
*m = MemSwapBytes(val)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Type returns the type
|
|
||||||
func (m *MemSwapBytes) Type() string {
|
|
||||||
return "bytes"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Value returns the value in int64
|
|
||||||
func (m *MemSwapBytes) Value() int64 {
|
|
||||||
return int64(*m)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *MemSwapBytes) String() string {
|
|
||||||
b := MemBytes(*m)
|
|
||||||
return b.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalJSON is the customized unmarshaler for MemSwapBytes
|
|
||||||
func (m *MemSwapBytes) UnmarshalJSON(s []byte) error {
|
|
||||||
b := MemBytes(*m)
|
|
||||||
return b.UnmarshalJSON(s)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NullableBool is a type for tri-state boolean options
|
|
||||||
type NullableBool struct {
|
|
||||||
b *bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// Type returns the type
|
|
||||||
func (n *NullableBool) Type() string {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// Value returns the value in *bool
|
|
||||||
func (n *NullableBool) Value() *bool {
|
|
||||||
return n.b
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set sets the value. If value is empty string or "auto", nil is set.
|
|
||||||
// Otherwise true or false are set based on flag.Bool behavior.
|
|
||||||
func (n *NullableBool) Set(value string) error {
|
|
||||||
if value != "auto" && value != "" {
|
|
||||||
b, err := strconv.ParseBool(value)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
n.b = &b
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *NullableBool) String() string {
|
|
||||||
if n.b == nil {
|
|
||||||
return "auto"
|
|
||||||
}
|
|
||||||
return strconv.FormatBool(*n.b)
|
|
||||||
}
|
|
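As an aside, a minimal usage sketch (illustrative, not part of this commit) of the NanoCPUs and MemBytes flag types above; it assumes the upstream import path github.com/docker/cli/opts and made-up input values:

package main

import (
	"fmt"

	"github.com/docker/cli/opts"
)

func main() {
	// NanoCPUs: "0.5" CPUs are parsed via ParseCPUs into 500000000 nano-CPUs.
	var cpus opts.NanoCPUs
	if err := cpus.Set("0.5"); err != nil {
		panic(err)
	}
	fmt.Println(cpus.Value()) // 500000000

	// MemBytes: human-readable sizes go through go-units RAMInBytes.
	var mem opts.MemBytes
	if err := mem.Set("128m"); err != nil {
		panic(err)
	}
	fmt.Println(mem.Value()) // 134217728
}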
vendor/github.com/docker/cli/opts/opts_unix.go (generated, vendored; removed, 6 lines)
@@ -1,6 +0,0 @@
// +build !windows

package opts

// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. dockerd -H tcp://:8080
const DefaultHTTPHost = "localhost"
vendor/github.com/docker/cli/opts/opts_windows.go (generated, vendored; removed, 56 lines)
@@ -1,56 +0,0 @@
package opts

// TODO Windows. Identify bug in GOLang 1.5.1+ and/or Windows Server 2016 TP5.
// @jhowardmsft, @swernli.
//
// On Windows, this mitigates a problem with the default options of running
// a docker client against a local docker daemon on TP5.
//
// What was found that if the default host is "localhost", even if the client
// (and daemon as this is local) is not physically on a network, and the DNS
// cache is flushed (ipconfig /flushdns), then the client will pause for
// exactly one second when connecting to the daemon for calls. For example
// using docker run windowsservercore cmd, the CLI will send a create followed
// by an attach. You see the delay between the attach finishing and the attach
// being seen by the daemon.
//
// Here's some daemon debug logs with additional debug spew put in. The
// AfterWriteJSON log is the very last thing the daemon does as part of the
// create call. The POST /attach is the second CLI call. Notice the second
// time gap.
//
// time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs"
// time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig"
// time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...."
// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking....
// time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...."
// time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...."
// time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func"
// time="2015-11-06T13:38:37.282628100-08:00" level=debug msg="After daemon.create"
// time="2015-11-06T13:38:37.286651700-08:00" level=debug msg="return 2"
// time="2015-11-06T13:38:37.289629500-08:00" level=debug msg="Returned from daemon.ContainerCreate"
// time="2015-11-06T13:38:37.311629100-08:00" level=debug msg="After WriteJSON"
// ... 1 second gap here....
// time="2015-11-06T13:38:38.317866200-08:00" level=debug msg="Calling POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach"
// time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1"
//
// We suspect this is either a bug introduced in GOLang 1.5.1, or that a change
// in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows. In theory,
// the Windows networking stack is supposed to resolve "localhost" internally,
// without hitting DNS, or even reading the hosts file (which is why localhost
// is commented out in the hosts file on Windows).
//
// We have validated that working around this using the actual IPv4 localhost
// address does not cause the delay.
//
// This does not occur with the docker client built with 1.4.3 on the same
// Windows build, regardless of whether the daemon is built using 1.5.1
// or 1.4.3. It does not occur on Linux. We also verified we see the same thing
// on a cross-compiled Windows binary (from Linux).
//
// Final note: This is a mitigation, not a 'real' fix. It is still susceptible
// to the delay if a user were to do 'docker run -H=tcp://localhost:2375...'
// explicitly.

// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. dockerd -H tcp://:8080
const DefaultHTTPHost = "127.0.0.1"
vendor/github.com/docker/cli/opts/parse.go (generated, vendored; removed, 99 lines)
@@ -1,99 +0,0 @@
package opts

import (
	"fmt"
	"os"
	"strconv"
	"strings"

	"github.com/docker/docker/api/types/container"
)

// ReadKVStrings reads a file of line terminated key=value pairs, and overrides any keys
// present in the file with additional pairs specified in the override parameter
func ReadKVStrings(files []string, override []string) ([]string, error) {
	return readKVStrings(files, override, nil)
}

// ReadKVEnvStrings reads a file of line terminated key=value pairs, and overrides any keys
// present in the file with additional pairs specified in the override parameter.
// If a key has no value, it will get the value from the environment.
func ReadKVEnvStrings(files []string, override []string) ([]string, error) {
	return readKVStrings(files, override, os.Getenv)
}

func readKVStrings(files []string, override []string, emptyFn func(string) string) ([]string, error) {
	variables := []string{}
	for _, ef := range files {
		parsedVars, err := parseKeyValueFile(ef, emptyFn)
		if err != nil {
			return nil, err
		}
		variables = append(variables, parsedVars...)
	}
	// parse the '-e' and '--env' after, to allow override
	variables = append(variables, override...)

	return variables, nil
}

// ConvertKVStringsToMap converts ["key=value"] to {"key":"value"}
func ConvertKVStringsToMap(values []string) map[string]string {
	result := make(map[string]string, len(values))
	for _, value := range values {
		kv := strings.SplitN(value, "=", 2)
		if len(kv) == 1 {
			result[kv[0]] = ""
		} else {
			result[kv[0]] = kv[1]
		}
	}

	return result
}

// ConvertKVStringsToMapWithNil converts ["key=value"] to {"key":"value"}
// but set unset keys to nil - meaning the ones with no "=" in them.
// We use this in cases where we need to distinguish between
// FOO= and FOO
// where the latter case just means FOO was mentioned but not given a value
func ConvertKVStringsToMapWithNil(values []string) map[string]*string {
	result := make(map[string]*string, len(values))
	for _, value := range values {
		kv := strings.SplitN(value, "=", 2)
		if len(kv) == 1 {
			result[kv[0]] = nil
		} else {
			result[kv[0]] = &kv[1]
		}
	}

	return result
}

// ParseRestartPolicy returns the parsed policy or an error indicating what is incorrect
func ParseRestartPolicy(policy string) (container.RestartPolicy, error) {
	p := container.RestartPolicy{}

	if policy == "" {
		return p, nil
	}

	parts := strings.Split(policy, ":")

	if len(parts) > 2 {
		return p, fmt.Errorf("invalid restart policy format")
	}
	if len(parts) == 2 {
		count, err := strconv.Atoi(parts[1])
		if err != nil {
			return p, fmt.Errorf("maximum retry count must be an integer")
		}

		p.MaximumRetryCount = count
	}

	p.Name = parts[0]

	return p, nil
}
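For context, a small hedged example (not from this diff) of the parse helpers above, showing how ConvertKVStringsToMapWithNil distinguishes FOO= from a bare FOO and how ParseRestartPolicy splits a name:count string; values and import path are illustrative:

package main

import (
	"fmt"

	"github.com/docker/cli/opts"
)

func main() {
	// FOO= keeps an empty value, bare BAR maps to nil (value left to the runtime).
	env := opts.ConvertKVStringsToMapWithNil([]string{"FOO=", "BAR"})
	fmt.Println(*env["FOO"] == "", env["BAR"] == nil) // true true

	// "on-failure:3" -> policy name plus maximum retry count.
	policy, err := opts.ParseRestartPolicy("on-failure:3")
	if err != nil {
		panic(err)
	}
	fmt.Println(policy.Name, policy.MaximumRetryCount) // on-failure 3
}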
vendor/github.com/docker/cli/opts/port.go (generated, vendored; removed, 167 lines)
@@ -1,167 +0,0 @@
package opts

import (
	"encoding/csv"
	"fmt"
	"regexp"
	"strconv"
	"strings"

	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/go-connections/nat"
	"github.com/sirupsen/logrus"
)

const (
	portOptTargetPort    = "target"
	portOptPublishedPort = "published"
	portOptProtocol      = "protocol"
	portOptMode          = "mode"
)

// PortOpt represents a port config in swarm mode.
type PortOpt struct {
	ports []swarm.PortConfig
}

// Set a new port value
// nolint: gocyclo
func (p *PortOpt) Set(value string) error {
	longSyntax, err := regexp.MatchString(`\w+=\w+(,\w+=\w+)*`, value)
	if err != nil {
		return err
	}
	if longSyntax {
		csvReader := csv.NewReader(strings.NewReader(value))
		fields, err := csvReader.Read()
		if err != nil {
			return err
		}

		pConfig := swarm.PortConfig{}
		for _, field := range fields {
			parts := strings.SplitN(field, "=", 2)
			if len(parts) != 2 {
				return fmt.Errorf("invalid field %s", field)
			}

			key := strings.ToLower(parts[0])
			value := strings.ToLower(parts[1])

			switch key {
			case portOptProtocol:
				if value != string(swarm.PortConfigProtocolTCP) && value != string(swarm.PortConfigProtocolUDP) && value != string(swarm.PortConfigProtocolSCTP) {
					return fmt.Errorf("invalid protocol value %s", value)
				}

				pConfig.Protocol = swarm.PortConfigProtocol(value)
			case portOptMode:
				if value != string(swarm.PortConfigPublishModeIngress) && value != string(swarm.PortConfigPublishModeHost) {
					return fmt.Errorf("invalid publish mode value %s", value)
				}

				pConfig.PublishMode = swarm.PortConfigPublishMode(value)
			case portOptTargetPort:
				tPort, err := strconv.ParseUint(value, 10, 16)
				if err != nil {
					return err
				}

				pConfig.TargetPort = uint32(tPort)
			case portOptPublishedPort:
				pPort, err := strconv.ParseUint(value, 10, 16)
				if err != nil {
					return err
				}

				pConfig.PublishedPort = uint32(pPort)
			default:
				return fmt.Errorf("invalid field key %s", key)
			}
		}

		if pConfig.TargetPort == 0 {
			return fmt.Errorf("missing mandatory field %q", portOptTargetPort)
		}

		if pConfig.PublishMode == "" {
			pConfig.PublishMode = swarm.PortConfigPublishModeIngress
		}

		if pConfig.Protocol == "" {
			pConfig.Protocol = swarm.PortConfigProtocolTCP
		}

		p.ports = append(p.ports, pConfig)
	} else {
		// short syntax
		portConfigs := []swarm.PortConfig{}
		ports, portBindingMap, err := nat.ParsePortSpecs([]string{value})
		if err != nil {
			return err
		}
		for _, portBindings := range portBindingMap {
			for _, portBinding := range portBindings {
				if portBinding.HostIP != "" {
					return fmt.Errorf("hostip is not supported")
				}
			}
		}

		for port := range ports {
			portConfig, err := ConvertPortToPortConfig(port, portBindingMap)
			if err != nil {
				return err
			}
			portConfigs = append(portConfigs, portConfig...)
		}
		p.ports = append(p.ports, portConfigs...)
	}
	return nil
}

// Type returns the type of this option
func (p *PortOpt) Type() string {
	return "port"
}

// String returns a string repr of this option
func (p *PortOpt) String() string {
	ports := []string{}
	for _, port := range p.ports {
		repr := fmt.Sprintf("%v:%v/%s/%s", port.PublishedPort, port.TargetPort, port.Protocol, port.PublishMode)
		ports = append(ports, repr)
	}
	return strings.Join(ports, ", ")
}

// Value returns the ports
func (p *PortOpt) Value() []swarm.PortConfig {
	return p.ports
}

// ConvertPortToPortConfig converts ports to the swarm type
func ConvertPortToPortConfig(
	port nat.Port,
	portBindings map[nat.Port][]nat.PortBinding,
) ([]swarm.PortConfig, error) {
	ports := []swarm.PortConfig{}

	for _, binding := range portBindings[port] {
		if binding.HostIP != "" && binding.HostIP != "0.0.0.0" {
			logrus.Warnf("ignoring IP-address (%s:%s:%s) service will listen on '0.0.0.0'", binding.HostIP, binding.HostPort, port)
		}
		hostPort, err := strconv.ParseUint(binding.HostPort, 10, 16)
		if err != nil && binding.HostPort != "" {
			return nil, fmt.Errorf("invalid hostport binding (%s) for port (%s)", binding.HostPort, port.Port())
		}
		ports = append(ports, swarm.PortConfig{
			//TODO Name: ?
			Protocol:      swarm.PortConfigProtocol(strings.ToLower(port.Proto())),
			TargetPort:    uint32(port.Int()),
			PublishedPort: uint32(hostPort),
			PublishMode:   swarm.PortConfigPublishModeIngress,
		})
	}
	return ports, nil
}
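A minimal sketch (illustrative only) of PortOpt accepting both syntaxes handled above: the csv "long" form routed through the regexp branch, and the short host:container form routed through nat.ParsePortSpecs; the port numbers are made up:

package main

import (
	"fmt"

	"github.com/docker/cli/opts"
)

func main() {
	var p opts.PortOpt

	// Long (csv) syntax, matched by the regexp and parsed field by field.
	if err := p.Set("target=80,published=8080,protocol=tcp,mode=ingress"); err != nil {
		panic(err)
	}
	// Short syntax, converted via ConvertPortToPortConfig with ingress defaults.
	if err := p.Set("9090:90"); err != nil {
		panic(err)
	}
	fmt.Println(p.String()) // e.g. 8080:80/tcp/ingress, 9090:90/tcp/ingress
}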
vendor/github.com/docker/cli/opts/quotedstring.go (generated, vendored; removed, 37 lines)
@@ -1,37 +0,0 @@
package opts

// QuotedString is a string that may have extra quotes around the value. The
// quotes are stripped from the value.
type QuotedString struct {
	value *string
}

// Set sets a new value
func (s *QuotedString) Set(val string) error {
	*s.value = trimQuotes(val)
	return nil
}

// Type returns the type of the value
func (s *QuotedString) Type() string {
	return "string"
}

func (s *QuotedString) String() string {
	return *s.value
}

func trimQuotes(value string) string {
	lastIndex := len(value) - 1
	for _, char := range []byte{'\'', '"'} {
		if value[0] == char && value[lastIndex] == char {
			return value[1:lastIndex]
		}
	}
	return value
}

// NewQuotedString returns a new quoted string option
func NewQuotedString(value *string) *QuotedString {
	return &QuotedString{value: value}
}
vendor/github.com/docker/cli/opts/runtime.go (generated, vendored; removed, 79 lines)
@@ -1,79 +0,0 @@
package opts

import (
	"fmt"
	"strings"

	"github.com/docker/docker/api/types"
)

// RuntimeOpt defines a map of Runtimes
type RuntimeOpt struct {
	name             string
	stockRuntimeName string
	values           *map[string]types.Runtime
}

// NewNamedRuntimeOpt creates a new RuntimeOpt
func NewNamedRuntimeOpt(name string, ref *map[string]types.Runtime, stockRuntime string) *RuntimeOpt {
	if ref == nil {
		ref = &map[string]types.Runtime{}
	}
	return &RuntimeOpt{name: name, values: ref, stockRuntimeName: stockRuntime}
}

// Name returns the name of the NamedListOpts in the configuration.
func (o *RuntimeOpt) Name() string {
	return o.name
}

// Set validates and updates the list of Runtimes
func (o *RuntimeOpt) Set(val string) error {
	parts := strings.SplitN(val, "=", 2)
	if len(parts) != 2 {
		return fmt.Errorf("invalid runtime argument: %s", val)
	}

	parts[0] = strings.TrimSpace(parts[0])
	parts[1] = strings.TrimSpace(parts[1])
	if parts[0] == "" || parts[1] == "" {
		return fmt.Errorf("invalid runtime argument: %s", val)
	}

	parts[0] = strings.ToLower(parts[0])
	if parts[0] == o.stockRuntimeName {
		return fmt.Errorf("runtime name '%s' is reserved", o.stockRuntimeName)
	}

	if _, ok := (*o.values)[parts[0]]; ok {
		return fmt.Errorf("runtime '%s' was already defined", parts[0])
	}

	(*o.values)[parts[0]] = types.Runtime{Path: parts[1]}

	return nil
}

// String returns Runtime values as a string.
func (o *RuntimeOpt) String() string {
	var out []string
	for k := range *o.values {
		out = append(out, k)
	}

	return fmt.Sprintf("%v", out)
}

// GetMap returns a map of Runtimes (name: path)
func (o *RuntimeOpt) GetMap() map[string]types.Runtime {
	if o.values != nil {
		return *o.values
	}

	return map[string]types.Runtime{}
}

// Type returns the type of the option
func (o *RuntimeOpt) Type() string {
	return "runtime"
}
vendor/github.com/docker/cli/opts/secret.go (generated, vendored; removed, 98 lines)
@@ -1,98 +0,0 @@
package opts

import (
	"encoding/csv"
	"fmt"
	"os"
	"strconv"
	"strings"

	swarmtypes "github.com/docker/docker/api/types/swarm"
)

// SecretOpt is a Value type for parsing secrets
type SecretOpt struct {
	values []*swarmtypes.SecretReference
}

// Set a new secret value
func (o *SecretOpt) Set(value string) error {
	csvReader := csv.NewReader(strings.NewReader(value))
	fields, err := csvReader.Read()
	if err != nil {
		return err
	}

	options := &swarmtypes.SecretReference{
		File: &swarmtypes.SecretReferenceFileTarget{
			UID:  "0",
			GID:  "0",
			Mode: 0444,
		},
	}

	// support a simple syntax of --secret foo
	if len(fields) == 1 {
		options.File.Name = fields[0]
		options.SecretName = fields[0]
		o.values = append(o.values, options)
		return nil
	}

	for _, field := range fields {
		parts := strings.SplitN(field, "=", 2)
		key := strings.ToLower(parts[0])

		if len(parts) != 2 {
			return fmt.Errorf("invalid field '%s' must be a key=value pair", field)
		}

		value := parts[1]
		switch key {
		case "source", "src":
			options.SecretName = value
		case "target":
			options.File.Name = value
		case "uid":
			options.File.UID = value
		case "gid":
			options.File.GID = value
		case "mode":
			m, err := strconv.ParseUint(value, 0, 32)
			if err != nil {
				return fmt.Errorf("invalid mode specified: %v", err)
			}

			options.File.Mode = os.FileMode(m)
		default:
			return fmt.Errorf("invalid field in secret request: %s", key)
		}
	}

	if options.SecretName == "" {
		return fmt.Errorf("source is required")
	}

	o.values = append(o.values, options)
	return nil
}

// Type returns the type of this option
func (o *SecretOpt) Type() string {
	return "secret"
}

// String returns a string repr of this option
func (o *SecretOpt) String() string {
	secrets := []string{}
	for _, secret := range o.values {
		repr := fmt.Sprintf("%s -> %s", secret.SecretName, secret.File.Name)
		secrets = append(secrets, repr)
	}
	return strings.Join(secrets, ", ")
}

// Value returns the secret requests
func (o *SecretOpt) Value() []*swarmtypes.SecretReference {
	return o.values
}
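As an illustration (not part of this commit), SecretOpt above accepts both the single-name shorthand and the csv key=value form; the secret names, uid/gid and mode here are hypothetical:

package main

import (
	"fmt"

	"github.com/docker/cli/opts"
)

func main() {
	var s opts.SecretOpt

	// Simple form: source and target are both "foo", mode defaults to 0444.
	if err := s.Set("foo"); err != nil {
		panic(err)
	}
	// csv form with explicit target, ownership and mode (mode parsed as octal).
	if err := s.Set("source=site.key,target=ssl.key,uid=1000,gid=1000,mode=0400"); err != nil {
		panic(err)
	}
	fmt.Println(s.String()) // foo -> foo, site.key -> ssl.key
}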
vendor/github.com/docker/cli/opts/throttledevice.go (generated, vendored; removed, 108 lines)
@@ -1,108 +0,0 @@
package opts

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/docker/docker/api/types/blkiodev"
	"github.com/docker/go-units"
)

// ValidatorThrottleFctType defines a validator function that returns a validated struct and/or an error.
type ValidatorThrottleFctType func(val string) (*blkiodev.ThrottleDevice, error)

// ValidateThrottleBpsDevice validates that the specified string has a valid device-rate format.
func ValidateThrottleBpsDevice(val string) (*blkiodev.ThrottleDevice, error) {
	split := strings.SplitN(val, ":", 2)
	if len(split) != 2 {
		return nil, fmt.Errorf("bad format: %s", val)
	}
	if !strings.HasPrefix(split[0], "/dev/") {
		return nil, fmt.Errorf("bad format for device path: %s", val)
	}
	rate, err := units.RAMInBytes(split[1])
	if err != nil {
		return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>[<unit>]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val)
	}
	if rate < 0 {
		return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>[<unit>]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val)
	}

	return &blkiodev.ThrottleDevice{
		Path: split[0],
		Rate: uint64(rate),
	}, nil
}

// ValidateThrottleIOpsDevice validates that the specified string has a valid device-rate format.
func ValidateThrottleIOpsDevice(val string) (*blkiodev.ThrottleDevice, error) {
	split := strings.SplitN(val, ":", 2)
	if len(split) != 2 {
		return nil, fmt.Errorf("bad format: %s", val)
	}
	if !strings.HasPrefix(split[0], "/dev/") {
		return nil, fmt.Errorf("bad format for device path: %s", val)
	}
	rate, err := strconv.ParseUint(split[1], 10, 64)
	if err != nil {
		return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>. Number must be a positive integer", val)
	}
	if rate < 0 {
		return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>. Number must be a positive integer", val)
	}

	return &blkiodev.ThrottleDevice{Path: split[0], Rate: rate}, nil
}

// ThrottledeviceOpt defines a map of ThrottleDevices
type ThrottledeviceOpt struct {
	values    []*blkiodev.ThrottleDevice
	validator ValidatorThrottleFctType
}

// NewThrottledeviceOpt creates a new ThrottledeviceOpt
func NewThrottledeviceOpt(validator ValidatorThrottleFctType) ThrottledeviceOpt {
	values := []*blkiodev.ThrottleDevice{}
	return ThrottledeviceOpt{
		values:    values,
		validator: validator,
	}
}

// Set validates a ThrottleDevice and sets its name as a key in ThrottledeviceOpt
func (opt *ThrottledeviceOpt) Set(val string) error {
	var value *blkiodev.ThrottleDevice
	if opt.validator != nil {
		v, err := opt.validator(val)
		if err != nil {
			return err
		}
		value = v
	}
	(opt.values) = append((opt.values), value)
	return nil
}

// String returns ThrottledeviceOpt values as a string.
func (opt *ThrottledeviceOpt) String() string {
	var out []string
	for _, v := range opt.values {
		out = append(out, v.String())
	}

	return fmt.Sprintf("%v", out)
}

// GetList returns a slice of pointers to ThrottleDevices.
func (opt *ThrottledeviceOpt) GetList() []*blkiodev.ThrottleDevice {
	var throttledevice []*blkiodev.ThrottleDevice
	throttledevice = append(throttledevice, opt.values...)

	return throttledevice
}

// Type returns the option type
func (opt *ThrottledeviceOpt) Type() string {
	return "list"
}
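A quick hedged sketch (not from this diff) of the bytes-per-second validator above; the device path and rate are made up, and the rate goes through go-units RAMInBytes, so "1mb" becomes 1048576:

package main

import (
	"fmt"

	"github.com/docker/cli/opts"
)

func main() {
	// Validate a "<device-path>:<rate>" spec such as the one --device-write-bps takes.
	td, err := opts.ValidateThrottleBpsDevice("/dev/sda:1mb")
	if err != nil {
		panic(err)
	}
	fmt.Println(td.Path, td.Rate) // /dev/sda 1048576
}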
vendor/github.com/docker/cli/opts/ulimit.go (generated, vendored; removed, 57 lines)
@@ -1,57 +0,0 @@
package opts

import (
	"fmt"

	"github.com/docker/go-units"
)

// UlimitOpt defines a map of Ulimits
type UlimitOpt struct {
	values *map[string]*units.Ulimit
}

// NewUlimitOpt creates a new UlimitOpt
func NewUlimitOpt(ref *map[string]*units.Ulimit) *UlimitOpt {
	if ref == nil {
		ref = &map[string]*units.Ulimit{}
	}
	return &UlimitOpt{ref}
}

// Set validates a Ulimit and sets its name as a key in UlimitOpt
func (o *UlimitOpt) Set(val string) error {
	l, err := units.ParseUlimit(val)
	if err != nil {
		return err
	}

	(*o.values)[l.Name] = l

	return nil
}

// String returns Ulimit values as a string.
func (o *UlimitOpt) String() string {
	var out []string
	for _, v := range *o.values {
		out = append(out, v.String())
	}

	return fmt.Sprintf("%v", out)
}

// GetList returns a slice of pointers to Ulimits.
func (o *UlimitOpt) GetList() []*units.Ulimit {
	var ulimits []*units.Ulimit
	for _, v := range *o.values {
		ulimits = append(ulimits, v)
	}

	return ulimits
}

// Type returns the option type
func (o *UlimitOpt) Type() string {
	return "ulimit"
}
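A minimal usage sketch (illustrative, not part of the commit) of UlimitOpt above; passing nil lets the option allocate its own map, and the "nofile=1024:2048" value is hypothetical:

package main

import (
	"fmt"

	"github.com/docker/cli/opts"
)

func main() {
	o := opts.NewUlimitOpt(nil)
	// go-units ParseUlimit splits name, soft and hard limits.
	if err := o.Set("nofile=1024:2048"); err != nil {
		panic(err)
	}
	for _, u := range o.GetList() {
		fmt.Println(u.Name, u.Soft, u.Hard) // nofile 1024 2048
	}
}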
vendor/github.com/docker/cli/opts/weightdevice.go (generated, vendored; removed, 84 lines)
@@ -1,84 +0,0 @@
package opts

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/docker/docker/api/types/blkiodev"
)

// ValidatorWeightFctType defines a validator function that returns a validated struct and/or an error.
type ValidatorWeightFctType func(val string) (*blkiodev.WeightDevice, error)

// ValidateWeightDevice validates that the specified string has a valid device-weight format.
func ValidateWeightDevice(val string) (*blkiodev.WeightDevice, error) {
	split := strings.SplitN(val, ":", 2)
	if len(split) != 2 {
		return nil, fmt.Errorf("bad format: %s", val)
	}
	if !strings.HasPrefix(split[0], "/dev/") {
		return nil, fmt.Errorf("bad format for device path: %s", val)
	}
	weight, err := strconv.ParseUint(split[1], 10, 0)
	if err != nil {
		return nil, fmt.Errorf("invalid weight for device: %s", val)
	}
	if weight > 0 && (weight < 10 || weight > 1000) {
		return nil, fmt.Errorf("invalid weight for device: %s", val)
	}

	return &blkiodev.WeightDevice{
		Path:   split[0],
		Weight: uint16(weight),
	}, nil
}

// WeightdeviceOpt defines a map of WeightDevices
type WeightdeviceOpt struct {
	values    []*blkiodev.WeightDevice
	validator ValidatorWeightFctType
}

// NewWeightdeviceOpt creates a new WeightdeviceOpt
func NewWeightdeviceOpt(validator ValidatorWeightFctType) WeightdeviceOpt {
	values := []*blkiodev.WeightDevice{}
	return WeightdeviceOpt{
		values:    values,
		validator: validator,
	}
}

// Set validates a WeightDevice and sets its name as a key in WeightdeviceOpt
func (opt *WeightdeviceOpt) Set(val string) error {
	var value *blkiodev.WeightDevice
	if opt.validator != nil {
		v, err := opt.validator(val)
		if err != nil {
			return err
		}
		value = v
	}
	(opt.values) = append((opt.values), value)
	return nil
}

// String returns WeightdeviceOpt values as a string.
func (opt *WeightdeviceOpt) String() string {
	var out []string
	for _, v := range opt.values {
		out = append(out, v.String())
	}

	return fmt.Sprintf("%v", out)
}

// GetList returns a slice of pointers to WeightDevices.
func (opt *WeightdeviceOpt) GetList() []*blkiodev.WeightDevice {
	return opt.values
}

// Type returns the option type
func (opt *WeightdeviceOpt) Type() string {
	return "list"
}
vendor/github.com/docker/docker/LICENSE (generated, vendored, 2 changed lines)
@@ -176,7 +176,7 @@

    END OF TERMS AND CONDITIONS

-   Copyright 2013-2017 Docker, Inc.
+   Copyright 2013-2018 Docker, Inc.

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
vendor/github.com/docker/docker/NOTICE (generated, vendored, 2 changed lines)
@@ -3,7 +3,7 @@ Copyright 2012-2017 Docker, Inc.

 This product includes software developed at Docker, Inc. (https://www.docker.com).

-This product contains software (https://github.com/kr/pty) developed
+This product contains software (https://github.com/creack/pty) developed
 by Keith Rarick, licensed under the MIT License.

 The following is courtesy of our legal counsel:
vendor/github.com/docker/docker/api/types/client.go (generated, vendored, 11 changed lines)
@@ -50,7 +50,7 @@ type ContainerCommitOptions struct {

 // ContainerExecInspect holds information returned by exec inspect.
 type ContainerExecInspect struct {
-	ExecID string
+	ExecID string `json:"ID"`
 	ContainerID string
 	Running bool
 	ExitCode int
@@ -187,6 +187,15 @@ type ImageBuildOptions struct {
 	// build request. The same identifier can be used to gracefully cancel the
 	// build with the cancel request.
 	BuildID string
+	// Outputs defines configurations for exporting build results. Only supported
+	// in BuildKit mode
+	Outputs []ImageBuildOutput
+}
+
+// ImageBuildOutput defines configuration for exporting a build result
+type ImageBuildOutput struct {
+	Type  string
+	Attrs map[string]string
 }

 // BuilderVersion sets the version of underlying builder to use
vendor/github.com/docker/docker/api/types/container/config.go (generated, vendored, 2 changed lines)
@@ -54,7 +54,7 @@ type Config struct {
 	Env []string // List of environment variable to set in the container
 	Cmd strslice.StrSlice // Command to run when starting the container
 	Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
-	ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific)
+	ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (meaning treat as a command line) (Windows specific).
 	Image string // Name of the image as it was passed by the operator (e.g. could be symbolic)
 	Volumes map[string]struct{} // List of volumes (mounts) used for the container
 	WorkingDir string // Current directory (PWD) in the command will be launched
vendor/github.com/docker/docker/api/types/container/container_changes.go (generated, vendored, 2 changed lines)
@@ -1,4 +1,4 @@
-package container
+package container // import "github.com/docker/docker/api/types/container"

 // ----------------------------------------------------------------------------
 // DO NOT EDIT THIS FILE
vendor/github.com/docker/docker/api/types/container/container_create.go (generated, vendored, 2 changed lines)
@@ -1,4 +1,4 @@
-package container
+package container // import "github.com/docker/docker/api/types/container"

 // ----------------------------------------------------------------------------
 // DO NOT EDIT THIS FILE
vendor/github.com/docker/docker/api/types/container/container_top.go (generated, vendored, 2 changed lines)
@@ -1,4 +1,4 @@
-package container
+package container // import "github.com/docker/docker/api/types/container"

 // ----------------------------------------------------------------------------
 // DO NOT EDIT THIS FILE
vendor/github.com/docker/docker/api/types/container/container_update.go (generated, vendored, 2 changed lines)
@@ -1,4 +1,4 @@
-package container
+package container // import "github.com/docker/docker/api/types/container"

 // ----------------------------------------------------------------------------
 // DO NOT EDIT THIS FILE
vendor/github.com/docker/docker/api/types/container/container_wait.go (generated, vendored, 2 changed lines)
@@ -1,4 +1,4 @@
-package container
+package container // import "github.com/docker/docker/api/types/container"

 // ----------------------------------------------------------------------------
 // DO NOT EDIT THIS FILE
vendor/github.com/docker/docker/api/types/container/host_config.go (generated, vendored, 22 changed lines)
@@ -244,6 +244,16 @@ func (n PidMode) Container() string {
 	return ""
 }

+// DeviceRequest represents a request for devices from a device driver.
+// Used by GPU device drivers.
+type DeviceRequest struct {
+	Driver string // Name of device driver
+	Count int // Number of devices to request (-1 = All)
+	DeviceIDs []string // List of device IDs as recognizable by the device driver
+	Capabilities [][]string // An OR list of AND lists of device capabilities (e.g. "gpu")
+	Options map[string]string // Options to pass onto the device driver
+}
+
 // DeviceMapping represents the device mapping between the host and the container.
 type DeviceMapping struct {
 	PathOnHost string
@@ -327,13 +337,14 @@ type Resources struct {
 	CpusetMems string // CpusetMems 0-2, 0,1
 	Devices []DeviceMapping // List of devices to map inside the container
 	DeviceCgroupRules []string // List of rule to be added to the device cgroup
-	DiskQuota int64 // Disk limit (in bytes)
+	DeviceRequests []DeviceRequest // List of device requests for device drivers
 	KernelMemory int64 // Kernel memory limit (in bytes)
+	KernelMemoryTCP int64 // Hard limit for kernel TCP buffer memory (in bytes)
 	MemoryReservation int64 // Memory soft limit (in bytes)
 	MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap
 	MemorySwappiness *int64 // Tuning container memory swappiness behaviour
 	OomKillDisable *bool // Whether to disable OOM Killer or not
-	PidsLimit int64 // Setting pids limit for a container
+	PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change.
 	Ulimits []*units.Ulimit // List of ulimits to be set in the container

 	// Applicable to Windows
@@ -369,9 +380,10 @@ type HostConfig struct {
 	// Applicable to UNIX platforms
 	CapAdd strslice.StrSlice // List of kernel capabilities to add to the container
 	CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container
+	Capabilities []string `json:"Capabilities"` // List of kernel capabilities to be available for container (this overrides the default set)
 	DNS []string `json:"Dns"` // List of DNS server to lookup
 	DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for
 	DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for
 	ExtraHosts []string // List of extra hosts
 	GroupAdd []string // List of additional groups that the container process will run as
 	IpcMode IpcMode // IPC namespace to use for the container
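For illustration only, a hedged sketch of how the new DeviceRequest field is typically populated; this is roughly the shape a GPU request takes (for example, requesting all devices that satisfy the "gpu" capability), not an excerpt from this commit:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	// Request every device from any driver advertising the "gpu" capability.
	hostConfig := container.HostConfig{
		Resources: container.Resources{
			DeviceRequests: []container.DeviceRequest{{
				Count:        -1, // -1 means "all devices"
				Capabilities: [][]string{{"gpu"}},
			}},
		},
	}
	fmt.Printf("%+v\n", hostConfig.Resources.DeviceRequests[0])
}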
70
vendor/github.com/docker/docker/api/types/filters/parse.go
generated
vendored
70
vendor/github.com/docker/docker/api/types/filters/parse.go
generated vendored
@@ -5,7 +5,6 @@ package filters // import "github.com/docker/docker/api/types/filters"
 
 import (
 	"encoding/json"
-	"errors"
 	"regexp"
 	"strings"
 
@@ -37,39 +36,13 @@ func NewArgs(initialArgs ...KeyValuePair) Args {
 	return args
 }
 
-// ParseFlag parses a key=value string and adds it to an Args.
-//
-// Deprecated: Use Args.Add()
-func ParseFlag(arg string, prev Args) (Args, error) {
-	filters := prev
-	if len(arg) == 0 {
-		return filters, nil
-	}
-
-	if !strings.Contains(arg, "=") {
-		return filters, ErrBadFormat
-	}
-
-	f := strings.SplitN(arg, "=", 2)
-
-	name := strings.ToLower(strings.TrimSpace(f[0]))
-	value := strings.TrimSpace(f[1])
-
-	filters.Add(name, value)
-
-	return filters, nil
-}
-
-// ErrBadFormat is an error returned when a filter is not in the form key=value
-//
-// Deprecated: this error will be removed in a future version
-var ErrBadFormat = errors.New("bad format of filter (expected name=value)")
-
-// ToParam encodes the Args as args JSON encoded string
-//
-// Deprecated: use ToJSON
-func ToParam(a Args) (string, error) {
-	return ToJSON(a)
-}
+// Keys returns all the keys in list of Args
+func (args Args) Keys() []string {
+	keys := make([]string, 0, len(args.fields))
+	for k := range args.fields {
+		keys = append(keys, k)
+	}
+	return keys
+}
 
 // MarshalJSON returns a JSON byte representation of the Args
@@ -107,13 +80,6 @@ func ToParamWithVersion(version string, a Args) (string, error) {
 	return ToJSON(a)
 }
 
-// FromParam decodes a JSON encoded string into Args
-//
-// Deprecated: use FromJSON
-func FromParam(p string) (Args, error) {
-	return FromJSON(p)
-}
-
 // FromJSON decodes a JSON encoded string into Args
 func FromJSON(p string) (Args, error) {
 	args := NewArgs()
@@ -275,14 +241,6 @@ func (args Args) FuzzyMatch(key, source string) bool {
 	return false
 }
 
-// Include returns true if the key exists in the mapping
-//
-// Deprecated: use Contains
-func (args Args) Include(field string) bool {
-	_, ok := args.fields[field]
-	return ok
-}
-
 // Contains returns true if the key exists in the mapping
 func (args Args) Contains(field string) bool {
 	_, ok := args.fields[field]
@@ -323,6 +281,22 @@ func (args Args) WalkValues(field string, op func(value string) error) error {
 	return nil
 }
 
+// Clone returns a copy of args.
+func (args Args) Clone() (newArgs Args) {
+	newArgs.fields = make(map[string]map[string]bool, len(args.fields))
+	for k, m := range args.fields {
+		var mm map[string]bool
+		if m != nil {
+			mm = make(map[string]bool, len(m))
+			for kk, v := range m {
+				mm[kk] = v
+			}
+		}
+		newArgs.fields[k] = mm
+	}
+	return newArgs
+}
+
 func deprecatedArgs(d map[string][]string) map[string]map[string]bool {
 	m := map[string]map[string]bool{}
 	for k, v := range d {
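Reviewer note (not part of the diff): this bump drops the deprecated filters.ParseFlag/ToParam/FromParam/Include helpers and adds Args.Keys and Args.Clone. A minimal migration sketch, assuming a caller that previously built filters from key=value flags; the filter names used here are only illustrative:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	// Previously: args, _ := filters.ParseFlag("label=stage=build", filters.NewArgs())
	args := filters.NewArgs()
	args.Add("label", "stage=build")
	args.Add("dangling", "true")

	// Previously: filters.ToParam(args)
	encoded, err := filters.ToJSON(args)
	if err != nil {
		panic(err)
	}

	// New helpers in this version: Clone copies the whole set, Keys lists filter names.
	copied := args.Clone()
	fmt.Println(encoded, copied.Keys(), args.Contains("label"))
}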
3  vendor/github.com/docker/docker/api/types/mount/mount.go  generated vendored
@@ -79,7 +79,8 @@ const (
 
 // BindOptions defines options specific to mounts of type "bind".
 type BindOptions struct {
 	Propagation  Propagation `json:",omitempty"`
+	NonRecursive bool        `json:",omitempty"`
 }
 
 // VolumeOptions represents the options for a mount of type volume.
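Reviewer note (not part of the diff): BindOptions gains a NonRecursive flag. A hedged sketch of how a caller could set it; the host and container paths are placeholders:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/mount"
)

func main() {
	m := mount.Mount{
		Type:   mount.TypeBind,
		Source: "/host/data",      // placeholder path
		Target: "/container/data", // placeholder path
		BindOptions: &mount.BindOptions{
			Propagation:  mount.PropagationRPrivate,
			NonRecursive: true, // do not bind-mount submounts of the source
		},
	}
	fmt.Printf("%+v\n", m)
}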
13  vendor/github.com/docker/docker/api/types/network/network.go  generated vendored
@@ -112,12 +112,13 @@ type ConfigReference struct {
 }
 
 var acceptedFilters = map[string]bool{
-	"driver": true,
-	"type":   true,
-	"name":   true,
-	"id":     true,
-	"label":  true,
+	"dangling": true,
+	"driver":   true,
+	"id":       true,
+	"label":    true,
+	"name":     true,
 	"scope":    true,
+	"type":     true,
 }
 
 // ValidateFilters validates the list of filter args with the available filters.
5  vendor/github.com/docker/docker/api/types/seccomp.go  generated vendored
@@ -77,8 +77,9 @@ type Arg struct {
 
 // Filter is used to conditionally apply Seccomp rules
 type Filter struct {
 	Caps      []string `json:"caps,omitempty"`
 	Arches    []string `json:"arches,omitempty"`
+	MinKernel string   `json:"minKernel,omitempty"`
 }
 
 // Syscall is used to match a group of syscalls in Seccomp
2  vendor/github.com/docker/docker/api/types/stats.go  generated vendored
@@ -129,7 +129,7 @@ type NetworkStats struct {
 	TxBytes uint64 `json:"tx_bytes"`
 	// Packets sent. Windows and Linux.
 	TxPackets uint64 `json:"tx_packets"`
-	// Sent errors. Not used on Windows. Note that we dont `omitempty` this
+	// Sent errors. Not used on Windows. Note that we don't `omitempty` this
 	// field as it is expected in the >=v1.21 API stats structure.
 	TxErrors uint64 `json:"tx_errors"`
 	// Outgoing packets dropped. Windows and Linux.
7  vendor/github.com/docker/docker/api/types/swarm/config.go  generated vendored
@@ -27,9 +27,14 @@ type ConfigReferenceFileTarget struct {
 	Mode os.FileMode
 }
 
+// ConfigReferenceRuntimeTarget is a target for a config specifying that it
+// isn't mounted into the container but instead has some other purpose.
+type ConfigReferenceRuntimeTarget struct{}
+
 // ConfigReference is a reference to a config in swarm
 type ConfigReference struct {
-	File *ConfigReferenceFileTarget
+	File    *ConfigReferenceFileTarget    `json:",omitempty"`
+	Runtime *ConfigReferenceRuntimeTarget `json:",omitempty"`
 	ConfigID   string
 	ConfigName string
 }
2  vendor/github.com/docker/docker/api/types/swarm/container.go  generated vendored
@@ -33,6 +33,7 @@ type SELinuxContext struct {
 
 // CredentialSpec for managed service account (Windows only)
 type CredentialSpec struct {
+	Config   string
 	File     string
 	Registry string
 }
@@ -71,4 +72,5 @@ type ContainerSpec struct {
 	Secrets   []*SecretReference  `json:",omitempty"`
 	Configs   []*ConfigReference  `json:",omitempty"`
 	Isolation container.Isolation `json:",omitempty"`
+	Sysctls   map[string]string   `json:",omitempty"`
 }
4  vendor/github.com/docker/docker/api/types/swarm/swarm.go  generated vendored
@@ -14,6 +14,7 @@ type ClusterInfo struct {
 	RootRotationInProgress bool
 	DefaultAddrPool        []string
 	SubnetSize             uint32
+	DataPathPort           uint32
 }
 
 // Swarm represents a swarm.
@@ -153,6 +154,7 @@ type InitRequest struct {
 	ListenAddr       string
 	AdvertiseAddr    string
 	DataPathAddr     string
+	DataPathPort     uint32
 	ForceNewCluster  bool
 	Spec             Spec
 	AutoLockManagers bool
@@ -207,6 +209,8 @@ type Info struct {
 	Managers int `json:",omitempty"`
 
 	Cluster *ClusterInfo `json:",omitempty"`
+
+	Warnings []string `json:",omitempty"`
 }
 
 // Peer represents a peer.
1  vendor/github.com/docker/docker/api/types/swarm/task.go  generated vendored
@@ -127,6 +127,7 @@ type ResourceRequirements struct {
 type Placement struct {
 	Constraints []string              `json:",omitempty"`
 	Preferences []PlacementPreference `json:",omitempty"`
+	MaxReplicas uint64                `json:",omitempty"`
 
 	// Platforms stores all the platforms that the image can run on.
 	// This field is used in the platform filter for scheduling. If empty,
2  vendor/github.com/docker/docker/api/types/types.go  generated vendored
@@ -158,10 +158,12 @@ type Info struct {
 	MemoryLimit        bool
 	SwapLimit          bool
 	KernelMemory       bool
+	KernelMemoryTCP    bool
 	CPUCfsPeriod       bool `json:"CpuCfsPeriod"`
 	CPUCfsQuota        bool `json:"CpuCfsQuota"`
 	CPUShares          bool
 	CPUSet             bool
+	PidsLimit          bool
 	IPv4Forwarding     bool
 	BridgeNfIptables   bool
 	BridgeNfIP6tables  bool `json:"BridgeNfIp6tables"`
139  vendor/github.com/docker/docker/daemon/caps/utils.go  generated vendored
@@ -1,139 +0,0 @@
-package caps // import "github.com/docker/docker/daemon/caps"
-
-import (
-	"fmt"
-	"strings"
-
-	"github.com/syndtr/gocapability/capability"
-)
-
-var capabilityList Capabilities
-
-func init() {
-	last := capability.CAP_LAST_CAP
-	// hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap
-	if last == capability.Cap(63) {
-		last = capability.CAP_BLOCK_SUSPEND
-	}
-	for _, cap := range capability.List() {
-		if cap > last {
-			continue
-		}
-		capabilityList = append(capabilityList,
-			&CapabilityMapping{
-				Key:   "CAP_" + strings.ToUpper(cap.String()),
-				Value: cap,
-			},
-		)
-	}
-}
-
-type (
-	// CapabilityMapping maps linux capability name to its value of capability.Cap type
-	// Capabilities is one of the security systems in Linux Security Module (LSM)
-	// framework provided by the kernel.
-	// For more details on capabilities, see http://man7.org/linux/man-pages/man7/capabilities.7.html
-	CapabilityMapping struct {
-		Key   string         `json:"key,omitempty"`
-		Value capability.Cap `json:"value,omitempty"`
-	}
-	// Capabilities contains all CapabilityMapping
-	Capabilities []*CapabilityMapping
-)
-
-// String returns <key> of CapabilityMapping
-func (c *CapabilityMapping) String() string {
-	return c.Key
-}
-
-// GetCapability returns CapabilityMapping which contains specific key
-func GetCapability(key string) *CapabilityMapping {
-	for _, capp := range capabilityList {
-		if capp.Key == key {
-			cpy := *capp
-			return &cpy
-		}
-	}
-	return nil
-}
-
-// GetAllCapabilities returns all of the capabilities
-func GetAllCapabilities() []string {
-	output := make([]string, len(capabilityList))
-	for i, capability := range capabilityList {
-		output[i] = capability.String()
-	}
-	return output
-}
-
-// inSlice tests whether a string is contained in a slice of strings or not.
-// Comparison is case insensitive
-func inSlice(slice []string, s string) bool {
-	for _, ss := range slice {
-		if strings.ToLower(s) == strings.ToLower(ss) {
-			return true
-		}
-	}
-	return false
-}
-
-// TweakCapabilities can tweak capabilities by adding or dropping capabilities
-// based on the basics capabilities.
-func TweakCapabilities(basics, adds, drops []string) ([]string, error) {
-	var (
-		newCaps []string
-		allCaps = GetAllCapabilities()
-	)
-
-	// FIXME(tonistiigi): docker format is without CAP_ prefix, oci is with prefix
-	// Currently they are mixed in here. We should do conversion in one place.
-
-	// look for invalid cap in the drop list
-	for _, cap := range drops {
-		if strings.ToLower(cap) == "all" {
-			continue
-		}
-
-		if !inSlice(allCaps, "CAP_"+cap) {
-			return nil, fmt.Errorf("Unknown capability drop: %q", cap)
-		}
-	}
-
-	// handle --cap-add=all
-	if inSlice(adds, "all") {
-		basics = allCaps
-	}
-
-	if !inSlice(drops, "all") {
-		for _, cap := range basics {
-			// skip `all` already handled above
-			if strings.ToLower(cap) == "all" {
-				continue
-			}
-
-			// if we don't drop `all`, add back all the non-dropped caps
-			if !inSlice(drops, cap[4:]) {
-				newCaps = append(newCaps, strings.ToUpper(cap))
-			}
-		}
-	}
-
-	for _, cap := range adds {
-		// skip `all` already handled above
-		if strings.ToLower(cap) == "all" {
-			continue
-		}
-
-		cap = "CAP_" + cap
-
-		if !inSlice(allCaps, cap) {
-			return nil, fmt.Errorf("Unknown capability to add: %q", cap)
-		}
-
-		// add cap if not already in the list
-		if !inSlice(newCaps, cap) {
-			newCaps = append(newCaps, strings.ToUpper(cap))
-		}
-	}
-	return newCaps, nil
-}
169  vendor/github.com/docker/docker/oci/caps/utils.go  generated vendored  Normal file
@@ -0,0 +1,169 @@
+package caps // import "github.com/docker/docker/oci/caps"
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/docker/docker/errdefs"
+	"github.com/syndtr/gocapability/capability"
+)
+
+var capabilityList Capabilities
+
+func init() {
+	last := capability.CAP_LAST_CAP
+	// hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap
+	if last == capability.Cap(63) {
+		last = capability.CAP_BLOCK_SUSPEND
+	}
+	for _, cap := range capability.List() {
+		if cap > last {
+			continue
+		}
+		capabilityList = append(capabilityList,
+			&CapabilityMapping{
+				Key:   "CAP_" + strings.ToUpper(cap.String()),
+				Value: cap,
+			},
+		)
+	}
+}
+
+type (
+	// CapabilityMapping maps linux capability name to its value of capability.Cap type
+	// Capabilities is one of the security systems in Linux Security Module (LSM)
+	// framework provided by the kernel.
+	// For more details on capabilities, see http://man7.org/linux/man-pages/man7/capabilities.7.html
+	CapabilityMapping struct {
+		Key   string         `json:"key,omitempty"`
+		Value capability.Cap `json:"value,omitempty"`
+	}
+	// Capabilities contains all CapabilityMapping
+	Capabilities []*CapabilityMapping
+)
+
+// String returns <key> of CapabilityMapping
+func (c *CapabilityMapping) String() string {
+	return c.Key
+}
+
+// GetCapability returns CapabilityMapping which contains specific key
+func GetCapability(key string) *CapabilityMapping {
+	for _, capp := range capabilityList {
+		if capp.Key == key {
+			cpy := *capp
+			return &cpy
+		}
+	}
+	return nil
+}
+
+// GetAllCapabilities returns all of the capabilities
+func GetAllCapabilities() []string {
+	output := make([]string, len(capabilityList))
+	for i, capability := range capabilityList {
+		output[i] = capability.String()
+	}
+	return output
+}
+
+// inSlice tests whether a string is contained in a slice of strings or not.
+func inSlice(slice []string, s string) bool {
+	for _, ss := range slice {
+		if s == ss {
+			return true
+		}
+	}
+	return false
+}
+
+const allCapabilities = "ALL"
+
+// NormalizeLegacyCapabilities normalizes, and validates CapAdd/CapDrop capabilities
+// by upper-casing them, and adding a CAP_ prefix (if not yet present).
+//
+// This function also accepts the "ALL" magic-value, that's used by CapAdd/CapDrop.
+func NormalizeLegacyCapabilities(caps []string) ([]string, error) {
+	var normalized []string
+
+	valids := GetAllCapabilities()
+	for _, c := range caps {
+		c = strings.ToUpper(c)
+		if c == allCapabilities {
+			normalized = append(normalized, c)
+			continue
+		}
+		if !strings.HasPrefix(c, "CAP_") {
+			c = "CAP_" + c
+		}
+		if !inSlice(valids, c) {
+			return nil, errdefs.InvalidParameter(fmt.Errorf("unknown capability: %q", c))
+		}
+		normalized = append(normalized, c)
+	}
+	return normalized, nil
+}
+
+// ValidateCapabilities validates if caps only contains valid capabilities
+func ValidateCapabilities(caps []string) error {
+	valids := GetAllCapabilities()
+	for _, c := range caps {
+		if !inSlice(valids, c) {
+			return errdefs.InvalidParameter(fmt.Errorf("unknown capability: %q", c))
+		}
+	}
+	return nil
+}
+
+// TweakCapabilities tweaks capabilities by adding, dropping, or overriding
+// capabilities in the basics capabilities list.
+func TweakCapabilities(basics, adds, drops, capabilities []string, privileged bool) ([]string, error) {
+	switch {
+	case privileged:
+		// Privileged containers get all capabilities
+		return GetAllCapabilities(), nil
+	case capabilities != nil:
+		// Use custom set of capabilities
+		if err := ValidateCapabilities(capabilities); err != nil {
+			return nil, err
+		}
+		return capabilities, nil
+	case len(adds) == 0 && len(drops) == 0:
+		// Nothing to tweak; we're done
+		return basics, nil
+	}
+
+	capDrop, err := NormalizeLegacyCapabilities(drops)
+	if err != nil {
+		return nil, err
+	}
+	capAdd, err := NormalizeLegacyCapabilities(adds)
+	if err != nil {
+		return nil, err
+	}
+
+	var caps []string
+
+	switch {
+	case inSlice(capAdd, allCapabilities):
+		// Add all capabilities except ones on capDrop
+		for _, c := range GetAllCapabilities() {
+			if !inSlice(capDrop, c) {
+				caps = append(caps, c)
+			}
+		}
+	case inSlice(capDrop, allCapabilities):
+		// "Drop" all capabilities; use what's in capAdd instead
+		caps = capAdd
+	default:
+		// First drop some capabilities
+		for _, c := range basics {
+			if !inSlice(capDrop, c) {
+				caps = append(caps, c)
+			}
+		}
+		// Then add the list of capabilities from capAdd
+		caps = append(caps, capAdd...)
+	}
+	return caps, nil
+}
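Reviewer note (not part of the diff): the caps package moved from daemon/caps to oci/caps, and TweakCapabilities now takes an explicit capability list and a privileged flag in addition to basics/adds/drops. A minimal sketch of the new call shape; the add/drop values are only examples:

package main

import (
	"fmt"

	"github.com/docker/docker/oci/caps"
)

func main() {
	basics := caps.GetAllCapabilities() // illustrative; a real caller passes its default set

	// Old signature: caps.TweakCapabilities(basics, adds, drops)
	// New signature: the 4th argument overrides everything when non-nil,
	// and privileged=true short-circuits to all capabilities.
	effective, err := caps.TweakCapabilities(basics, []string{"NET_ADMIN"}, []string{"MKNOD"}, nil, false)
	if err != nil {
		panic(err)
	}
	fmt.Println(effective)
}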
15  vendor/github.com/docker/docker/opts/hosts.go  generated vendored
@@ -4,8 +4,11 @@ import (
 	"fmt"
 	"net"
 	"net/url"
+	"path/filepath"
 	"strconv"
 	"strings"
+
+	"github.com/docker/docker/pkg/homedir"
 )
 
 var (
@@ -41,12 +44,20 @@ func ValidateHost(val string) (string, error) {
 	return val, nil
 }
 
-// ParseHost and set defaults for a Daemon host string
-func ParseHost(defaultToTLS bool, val string) (string, error) {
+// ParseHost and set defaults for a Daemon host string.
+// defaultToTLS is preferred over defaultToUnixXDG.
+func ParseHost(defaultToTLS, defaultToUnixXDG bool, val string) (string, error) {
 	host := strings.TrimSpace(val)
 	if host == "" {
 		if defaultToTLS {
 			host = DefaultTLSHost
+		} else if defaultToUnixXDG {
+			runtimeDir, err := homedir.GetRuntimeDir()
+			if err != nil {
+				return "", err
+			}
+			socket := filepath.Join(runtimeDir, "docker.sock")
+			host = "unix://" + socket
 		} else {
 			host = DefaultHost
 		}
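Reviewer note (not part of the diff): ParseHost now takes a second boolean so an empty host string can resolve to a rootless XDG socket instead of the system default. A minimal sketch, assuming XDG_RUNTIME_DIR is set in the environment:

package main

import (
	"fmt"

	"github.com/docker/docker/opts"
)

func main() {
	// defaultToTLS=false, defaultToUnixXDG=true, empty value:
	// resolves to unix://$XDG_RUNTIME_DIR/docker.sock when the variable is set.
	host, err := opts.ParseHost(false, true, "")
	if err != nil {
		fmt.Println("XDG_RUNTIME_DIR not available:", err)
		return
	}
	fmt.Println(host)
}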
8  vendor/github.com/docker/docker/pkg/archive/archive.go  generated vendored
@@ -660,11 +660,13 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
 		var errors []string
 		for key, value := range hdr.Xattrs {
 			if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
-				if err == syscall.ENOTSUP {
+				if err == syscall.ENOTSUP || err == syscall.EPERM {
 					// We ignore errors here because not all graphdrivers support
 					// xattrs *cough* old versions of AUFS *cough*. However only
 					// ENOTSUP should be emitted in that case, otherwise we still
 					// bail.
+					// EPERM occurs if modifying xattrs is not allowed. This can
+					// happen when running in userns with restrictions (ChromeOS).
 					errors = append(errors, err.Error())
 					continue
 				}
@@ -743,7 +745,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
 			compressWriter,
 			options.ChownOpts,
 		)
-		ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat)
+		ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS)
 
 		defer func() {
 			// Make sure to check the error on Close.
@@ -901,7 +903,7 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err
 	var dirs []*tar.Header
 	idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
 	rootIDs := idMapping.RootPair()
-	whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat)
+	whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS)
 
 	// Iterate through the files in the archive.
 loop:
179  vendor/github.com/docker/docker/pkg/archive/archive_linux.go  generated vendored
@@ -2,22 +2,29 @@ package archive // import "github.com/docker/docker/pkg/archive"
 
 import (
 	"archive/tar"
+	"fmt"
+	"io/ioutil"
 	"os"
 	"path/filepath"
 	"strings"
+	"syscall"
 
+	"github.com/containerd/continuity/fs"
 	"github.com/docker/docker/pkg/system"
+	"github.com/pkg/errors"
 	"golang.org/x/sys/unix"
 )
 
-func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
+func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) tarWhiteoutConverter {
 	if format == OverlayWhiteoutFormat {
-		return overlayWhiteoutConverter{}
+		return overlayWhiteoutConverter{inUserNS: inUserNS}
 	}
 	return nil
 }
 
-type overlayWhiteoutConverter struct{}
+type overlayWhiteoutConverter struct {
+	inUserNS bool
+}
 
 func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) {
 	// convert whiteouts to AUFS format
@@ -61,13 +68,22 @@ func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os
 	return
 }
 
-func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
+func (c overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
 	base := filepath.Base(path)
 	dir := filepath.Dir(path)
 
 	// if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay
 	if base == WhiteoutOpaqueDir {
 		err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0)
+		if err != nil {
+			if c.inUserNS {
+				if err = replaceDirWithOverlayOpaque(dir); err != nil {
+					return false, errors.Wrapf(err, "replaceDirWithOverlayOpaque(%q) failed", dir)
+				}
+			} else {
+				return false, errors.Wrapf(err, "setxattr(%q, trusted.overlay.opaque=y)", dir)
+			}
+		}
 		// don't write the file itself
 		return false, err
 	}
@@ -78,7 +94,19 @@ func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool,
 		originalPath := filepath.Join(dir, originalBase)
 
 		if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {
-			return false, err
+			if c.inUserNS {
+				// Ubuntu and a few distros support overlayfs in userns.
+				//
+				// Although we can't call mknod directly in userns (at least on bionic kernel 4.15),
+				// we can still create 0,0 char device using mknodChar0Overlay().
+				//
+				// NOTE: we don't need this hack for the containerd snapshotter+unpack model.
+				if err := mknodChar0Overlay(originalPath); err != nil {
+					return false, errors.Wrapf(err, "failed to mknodChar0UserNS(%q)", originalPath)
+				}
+			} else {
+				return false, errors.Wrapf(err, "failed to mknod(%q, S_IFCHR, 0)", originalPath)
+			}
 		}
 		if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
 			return false, err
@@ -90,3 +118,144 @@ func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool,
 
 	return true, nil
 }
+
+// mknodChar0Overlay creates 0,0 char device by mounting overlayfs and unlinking.
+// This function can be used for creating 0,0 char device in userns on Ubuntu.
+//
+// Steps:
+// * Mkdir lower,upper,merged,work
+// * Create lower/dummy
+// * Mount overlayfs
+// * Unlink merged/dummy
+// * Unmount overlayfs
+// * Make sure a 0,0 char device is created as upper/dummy
+// * Rename upper/dummy to cleansedOriginalPath
+func mknodChar0Overlay(cleansedOriginalPath string) error {
+	dir := filepath.Dir(cleansedOriginalPath)
+	tmp, err := ioutil.TempDir(dir, "mc0o")
+	if err != nil {
+		return errors.Wrapf(err, "failed to create a tmp directory under %s", dir)
+	}
+	defer os.RemoveAll(tmp)
+	lower := filepath.Join(tmp, "l")
+	upper := filepath.Join(tmp, "u")
+	work := filepath.Join(tmp, "w")
+	merged := filepath.Join(tmp, "m")
+	for _, s := range []string{lower, upper, work, merged} {
+		if err := os.MkdirAll(s, 0700); err != nil {
+			return errors.Wrapf(err, "failed to mkdir %s", s)
+		}
+	}
+	dummyBase := "d"
+	lowerDummy := filepath.Join(lower, dummyBase)
+	if err := ioutil.WriteFile(lowerDummy, []byte{}, 0600); err != nil {
+		return errors.Wrapf(err, "failed to create a dummy lower file %s", lowerDummy)
+	}
+	mOpts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower, upper, work)
+	// docker/pkg/mount.Mount() requires procfs to be mounted. So we use syscall.Mount() directly instead.
+	if err := syscall.Mount("overlay", merged, "overlay", uintptr(0), mOpts); err != nil {
+		return errors.Wrapf(err, "failed to mount overlay (%s) on %s", mOpts, merged)
+	}
+	mergedDummy := filepath.Join(merged, dummyBase)
+	if err := os.Remove(mergedDummy); err != nil {
+		syscall.Unmount(merged, 0)
+		return errors.Wrapf(err, "failed to unlink %s", mergedDummy)
+	}
+	if err := syscall.Unmount(merged, 0); err != nil {
+		return errors.Wrapf(err, "failed to unmount %s", merged)
+	}
+	upperDummy := filepath.Join(upper, dummyBase)
+	if err := isChar0(upperDummy); err != nil {
+		return err
+	}
+	if err := os.Rename(upperDummy, cleansedOriginalPath); err != nil {
+		return errors.Wrapf(err, "failed to rename %s to %s", upperDummy, cleansedOriginalPath)
+	}
+	return nil
+}
+
+func isChar0(path string) error {
+	osStat, err := os.Stat(path)
+	if err != nil {
+		return errors.Wrapf(err, "failed to stat %s", path)
+	}
+	st, ok := osStat.Sys().(*syscall.Stat_t)
+	if !ok {
+		return errors.Errorf("got unsupported stat for %s", path)
+	}
+	if os.FileMode(st.Mode)&syscall.S_IFMT != syscall.S_IFCHR {
+		return errors.Errorf("%s is not a character device, got mode=%d", path, st.Mode)
+	}
+	if st.Rdev != 0 {
+		return errors.Errorf("%s is not a 0,0 character device, got Rdev=%d", path, st.Rdev)
+	}
+	return nil
+}
+
+// replaceDirWithOverlayOpaque replaces path with a new directory with trusted.overlay.opaque
+// xattr. The contents of the directory are preserved.
+func replaceDirWithOverlayOpaque(path string) error {
+	if path == "/" {
+		return errors.New("replaceDirWithOverlayOpaque: path must not be \"/\"")
+	}
+	dir := filepath.Dir(path)
+	tmp, err := ioutil.TempDir(dir, "rdwoo")
+	if err != nil {
+		return errors.Wrapf(err, "failed to create a tmp directory under %s", dir)
+	}
+	defer os.RemoveAll(tmp)
+	// newPath is a new empty directory crafted with trusted.overlay.opaque xattr.
+	// we copy the content of path into newPath, remove path, and rename newPath to path.
+	newPath, err := createDirWithOverlayOpaque(tmp)
+	if err != nil {
+		return errors.Wrapf(err, "createDirWithOverlayOpaque(%q) failed", tmp)
+	}
+	if err := fs.CopyDir(newPath, path); err != nil {
+		return errors.Wrapf(err, "CopyDir(%q, %q) failed", newPath, path)
+	}
+	if err := os.RemoveAll(path); err != nil {
+		return err
+	}
+	return os.Rename(newPath, path)
+}
+
+// createDirWithOverlayOpaque creates a directory with trusted.overlay.opaque xattr,
+// without calling setxattr, so as to allow creating opaque dir in userns on Ubuntu.
+func createDirWithOverlayOpaque(tmp string) (string, error) {
+	lower := filepath.Join(tmp, "l")
+	upper := filepath.Join(tmp, "u")
+	work := filepath.Join(tmp, "w")
+	merged := filepath.Join(tmp, "m")
+	for _, s := range []string{lower, upper, work, merged} {
+		if err := os.MkdirAll(s, 0700); err != nil {
+			return "", errors.Wrapf(err, "failed to mkdir %s", s)
+		}
+	}
+	dummyBase := "d"
+	lowerDummy := filepath.Join(lower, dummyBase)
+	if err := os.MkdirAll(lowerDummy, 0700); err != nil {
+		return "", errors.Wrapf(err, "failed to create a dummy lower directory %s", lowerDummy)
+	}
+	mOpts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower, upper, work)
+	// docker/pkg/mount.Mount() requires procfs to be mounted. So we use syscall.Mount() directly instead.
+	if err := syscall.Mount("overlay", merged, "overlay", uintptr(0), mOpts); err != nil {
+		return "", errors.Wrapf(err, "failed to mount overlay (%s) on %s", mOpts, merged)
+	}
+	mergedDummy := filepath.Join(merged, dummyBase)
+	if err := os.Remove(mergedDummy); err != nil {
+		syscall.Unmount(merged, 0)
+		return "", errors.Wrapf(err, "failed to rmdir %s", mergedDummy)
+	}
+	// upperDummy becomes a 0,0-char device file here
+	if err := os.Mkdir(mergedDummy, 0700); err != nil {
+		syscall.Unmount(merged, 0)
+		return "", errors.Wrapf(err, "failed to mkdir %s", mergedDummy)
+	}
+	// upperDummy becomes a directory with trusted.overlay.opaque xattr
+	// (but can't be verified in userns)
+	if err := syscall.Unmount(merged, 0); err != nil {
+		return "", errors.Wrapf(err, "failed to unmount %s", merged)
+	}
+	upperDummy := filepath.Join(upper, dummyBase)
+	return upperDummy, nil
+}
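Reviewer note (not part of the diff): the whiteout converter now carries an inUserNS flag, threaded from TarOptions, so opaque directories and whiteout char devices can be recreated inside a user namespace via the overlayfs-mount fallbacks above instead of direct setxattr/mknod calls. A hedged sketch of a caller opting in; both paths are placeholders:

package main

import (
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	f, err := os.Open("/path/to/layer.tar") // placeholder path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// With InUserNS set, the userns fallbacks in this file are used during extraction.
	err = archive.Untar(f, "/path/to/dest", &archive.TarOptions{ // placeholder path
		WhiteoutFormat: archive.OverlayWhiteoutFormat,
		InUserNS:       true,
	})
	if err != nil {
		panic(err)
	}
}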
2  vendor/github.com/docker/docker/pkg/archive/archive_other.go  generated vendored
@@ -2,6 +2,6 @@
 
 package archive // import "github.com/docker/docker/pkg/archive"
 
-func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
+func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) tarWhiteoutConverter {
 	return nil
 }
12  vendor/github.com/docker/docker/pkg/archive/changes.go  generated vendored
@@ -63,12 +63,16 @@ func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
 func (c changesByPath) Len() int           { return len(c) }
 func (c changesByPath) Swap(i, j int)      { c[j], c[i] = c[i], c[j] }
 
-// Gnu tar and the go tar writer don't have sub-second mtime
-// precision, which is problematic when we apply changes via tar
-// files, we handle this by comparing for exact times, *or* same
+// Gnu tar doesn't have sub-second mtime precision. The go tar
+// writer (1.10+) does when using PAX format, but we round times to seconds
+// to ensure archives have the same hashes for backwards compatibility.
+// See https://github.com/moby/moby/pull/35739/commits/fb170206ba12752214630b269a40ac7be6115ed4.
+//
+// Non-sub-second is problematic when we apply changes via tar
+// files. We handle this by comparing for exact times, *or* same
 // second count and either a or b having exactly 0 nanoseconds
 func sameFsTime(a, b time.Time) bool {
-	return a == b ||
+	return a.Equal(b) ||
 		(a.Unix() == b.Unix() &&
 			(a.Nanosecond() == 0 || b.Nanosecond() == 0))
 }
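Reviewer note (not part of the diff): the switch from a == b to a.Equal(b) matters because Go's == on time.Time also compares the monotonic clock reading and location, not just the instant. A small standalone illustration of the comparison the vendored helper performs (the helper is unexported, so it is re-stated here for the example):

package main

import (
	"fmt"
	"time"
)

// Re-statement of the vendored helper for illustration: exact instant match,
// or same second when one side has been truncated to whole seconds.
func sameFsTime(a, b time.Time) bool {
	return a.Equal(b) ||
		(a.Unix() == b.Unix() &&
			(a.Nanosecond() == 0 || b.Nanosecond() == 0))
}

func main() {
	withNanos := time.Date(2019, 7, 1, 12, 0, 0, 123456789, time.UTC)
	truncated := withNanos.Truncate(time.Second) // what a tar round-trip produces

	fmt.Println(withNanos == truncated)           // false: nanoseconds differ
	fmt.Println(sameFsTime(withNanos, truncated)) // true: same second, one side has 0 ns
}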
8  vendor/github.com/docker/docker/pkg/archive/changes_unix.go  generated vendored
@@ -16,7 +16,13 @@ func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
 		oldStat.UID() != newStat.UID() ||
 		oldStat.GID() != newStat.GID() ||
 		oldStat.Rdev() != newStat.Rdev() ||
-		// Don't look at size for dirs, its not a good measure of change
+		// Don't look at size or modification time for dirs, its not a good
+		// measure of change. See https://github.com/moby/moby/issues/9874
+		// for a description of the issue with modification time, and
+		// https://github.com/moby/moby/pull/11422 for the change.
+		// (Note that in the Windows implementation of this function,
+		// modification time IS taken as a change). See
+		// https://github.com/moby/moby/pull/37982 for more information.
 		(oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR &&
 			(!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
 		return true
8  vendor/github.com/docker/docker/pkg/archive/changes_windows.go  generated vendored
@@ -7,9 +7,13 @@ import (
 )
 
 func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
+	// Note there is slight difference between the Linux and Windows
+	// implementations here. Due to https://github.com/moby/moby/issues/9874,
+	// and the fix at https://github.com/moby/moby/pull/11422, Linux does not
+	// consider a change to the directory time as a change. Windows on NTFS
+	// does. See https://github.com/moby/moby/pull/37982 for more information.
 
-	// Don't look at size for dirs, its not a good measure of change
-	if oldStat.Mtim() != newStat.Mtim() ||
+	if !sameFsTime(oldStat.Mtim(), newStat.Mtim()) ||
 		oldStat.Mode() != newStat.Mode() ||
 		oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() {
 		return true
10  vendor/github.com/docker/docker/pkg/archive/diff.go  generated vendored
@@ -240,11 +240,13 @@ func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decomp
 	dest = filepath.Clean(dest)
 
 	// We need to be able to set any perms
-	oldmask, err := system.Umask(0)
-	if err != nil {
-		return 0, err
+	if runtime.GOOS != "windows" {
+		oldmask, err := system.Umask(0)
+		if err != nil {
+			return 0, err
+		}
+		defer system.Umask(oldmask)
 	}
-	defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform
 
 	if decompress {
 		decompLayer, err := DecompressStream(layer)
98  vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go  generated vendored
@@ -1,21 +1,93 @@
 package homedir // import "github.com/docker/docker/pkg/homedir"
 
 import (
+	"errors"
 	"os"
-
-	"github.com/docker/docker/pkg/idtools"
+	"path/filepath"
+	"strings"
 )
 
-// GetStatic returns the home directory for the current user without calling
-// os/user.Current(). This is useful for static-linked binary on glibc-based
-// system, because a call to os/user.Current() in a static binary leads to
-// segfault due to a glibc issue that won't be fixed in a short term.
-// (#29344, golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341)
-func GetStatic() (string, error) {
-	uid := os.Getuid()
-	usr, err := idtools.LookupUID(uid)
-	if err != nil {
-		return "", err
+// GetRuntimeDir returns XDG_RUNTIME_DIR.
+// XDG_RUNTIME_DIR is typically configured via pam_systemd.
+// GetRuntimeDir returns non-nil error if XDG_RUNTIME_DIR is not set.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func GetRuntimeDir() (string, error) {
+	if xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR"); xdgRuntimeDir != "" {
+		return xdgRuntimeDir, nil
 	}
-	return usr.Home, nil
+	return "", errors.New("could not get XDG_RUNTIME_DIR")
+}
+
+// StickRuntimeDirContents sets the sticky bit on files that are under
+// XDG_RUNTIME_DIR, so that the files won't be periodically removed by the system.
+//
+// StickyRuntimeDir returns slice of sticked files.
+// StickyRuntimeDir returns nil error if XDG_RUNTIME_DIR is not set.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func StickRuntimeDirContents(files []string) ([]string, error) {
+	runtimeDir, err := GetRuntimeDir()
+	if err != nil {
+		// ignore error if runtimeDir is empty
+		return nil, nil
+	}
+	runtimeDir, err = filepath.Abs(runtimeDir)
+	if err != nil {
+		return nil, err
+	}
+	var sticked []string
+	for _, f := range files {
+		f, err = filepath.Abs(f)
+		if err != nil {
+			return sticked, err
+		}
+		if strings.HasPrefix(f, runtimeDir+"/") {
+			if err = stick(f); err != nil {
+				return sticked, err
+			}
+			sticked = append(sticked, f)
+		}
+	}
+	return sticked, nil
+}
+
+func stick(f string) error {
+	st, err := os.Stat(f)
+	if err != nil {
+		return err
+	}
+	m := st.Mode()
+	m |= os.ModeSticky
+	return os.Chmod(f, m)
+}
+
+// GetDataHome returns XDG_DATA_HOME.
+// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func GetDataHome() (string, error) {
+	if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" {
+		return xdgDataHome, nil
+	}
+	home := os.Getenv("HOME")
+	if home == "" {
+		return "", errors.New("could not get either XDG_DATA_HOME or HOME")
+	}
+	return filepath.Join(home, ".local", "share"), nil
+}
+
+// GetConfigHome returns XDG_CONFIG_HOME.
+// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func GetConfigHome() (string, error) {
+	if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" {
+		return xdgConfigHome, nil
+	}
+	home := os.Getenv("HOME")
+	if home == "" {
+		return "", errors.New("could not get either XDG_CONFIG_HOME or HOME")
+	}
+	return filepath.Join(home, ".config"), nil
 }
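Reviewer note (not part of the diff): GetStatic is replaced by XDG-style helpers that rootless setups rely on. A minimal sketch of how they behave with and without the corresponding environment variables:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/homedir"
)

func main() {
	if runtimeDir, err := homedir.GetRuntimeDir(); err == nil {
		fmt.Println("runtime dir:", runtimeDir) // $XDG_RUNTIME_DIR
	} else {
		fmt.Println("no XDG_RUNTIME_DIR:", err)
	}

	configHome, err := homedir.GetConfigHome() // $XDG_CONFIG_HOME or $HOME/.config
	if err != nil {
		panic(err) // only fails when neither the variable nor HOME is set
	}
	fmt.Println("config home:", configHome)
}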
22  vendor/github.com/docker/docker/pkg/homedir/homedir_others.go  generated vendored
@@ -6,8 +6,22 @@ import (
 	"errors"
 )
 
-// GetStatic is not needed for non-linux systems.
-// (Precisely, it is needed only for glibc-based linux systems.)
-func GetStatic() (string, error) {
-	return "", errors.New("homedir.GetStatic() is not supported on this system")
+// GetRuntimeDir is unsupported on non-linux system.
+func GetRuntimeDir() (string, error) {
+	return "", errors.New("homedir.GetRuntimeDir() is not supported on this system")
+}
+
+// StickRuntimeDirContents is unsupported on non-linux system.
+func StickRuntimeDirContents(files []string) ([]string, error) {
+	return nil, errors.New("homedir.StickRuntimeDirContents() is not supported on this system")
+}
+
+// GetDataHome is unsupported on non-linux system.
+func GetDataHome() (string, error) {
+	return "", errors.New("homedir.GetDataHome() is not supported on this system")
+}
+
+// GetConfigHome is unsupported on non-linux system.
+func GetConfigHome() (string, error) {
+	return "", errors.New("homedir.GetConfigHome() is not supported on this system")
 }
12  vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go  generated vendored
@@ -4,8 +4,7 @@ package homedir // import "github.com/docker/docker/pkg/homedir"
 
 import (
 	"os"
-
-	"github.com/opencontainers/runc/libcontainer/user"
+	"os/user"
 )
 
 // Key returns the env var name for the user's home dir based on
@@ -17,11 +16,16 @@ func Key() string {
 // Get returns the home directory of the current user with the help of
 // environment variables depending on the target operating system.
 // Returned path should be used with "path/filepath" to form new paths.
+//
+// If linking statically with cgo enabled against glibc, ensure the
+// osusergo build tag is used.
+//
+// If needing to do nss lookups, do not disable cgo or set osusergo.
 func Get() string {
 	home := os.Getenv(Key())
 	if home == "" {
-		if u, err := user.CurrentUser(); err == nil {
-			return u.Home
+		if u, err := user.Current(); err == nil {
+			return u.HomeDir
 		}
 	}
 	return home
104
vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
generated
vendored
104
vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
generated
vendored
|
@ -4,13 +4,12 @@ import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"os"
|
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/Nvveen/Gotty"
|
|
||||||
"github.com/docker/docker/pkg/term"
|
"github.com/docker/docker/pkg/term"
|
||||||
"github.com/docker/go-units"
|
"github.com/docker/go-units"
|
||||||
|
"github.com/morikuni/aec"
|
||||||
)
|
)
|
||||||
|
|
||||||
// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to
|
// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to
|
||||||
|
@ -151,60 +150,23 @@ type JSONMessage struct {
|
||||||
Aux *json.RawMessage `json:"aux,omitempty"`
|
Aux *json.RawMessage `json:"aux,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Satisfied by gotty.TermInfo as well as noTermInfo from below */
|
func clearLine(out io.Writer) {
|
||||||
type termInfo interface {
|
eraseMode := aec.EraseModes.All
|
||||||
Parse(attr string, params ...interface{}) (string, error)
|
cl := aec.EraseLine(eraseMode)
|
||||||
|
fmt.Fprint(out, cl)
|
||||||
}
|
}
|
||||||
|
|
||||||
type noTermInfo struct{} // canary used when no terminfo.
|
func cursorUp(out io.Writer, l uint) {
|
||||||
|
fmt.Fprint(out, aec.Up(l))
|
||||||
func (ti *noTermInfo) Parse(attr string, params ...interface{}) (string, error) {
|
|
||||||
return "", fmt.Errorf("noTermInfo")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func clearLine(out io.Writer, ti termInfo) {
|
func cursorDown(out io.Writer, l uint) {
|
||||||
// el2 (clear whole line) is not exposed by terminfo.
|
fmt.Fprint(out, aec.Down(l))
|
||||||
|
|
||||||
// First clear line from beginning to cursor
|
|
||||||
if attr, err := ti.Parse("el1"); err == nil {
|
|
||||||
fmt.Fprintf(out, "%s", attr)
|
|
||||||
} else {
|
|
||||||
fmt.Fprintf(out, "\x1b[1K")
|
|
||||||
}
|
|
||||||
// Then clear line from cursor to end
|
|
||||||
if attr, err := ti.Parse("el"); err == nil {
|
|
||||||
fmt.Fprintf(out, "%s", attr)
|
|
||||||
} else {
|
|
||||||
fmt.Fprintf(out, "\x1b[K")
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func cursorUp(out io.Writer, ti termInfo, l int) {
|
// Display displays the JSONMessage to `out`. If `isTerminal` is true, it will erase the
|
||||||
if l == 0 { // Should never be the case, but be tolerant
|
// entire current line when displaying the progressbar.
|
||||||
return
|
func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error {
|
||||||
}
|
|
||||||
if attr, err := ti.Parse("cuu", l); err == nil {
|
|
||||||
fmt.Fprintf(out, "%s", attr)
|
|
||||||
} else {
|
|
||||||
fmt.Fprintf(out, "\x1b[%dA", l)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func cursorDown(out io.Writer, ti termInfo, l int) {
|
|
||||||
if l == 0 { // Should never be the case, but be tolerant
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if attr, err := ti.Parse("cud", l); err == nil {
|
|
||||||
fmt.Fprintf(out, "%s", attr)
|
|
||||||
} else {
|
|
||||||
fmt.Fprintf(out, "\x1b[%dB", l)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Display displays the JSONMessage to `out`. `termInfo` is non-nil if `out`
|
|
||||||
// is a terminal. If this is the case, it will erase the entire current line
|
|
||||||
// when displaying the progressbar.
|
|
||||||
func (jm *JSONMessage) Display(out io.Writer, termInfo termInfo) error {
|
|
||||||
if jm.Error != nil {
|
if jm.Error != nil {
|
||||||
if jm.Error.Code == 401 {
|
if jm.Error.Code == 401 {
|
||||||
return fmt.Errorf("authentication is required")
|
return fmt.Errorf("authentication is required")
|
||||||
|
@ -212,8 +174,8 @@ func (jm *JSONMessage) Display(out io.Writer, termInfo termInfo) error {
|
||||||
return jm.Error
|
return jm.Error
|
||||||
}
|
}
|
||||||
var endl string
|
var endl string
|
||||||
if termInfo != nil && jm.Stream == "" && jm.Progress != nil {
|
if isTerminal && jm.Stream == "" && jm.Progress != nil {
|
||||||
clearLine(out, termInfo)
|
clearLine(out)
|
||||||
endl = "\r"
|
endl = "\r"
|
||||||
fmt.Fprintf(out, endl)
|
fmt.Fprintf(out, endl)
|
||||||
} else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal
|
 	} else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal
@@ -230,7 +192,7 @@ func (jm *JSONMessage) Display(out io.Writer, termInfo termInfo) error {
 	if jm.From != "" {
 		fmt.Fprintf(out, "(from %s) ", jm.From)
 	}
-	if jm.Progress != nil && termInfo != nil {
+	if jm.Progress != nil && isTerminal {
 		fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl)
 	} else if jm.ProgressMessage != "" { //deprecated
 		fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl)
@@ -248,25 +210,11 @@ func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(JSONMessage)) error {
 func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(JSONMessage)) error {
 	var (
 		dec = json.NewDecoder(in)
-		ids = make(map[string]int)
+		ids = make(map[string]uint)
 	)
 
-	var termInfo termInfo
-
-	if isTerminal {
-		term := os.Getenv("TERM")
-		if term == "" {
-			term = "vt102"
-		}
-
-		var err error
-		if termInfo, err = gotty.OpenTermInfo(term); err != nil {
-			termInfo = &noTermInfo{}
-		}
-	}
-
 	for {
-		diff := 0
+		var diff uint
 		var jm JSONMessage
 		if err := dec.Decode(&jm); err != nil {
 			if err == io.EOF {
@@ -294,15 +242,15 @@ func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr,
 				// when we output something that's not
 				// accounted for in the map, such as a line
 				// with no ID.
-				line = len(ids)
+				line = uint(len(ids))
 				ids[jm.ID] = line
-				if termInfo != nil {
+				if isTerminal {
 					fmt.Fprintf(out, "\n")
 				}
 			}
-			diff = len(ids) - line
-			if termInfo != nil {
-				cursorUp(out, termInfo, diff)
+			diff = uint(len(ids)) - line
+			if isTerminal {
+				cursorUp(out, diff)
 			}
 		} else {
 			// When outputting something that isn't progress
@@ -310,11 +258,11 @@ func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr,
 			// don't want progress entries from some previous
 			// operation to be updated (for example, pull -a
 			// with multiple tags).
-			ids = make(map[string]int)
+			ids = make(map[string]uint)
 		}
-		err := jm.Display(out, termInfo)
-		if jm.ID != "" && termInfo != nil {
-			cursorDown(out, termInfo, diff)
+		err := jm.Display(out, isTerminal)
+		if jm.ID != "" && isTerminal {
+			cursorDown(out, diff)
 		}
 		if err != nil {
 			return err
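The hunks above drop the gotty termInfo plumbing: Display and DisplayJSONMessagesStream now take a plain isTerminal flag, line bookkeeping moves to uint, and cursorUp/cursorDown lose their termInfo argument. The new helpers themselves are outside these hunks; below is a minimal sketch of what they could look like, assuming they simply emit the escape codes from the vendored github.com/morikuni/aec package added later in this diff (the real implementation may differ).

```go
package jsonmessage

import (
	"fmt"
	"io"

	"github.com/morikuni/aec"
)

// Sketch only: cursor movement without a termInfo lookup. aec.Up/aec.Down
// already return an empty sequence for n == 0, so no extra guard is needed.
func cursorUp(out io.Writer, l uint) {
	fmt.Fprint(out, aec.Up(l))
}

func cursorDown(out io.Writer, l uint) {
	fmt.Fprint(out, aec.Down(l))
}
```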
1  vendor/github.com/docker/docker/registry/auth.go  generated vendored
@@ -248,7 +248,6 @@ func (err PingResponseError) Error() string {
 // challenge manager for the supported authentication types and
 // whether v2 was confirmed by the response. If a response is received but
 // cannot be interpreted a PingResponseError will be returned.
-// nolint: interfacer
 func PingV2Registry(endpoint *url.URL, transport http.RoundTripper) (challenge.Manager, bool, error) {
 	var (
 		foundV2 = false
6  vendor/github.com/docker/docker/registry/config.go  generated vendored
@@ -19,16 +19,11 @@ type ServiceOptions struct {
 	AllowNondistributableArtifacts []string `json:"allow-nondistributable-artifacts,omitempty"`
 	Mirrors                        []string `json:"registry-mirrors,omitempty"`
 	InsecureRegistries             []string `json:"insecure-registries,omitempty"`
-
-	// V2Only controls access to legacy registries. If it is set to true via the
-	// command line flag the daemon will not attempt to contact v1 legacy registries
-	V2Only bool `json:"disable-legacy-registry,omitempty"`
 }
 
 // serviceConfig holds daemon configuration for the registry service.
 type serviceConfig struct {
 	registrytypes.ServiceConfig
-	V2Only bool
 }
 
 var (
@@ -76,7 +71,6 @@ func newServiceConfig(options ServiceOptions) (*serviceConfig, error) {
 			// Hack: Bypass setting the mirrors to IndexConfigs since they are going away
 			// and Mirrors are only for the official registry anyways.
 		},
-		V2Only: options.V2Only,
 	}
 	if err := config.LoadAllowNondistributableArtifacts(options.AllowNondistributableArtifacts); err != nil {
 		return nil, err
17  vendor/github.com/docker/docker/registry/registry.go  generated vendored
@@ -17,6 +17,9 @@ import (
 	"github.com/docker/go-connections/sockets"
 	"github.com/docker/go-connections/tlsconfig"
 	"github.com/sirupsen/logrus"
+
+	"github.com/docker/docker/pkg/homedir"
+	"github.com/docker/docker/rootless"
 )
 
 var (
@@ -32,7 +35,19 @@ func newTLSConfig(hostname string, isSecure bool) (*tls.Config, error) {
 	tlsConfig.InsecureSkipVerify = !isSecure
 
 	if isSecure && CertsDir != "" {
-		hostDir := filepath.Join(CertsDir, cleanPath(hostname))
+		certsDir := CertsDir
+
+		if rootless.RunningWithRootlessKit() {
+			configHome, err := homedir.GetConfigHome()
+			if err != nil {
+				return nil, err
+			}
+
+			certsDir = filepath.Join(configHome, "docker/certs.d")
+		}
+
+		hostDir := filepath.Join(certsDir, cleanPath(hostname))
+
 		logrus.Debugf("hostDir: %s", hostDir)
 		if err := ReadCertsDirectory(tlsConfig, hostDir); err != nil {
 			return nil, err
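The newTLSConfig hunk above swaps the fixed CertsDir for a per-user directory when the daemon runs under RootlessKit. A standalone sketch of that resolution logic, assuming the usual system default of /etc/docker/certs.d and omitting the cleanPath sanitisation the real code applies to the hostname:

```go
package main

import (
	"fmt"
	"path/filepath"

	"github.com/docker/docker/pkg/homedir"
	"github.com/docker/docker/rootless"
)

// resolveCertsDir mirrors the added logic: rootless daemons read registry
// certificates from $XDG_CONFIG_HOME/docker/certs.d instead of the
// system-wide directory.
func resolveCertsDir(systemCertsDir, hostname string) (string, error) {
	certsDir := systemCertsDir
	if rootless.RunningWithRootlessKit() {
		configHome, err := homedir.GetConfigHome()
		if err != nil {
			return "", err
		}
		certsDir = filepath.Join(configHome, "docker/certs.d")
	}
	return filepath.Join(certsDir, hostname), nil
}

func main() {
	dir, err := resolveCertsDir("/etc/docker/certs.d", "registry.example.com:5000")
	fmt.Println(dir, err)
}
```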
17  vendor/github.com/docker/docker/registry/service.go  generated vendored
@@ -309,20 +309,5 @@ func (s *DefaultService) LookupPushEndpoints(hostname string) (endpoints []APIEn
 }
 
 func (s *DefaultService) lookupEndpoints(hostname string) (endpoints []APIEndpoint, err error) {
-	endpoints, err = s.lookupV2Endpoints(hostname)
-	if err != nil {
-		return nil, err
-	}
-
-	if s.config.V2Only {
-		return endpoints, nil
-	}
-
-	legacyEndpoints, err := s.lookupV1Endpoints(hostname)
-	if err != nil {
-		return nil, err
-	}
-	endpoints = append(endpoints, legacyEndpoints...)
-
-	return endpoints, nil
+	return s.lookupV2Endpoints(hostname)
 }
40  vendor/github.com/docker/docker/registry/service_v1.go  generated vendored
@@ -1,40 +0,0 @@
-package registry // import "github.com/docker/docker/registry"
-
-import "net/url"
-
-func (s *DefaultService) lookupV1Endpoints(hostname string) (endpoints []APIEndpoint, err error) {
-	if hostname == DefaultNamespace || hostname == DefaultV2Registry.Host || hostname == IndexHostname {
-		return []APIEndpoint{}, nil
-	}
-
-	tlsConfig, err := s.tlsConfig(hostname)
-	if err != nil {
-		return nil, err
-	}
-
-	endpoints = []APIEndpoint{
-		{
-			URL: &url.URL{
-				Scheme: "https",
-				Host:   hostname,
-			},
-			Version:      APIVersion1,
-			TrimHostname: true,
-			TLSConfig:    tlsConfig,
-		},
-	}
-
-	if tlsConfig.InsecureSkipVerify {
-		endpoints = append(endpoints, APIEndpoint{ // or this
-			URL: &url.URL{
-				Scheme: "http",
-				Host:   hostname,
-			},
-			Version:      APIVersion1,
-			TrimHostname: true,
-			// used to check if supposed to be secure via InsecureSkipVerify
-			TLSConfig: tlsConfig,
-		})
-	}
-	return endpoints, nil
-}
25  vendor/github.com/docker/docker/rootless/rootless.go  generated vendored Normal file
@@ -0,0 +1,25 @@
+package rootless // import "github.com/docker/docker/rootless"
+
+import (
+	"os"
+	"sync"
+)
+
+const (
+	// RootlessKitDockerProxyBinary is the binary name of rootlesskit-docker-proxy
+	RootlessKitDockerProxyBinary = "rootlesskit-docker-proxy"
+)
+
+var (
+	runningWithRootlessKit     bool
+	runningWithRootlessKitOnce sync.Once
+)
+
+// RunningWithRootlessKit returns true if running under RootlessKit namespaces.
+func RunningWithRootlessKit() bool {
+	runningWithRootlessKitOnce.Do(func() {
+		u := os.Getenv("ROOTLESSKIT_STATE_DIR")
+		runningWithRootlessKit = u != ""
+	})
+	return runningWithRootlessKit
+}
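RunningWithRootlessKit is a one-shot check of the ROOTLESSKIT_STATE_DIR environment variable, cached with sync.Once. A tiny usage sketch (the state-dir path below is purely illustrative):

```go
package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/rootless"
)

func main() {
	// RootlessKit exports ROOTLESSKIT_STATE_DIR inside its namespaces; because
	// the result is cached on first use, the variable must be set before the
	// first call to RunningWithRootlessKit.
	os.Setenv("ROOTLESSKIT_STATE_DIR", "/run/user/1000/rootlesskit")

	if rootless.RunningWithRootlessKit() {
		fmt.Println("rootless: prefer per-user docker paths")
	} else {
		fmt.Println("rootful: use system-wide docker paths")
	}
}
```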
3  vendor/github.com/docker/docker/volume/mounts/linux_parser.go  generated vendored
@@ -100,6 +100,9 @@ func (p *linuxParser) validateMountConfigImpl(mnt *mount.Mount, validateBindSour
 			return &errMountConfig{mnt, fmt.Errorf("must not set ReadOnly mode when using anonymous volumes")}
 		}
 	case mount.TypeTmpfs:
+		if mnt.BindOptions != nil {
+			return &errMountConfig{mnt, errExtraField("BindOptions")}
+		}
 		if len(mnt.Source) != 0 {
 			return &errMountConfig{mnt, errExtraField("Source")}
 		}
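The linux_parser hunk adds one more consistency check: a tmpfs mount must not carry bind-specific options. validateMountConfigImpl is unexported, so the sketch below mirrors the check on the public mount.Mount type; the helper name and error strings are illustrative, not the library's.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/docker/docker/api/types/mount"
)

// validateTmpfsMount mirrors the added validation: tmpfs mounts may not set
// BindOptions, and (as before) may not name a Source.
func validateTmpfsMount(m mount.Mount) error {
	if m.Type != mount.TypeTmpfs {
		return nil
	}
	if m.BindOptions != nil {
		return errors.New("field BindOptions must not be specified for type tmpfs")
	}
	if len(m.Source) != 0 {
		return errors.New("field Source must not be specified for type tmpfs")
	}
	return nil
}

func main() {
	bad := mount.Mount{
		Type:        mount.TypeTmpfs,
		Target:      "/scratch",
		BindOptions: &mount.BindOptions{Propagation: mount.PropagationRPrivate},
	}
	fmt.Println(validateTmpfsMount(bad)) // rejected: BindOptions set on a tmpfs mount
}
```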
789
vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go
generated
vendored
Normal file
789
vendor/github.com/hashicorp/hcl/hcl/printer/nodes.go
generated
vendored
Normal file
|
@ -0,0 +1,789 @@
|
||||||
|
package printer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"sort"
|
||||||
|
|
||||||
|
"github.com/hashicorp/hcl/hcl/ast"
|
||||||
|
"github.com/hashicorp/hcl/hcl/token"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
blank = byte(' ')
|
||||||
|
newline = byte('\n')
|
||||||
|
tab = byte('\t')
|
||||||
|
infinity = 1 << 30 // offset or line
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
unindent = []byte("\uE123") // in the private use space
|
||||||
|
)
|
||||||
|
|
||||||
|
type printer struct {
|
||||||
|
cfg Config
|
||||||
|
prev token.Pos
|
||||||
|
|
||||||
|
comments []*ast.CommentGroup // may be nil, contains all comments
|
||||||
|
standaloneComments []*ast.CommentGroup // contains all standalone comments (not assigned to any node)
|
||||||
|
|
||||||
|
enableTrace bool
|
||||||
|
indentTrace int
|
||||||
|
}
|
||||||
|
|
||||||
|
type ByPosition []*ast.CommentGroup
|
||||||
|
|
||||||
|
func (b ByPosition) Len() int { return len(b) }
|
||||||
|
func (b ByPosition) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
|
||||||
|
func (b ByPosition) Less(i, j int) bool { return b[i].Pos().Before(b[j].Pos()) }
|
||||||
|
|
||||||
|
// collectComments comments all standalone comments which are not lead or line
|
||||||
|
// comment
|
||||||
|
func (p *printer) collectComments(node ast.Node) {
|
||||||
|
// first collect all comments. This is already stored in
|
||||||
|
// ast.File.(comments)
|
||||||
|
ast.Walk(node, func(nn ast.Node) (ast.Node, bool) {
|
||||||
|
switch t := nn.(type) {
|
||||||
|
case *ast.File:
|
||||||
|
p.comments = t.Comments
|
||||||
|
return nn, false
|
||||||
|
}
|
||||||
|
return nn, true
|
||||||
|
})
|
||||||
|
|
||||||
|
standaloneComments := make(map[token.Pos]*ast.CommentGroup, 0)
|
||||||
|
for _, c := range p.comments {
|
||||||
|
standaloneComments[c.Pos()] = c
|
||||||
|
}
|
||||||
|
|
||||||
|
// next remove all lead and line comments from the overall comment map.
|
||||||
|
// This will give us comments which are standalone, comments which are not
|
||||||
|
// assigned to any kind of node.
|
||||||
|
ast.Walk(node, func(nn ast.Node) (ast.Node, bool) {
|
||||||
|
switch t := nn.(type) {
|
||||||
|
case *ast.LiteralType:
|
||||||
|
if t.LeadComment != nil {
|
||||||
|
for _, comment := range t.LeadComment.List {
|
||||||
|
if _, ok := standaloneComments[comment.Pos()]; ok {
|
||||||
|
delete(standaloneComments, comment.Pos())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if t.LineComment != nil {
|
||||||
|
for _, comment := range t.LineComment.List {
|
||||||
|
if _, ok := standaloneComments[comment.Pos()]; ok {
|
||||||
|
delete(standaloneComments, comment.Pos())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case *ast.ObjectItem:
|
||||||
|
if t.LeadComment != nil {
|
||||||
|
for _, comment := range t.LeadComment.List {
|
||||||
|
if _, ok := standaloneComments[comment.Pos()]; ok {
|
||||||
|
delete(standaloneComments, comment.Pos())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if t.LineComment != nil {
|
||||||
|
for _, comment := range t.LineComment.List {
|
||||||
|
if _, ok := standaloneComments[comment.Pos()]; ok {
|
||||||
|
delete(standaloneComments, comment.Pos())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nn, true
|
||||||
|
})
|
||||||
|
|
||||||
|
for _, c := range standaloneComments {
|
||||||
|
p.standaloneComments = append(p.standaloneComments, c)
|
||||||
|
}
|
||||||
|
|
||||||
|
sort.Sort(ByPosition(p.standaloneComments))
|
||||||
|
}
|
||||||
|
|
||||||
|
// output prints creates b printable HCL output and returns it.
|
||||||
|
func (p *printer) output(n interface{}) []byte {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
|
||||||
|
switch t := n.(type) {
|
||||||
|
case *ast.File:
|
||||||
|
// File doesn't trace so we add the tracing here
|
||||||
|
defer un(trace(p, "File"))
|
||||||
|
return p.output(t.Node)
|
||||||
|
case *ast.ObjectList:
|
||||||
|
defer un(trace(p, "ObjectList"))
|
||||||
|
|
||||||
|
var index int
|
||||||
|
for {
|
||||||
|
// Determine the location of the next actual non-comment
|
||||||
|
// item. If we're at the end, the next item is at "infinity"
|
||||||
|
var nextItem token.Pos
|
||||||
|
if index != len(t.Items) {
|
||||||
|
nextItem = t.Items[index].Pos()
|
||||||
|
} else {
|
||||||
|
nextItem = token.Pos{Offset: infinity, Line: infinity}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Go through the standalone comments in the file and print out
|
||||||
|
// the comments that we should be for this object item.
|
||||||
|
for _, c := range p.standaloneComments {
|
||||||
|
// Go through all the comments in the group. The group
|
||||||
|
// should be printed together, not separated by double newlines.
|
||||||
|
printed := false
|
||||||
|
newlinePrinted := false
|
||||||
|
for _, comment := range c.List {
|
||||||
|
// We only care about comments after the previous item
|
||||||
|
// we've printed so that comments are printed in the
|
||||||
|
// correct locations (between two objects for example).
|
||||||
|
// And before the next item.
|
||||||
|
if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) {
|
||||||
|
// if we hit the end add newlines so we can print the comment
|
||||||
|
// we don't do this if prev is invalid which means the
|
||||||
|
// beginning of the file since the first comment should
|
||||||
|
// be at the first line.
|
||||||
|
if !newlinePrinted && p.prev.IsValid() && index == len(t.Items) {
|
||||||
|
buf.Write([]byte{newline, newline})
|
||||||
|
newlinePrinted = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write the actual comment.
|
||||||
|
buf.WriteString(comment.Text)
|
||||||
|
buf.WriteByte(newline)
|
||||||
|
|
||||||
|
// Set printed to true to note that we printed something
|
||||||
|
printed = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If we're not at the last item, write a new line so
|
||||||
|
// that there is a newline separating this comment from
|
||||||
|
// the next object.
|
||||||
|
if printed && index != len(t.Items) {
|
||||||
|
buf.WriteByte(newline)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if index == len(t.Items) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
buf.Write(p.output(t.Items[index]))
|
||||||
|
if index != len(t.Items)-1 {
|
||||||
|
// Always write a newline to separate us from the next item
|
||||||
|
buf.WriteByte(newline)
|
||||||
|
|
||||||
|
// Need to determine if we're going to separate the next item
|
||||||
|
// with a blank line. The logic here is simple, though there
|
||||||
|
// are a few conditions:
|
||||||
|
//
|
||||||
|
// 1. The next object is more than one line away anyways,
|
||||||
|
// so we need an empty line.
|
||||||
|
//
|
||||||
|
// 2. The next object is not a "single line" object, so
|
||||||
|
// we need an empty line.
|
||||||
|
//
|
||||||
|
// 3. This current object is not a single line object,
|
||||||
|
// so we need an empty line.
|
||||||
|
current := t.Items[index]
|
||||||
|
next := t.Items[index+1]
|
||||||
|
if next.Pos().Line != t.Items[index].Pos().Line+1 ||
|
||||||
|
!p.isSingleLineObject(next) ||
|
||||||
|
!p.isSingleLineObject(current) {
|
||||||
|
buf.WriteByte(newline)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
index++
|
||||||
|
}
|
||||||
|
case *ast.ObjectKey:
|
||||||
|
buf.WriteString(t.Token.Text)
|
||||||
|
case *ast.ObjectItem:
|
||||||
|
p.prev = t.Pos()
|
||||||
|
buf.Write(p.objectItem(t))
|
||||||
|
case *ast.LiteralType:
|
||||||
|
buf.Write(p.literalType(t))
|
||||||
|
case *ast.ListType:
|
||||||
|
buf.Write(p.list(t))
|
||||||
|
case *ast.ObjectType:
|
||||||
|
buf.Write(p.objectType(t))
|
||||||
|
default:
|
||||||
|
fmt.Printf(" unknown type: %T\n", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
return buf.Bytes()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *printer) literalType(lit *ast.LiteralType) []byte {
|
||||||
|
result := []byte(lit.Token.Text)
|
||||||
|
switch lit.Token.Type {
|
||||||
|
case token.HEREDOC:
|
||||||
|
// Clear the trailing newline from heredocs
|
||||||
|
if result[len(result)-1] == '\n' {
|
||||||
|
result = result[:len(result)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Poison lines 2+ so that we don't indent them
|
||||||
|
result = p.heredocIndent(result)
|
||||||
|
case token.STRING:
|
||||||
|
// If this is a multiline string, poison lines 2+ so we don't
|
||||||
|
// indent them.
|
||||||
|
if bytes.IndexRune(result, '\n') >= 0 {
|
||||||
|
result = p.heredocIndent(result)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
// objectItem returns the printable HCL form of an object item. An object type
|
||||||
|
// starts with one/multiple keys and has a value. The value might be of any
|
||||||
|
// type.
|
||||||
|
func (p *printer) objectItem(o *ast.ObjectItem) []byte {
|
||||||
|
defer un(trace(p, fmt.Sprintf("ObjectItem: %s", o.Keys[0].Token.Text)))
|
||||||
|
var buf bytes.Buffer
|
||||||
|
|
||||||
|
if o.LeadComment != nil {
|
||||||
|
for _, comment := range o.LeadComment.List {
|
||||||
|
buf.WriteString(comment.Text)
|
||||||
|
buf.WriteByte(newline)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If key and val are on different lines, treat line comments like lead comments.
|
||||||
|
if o.LineComment != nil && o.Val.Pos().Line != o.Keys[0].Pos().Line {
|
||||||
|
for _, comment := range o.LineComment.List {
|
||||||
|
buf.WriteString(comment.Text)
|
||||||
|
buf.WriteByte(newline)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, k := range o.Keys {
|
||||||
|
buf.WriteString(k.Token.Text)
|
||||||
|
buf.WriteByte(blank)
|
||||||
|
|
||||||
|
// reach end of key
|
||||||
|
if o.Assign.IsValid() && i == len(o.Keys)-1 && len(o.Keys) == 1 {
|
||||||
|
buf.WriteString("=")
|
||||||
|
buf.WriteByte(blank)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
buf.Write(p.output(o.Val))
|
||||||
|
|
||||||
|
if o.LineComment != nil && o.Val.Pos().Line == o.Keys[0].Pos().Line {
|
||||||
|
buf.WriteByte(blank)
|
||||||
|
for _, comment := range o.LineComment.List {
|
||||||
|
buf.WriteString(comment.Text)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return buf.Bytes()
|
||||||
|
}
|
||||||
|
|
||||||
|
// objectType returns the printable HCL form of an object type. An object type
|
||||||
|
// begins with a brace and ends with a brace.
|
||||||
|
func (p *printer) objectType(o *ast.ObjectType) []byte {
|
||||||
|
defer un(trace(p, "ObjectType"))
|
||||||
|
var buf bytes.Buffer
|
||||||
|
buf.WriteString("{")
|
||||||
|
|
||||||
|
var index int
|
||||||
|
var nextItem token.Pos
|
||||||
|
var commented, newlinePrinted bool
|
||||||
|
for {
|
||||||
|
// Determine the location of the next actual non-comment
|
||||||
|
// item. If we're at the end, the next item is the closing brace
|
||||||
|
if index != len(o.List.Items) {
|
||||||
|
nextItem = o.List.Items[index].Pos()
|
||||||
|
} else {
|
||||||
|
nextItem = o.Rbrace
|
||||||
|
}
|
||||||
|
|
||||||
|
// Go through the standalone comments in the file and print out
|
||||||
|
// the comments that we should be for this object item.
|
||||||
|
for _, c := range p.standaloneComments {
|
||||||
|
printed := false
|
||||||
|
var lastCommentPos token.Pos
|
||||||
|
for _, comment := range c.List {
|
||||||
|
// We only care about comments after the previous item
|
||||||
|
// we've printed so that comments are printed in the
|
||||||
|
// correct locations (between two objects for example).
|
||||||
|
// And before the next item.
|
||||||
|
if comment.Pos().After(p.prev) && comment.Pos().Before(nextItem) {
|
||||||
|
// If there are standalone comments and the initial newline has not
|
||||||
|
// been printed yet, do it now.
|
||||||
|
if !newlinePrinted {
|
||||||
|
newlinePrinted = true
|
||||||
|
buf.WriteByte(newline)
|
||||||
|
}
|
||||||
|
|
||||||
|
// add newline if it's between other printed nodes
|
||||||
|
if index > 0 {
|
||||||
|
commented = true
|
||||||
|
buf.WriteByte(newline)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Store this position
|
||||||
|
lastCommentPos = comment.Pos()
|
||||||
|
|
||||||
|
// output the comment itself
|
||||||
|
buf.Write(p.indent(p.heredocIndent([]byte(comment.Text))))
|
||||||
|
|
||||||
|
// Set printed to true to note that we printed something
|
||||||
|
printed = true
|
||||||
|
|
||||||
|
/*
|
||||||
|
if index != len(o.List.Items) {
|
||||||
|
buf.WriteByte(newline) // do not print on the end
|
||||||
|
}
|
||||||
|
*/
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stuff to do if we had comments
|
||||||
|
if printed {
|
||||||
|
// Always write a newline
|
||||||
|
buf.WriteByte(newline)
|
||||||
|
|
||||||
|
// If there is another item in the object and our comment
|
||||||
|
// didn't hug it directly, then make sure there is a blank
|
||||||
|
// line separating them.
|
||||||
|
if nextItem != o.Rbrace && nextItem.Line != lastCommentPos.Line+1 {
|
||||||
|
buf.WriteByte(newline)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if index == len(o.List.Items) {
|
||||||
|
p.prev = o.Rbrace
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
// At this point we are sure that it's not a totally empty block: print
|
||||||
|
// the initial newline if it hasn't been printed yet by the previous
|
||||||
|
// block about standalone comments.
|
||||||
|
if !newlinePrinted {
|
||||||
|
buf.WriteByte(newline)
|
||||||
|
newlinePrinted = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// check if we have adjacent one liner items. If yes we'll going to align
|
||||||
|
// the comments.
|
||||||
|
var aligned []*ast.ObjectItem
|
||||||
|
for _, item := range o.List.Items[index:] {
|
||||||
|
// we don't group one line lists
|
||||||
|
if len(o.List.Items) == 1 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
// one means a oneliner with out any lead comment
|
||||||
|
// two means a oneliner with lead comment
|
||||||
|
// anything else might be something else
|
||||||
|
cur := lines(string(p.objectItem(item)))
|
||||||
|
if cur > 2 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
curPos := item.Pos()
|
||||||
|
|
||||||
|
nextPos := token.Pos{}
|
||||||
|
if index != len(o.List.Items)-1 {
|
||||||
|
nextPos = o.List.Items[index+1].Pos()
|
||||||
|
}
|
||||||
|
|
||||||
|
prevPos := token.Pos{}
|
||||||
|
if index != 0 {
|
||||||
|
prevPos = o.List.Items[index-1].Pos()
|
||||||
|
}
|
||||||
|
|
||||||
|
// fmt.Println("DEBUG ----------------")
|
||||||
|
// fmt.Printf("prev = %+v prevPos: %s\n", prev, prevPos)
|
||||||
|
// fmt.Printf("cur = %+v curPos: %s\n", cur, curPos)
|
||||||
|
// fmt.Printf("next = %+v nextPos: %s\n", next, nextPos)
|
||||||
|
|
||||||
|
if curPos.Line+1 == nextPos.Line {
|
||||||
|
aligned = append(aligned, item)
|
||||||
|
index++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if curPos.Line-1 == prevPos.Line {
|
||||||
|
aligned = append(aligned, item)
|
||||||
|
index++
|
||||||
|
|
||||||
|
// finish if we have a new line or comment next. This happens
|
||||||
|
// if the next item is not adjacent
|
||||||
|
if curPos.Line+1 != nextPos.Line {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
// put newlines if the items are between other non aligned items.
|
||||||
|
// newlines are also added if there is a standalone comment already, so
|
||||||
|
// check it too
|
||||||
|
if !commented && index != len(aligned) {
|
||||||
|
buf.WriteByte(newline)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(aligned) >= 1 {
|
||||||
|
p.prev = aligned[len(aligned)-1].Pos()
|
||||||
|
|
||||||
|
items := p.alignedItems(aligned)
|
||||||
|
buf.Write(p.indent(items))
|
||||||
|
} else {
|
||||||
|
p.prev = o.List.Items[index].Pos()
|
||||||
|
|
||||||
|
buf.Write(p.indent(p.objectItem(o.List.Items[index])))
|
||||||
|
index++
|
||||||
|
}
|
||||||
|
|
||||||
|
buf.WriteByte(newline)
|
||||||
|
}
|
||||||
|
|
||||||
|
buf.WriteString("}")
|
||||||
|
return buf.Bytes()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *printer) alignedItems(items []*ast.ObjectItem) []byte {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
|
||||||
|
// find the longest key and value length, needed for alignment
|
||||||
|
var longestKeyLen int // longest key length
|
||||||
|
var longestValLen int // longest value length
|
||||||
|
for _, item := range items {
|
||||||
|
key := len(item.Keys[0].Token.Text)
|
||||||
|
val := len(p.output(item.Val))
|
||||||
|
|
||||||
|
if key > longestKeyLen {
|
||||||
|
longestKeyLen = key
|
||||||
|
}
|
||||||
|
|
||||||
|
if val > longestValLen {
|
||||||
|
longestValLen = val
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, item := range items {
|
||||||
|
if item.LeadComment != nil {
|
||||||
|
for _, comment := range item.LeadComment.List {
|
||||||
|
buf.WriteString(comment.Text)
|
||||||
|
buf.WriteByte(newline)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, k := range item.Keys {
|
||||||
|
keyLen := len(k.Token.Text)
|
||||||
|
buf.WriteString(k.Token.Text)
|
||||||
|
for i := 0; i < longestKeyLen-keyLen+1; i++ {
|
||||||
|
buf.WriteByte(blank)
|
||||||
|
}
|
||||||
|
|
||||||
|
// reach end of key
|
||||||
|
if i == len(item.Keys)-1 && len(item.Keys) == 1 {
|
||||||
|
buf.WriteString("=")
|
||||||
|
buf.WriteByte(blank)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
val := p.output(item.Val)
|
||||||
|
valLen := len(val)
|
||||||
|
buf.Write(val)
|
||||||
|
|
||||||
|
if item.Val.Pos().Line == item.Keys[0].Pos().Line && item.LineComment != nil {
|
||||||
|
for i := 0; i < longestValLen-valLen+1; i++ {
|
||||||
|
buf.WriteByte(blank)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, comment := range item.LineComment.List {
|
||||||
|
buf.WriteString(comment.Text)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// do not print for the last item
|
||||||
|
if i != len(items)-1 {
|
||||||
|
buf.WriteByte(newline)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return buf.Bytes()
|
||||||
|
}
|
||||||
|
|
||||||
|
// list returns the printable HCL form of an list type.
|
||||||
|
func (p *printer) list(l *ast.ListType) []byte {
|
||||||
|
if p.isSingleLineList(l) {
|
||||||
|
return p.singleLineList(l)
|
||||||
|
}
|
||||||
|
|
||||||
|
var buf bytes.Buffer
|
||||||
|
buf.WriteString("[")
|
||||||
|
buf.WriteByte(newline)
|
||||||
|
|
||||||
|
var longestLine int
|
||||||
|
for _, item := range l.List {
|
||||||
|
// for now we assume that the list only contains literal types
|
||||||
|
if lit, ok := item.(*ast.LiteralType); ok {
|
||||||
|
lineLen := len(lit.Token.Text)
|
||||||
|
if lineLen > longestLine {
|
||||||
|
longestLine = lineLen
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
haveEmptyLine := false
|
||||||
|
for i, item := range l.List {
|
||||||
|
// If we have a lead comment, then we want to write that first
|
||||||
|
leadComment := false
|
||||||
|
if lit, ok := item.(*ast.LiteralType); ok && lit.LeadComment != nil {
|
||||||
|
leadComment = true
|
||||||
|
|
||||||
|
// Ensure an empty line before every element with a
|
||||||
|
// lead comment (except the first item in a list).
|
||||||
|
if !haveEmptyLine && i != 0 {
|
||||||
|
buf.WriteByte(newline)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, comment := range lit.LeadComment.List {
|
||||||
|
buf.Write(p.indent([]byte(comment.Text)))
|
||||||
|
buf.WriteByte(newline)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// also indent each line
|
||||||
|
val := p.output(item)
|
||||||
|
curLen := len(val)
|
||||||
|
buf.Write(p.indent(val))
|
||||||
|
|
||||||
|
// if this item is a heredoc, then we output the comma on
|
||||||
|
// the next line. This is the only case this happens.
|
||||||
|
comma := []byte{','}
|
||||||
|
if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC {
|
||||||
|
buf.WriteByte(newline)
|
||||||
|
comma = p.indent(comma)
|
||||||
|
}
|
||||||
|
|
||||||
|
buf.Write(comma)
|
||||||
|
|
||||||
|
if lit, ok := item.(*ast.LiteralType); ok && lit.LineComment != nil {
|
||||||
|
// if the next item doesn't have any comments, do not align
|
||||||
|
buf.WriteByte(blank) // align one space
|
||||||
|
for i := 0; i < longestLine-curLen; i++ {
|
||||||
|
buf.WriteByte(blank)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, comment := range lit.LineComment.List {
|
||||||
|
buf.WriteString(comment.Text)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
buf.WriteByte(newline)
|
||||||
|
|
||||||
|
// Ensure an empty line after every element with a
|
||||||
|
// lead comment (except the first item in a list).
|
||||||
|
haveEmptyLine = leadComment && i != len(l.List)-1
|
||||||
|
if haveEmptyLine {
|
||||||
|
buf.WriteByte(newline)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
buf.WriteString("]")
|
||||||
|
return buf.Bytes()
|
||||||
|
}
|
||||||
|
|
||||||
|
// isSingleLineList returns true if:
|
||||||
|
// * they were previously formatted entirely on one line
|
||||||
|
// * they consist entirely of literals
|
||||||
|
// * there are either no heredoc strings or the list has exactly one element
|
||||||
|
// * there are no line comments
|
||||||
|
func (printer) isSingleLineList(l *ast.ListType) bool {
|
||||||
|
for _, item := range l.List {
|
||||||
|
if item.Pos().Line != l.Lbrack.Line {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
lit, ok := item.(*ast.LiteralType)
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if lit.Token.Type == token.HEREDOC && len(l.List) != 1 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if lit.LineComment != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// singleLineList prints a simple single line list.
|
||||||
|
// For a definition of "simple", see isSingleLineList above.
|
||||||
|
func (p *printer) singleLineList(l *ast.ListType) []byte {
|
||||||
|
buf := &bytes.Buffer{}
|
||||||
|
|
||||||
|
buf.WriteString("[")
|
||||||
|
for i, item := range l.List {
|
||||||
|
if i != 0 {
|
||||||
|
buf.WriteString(", ")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Output the item itself
|
||||||
|
buf.Write(p.output(item))
|
||||||
|
|
||||||
|
// The heredoc marker needs to be at the end of line.
|
||||||
|
if lit, ok := item.(*ast.LiteralType); ok && lit.Token.Type == token.HEREDOC {
|
||||||
|
buf.WriteByte(newline)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
buf.WriteString("]")
|
||||||
|
return buf.Bytes()
|
||||||
|
}
|
||||||
|
|
||||||
|
// indent indents the lines of the given buffer for each non-empty line
|
||||||
|
func (p *printer) indent(buf []byte) []byte {
|
||||||
|
var prefix []byte
|
||||||
|
if p.cfg.SpacesWidth != 0 {
|
||||||
|
for i := 0; i < p.cfg.SpacesWidth; i++ {
|
||||||
|
prefix = append(prefix, blank)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
prefix = []byte{tab}
|
||||||
|
}
|
||||||
|
|
||||||
|
var res []byte
|
||||||
|
bol := true
|
||||||
|
for _, c := range buf {
|
||||||
|
if bol && c != '\n' {
|
||||||
|
res = append(res, prefix...)
|
||||||
|
}
|
||||||
|
|
||||||
|
res = append(res, c)
|
||||||
|
bol = c == '\n'
|
||||||
|
}
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
|
// unindent removes all the indentation from the tombstoned lines
|
||||||
|
func (p *printer) unindent(buf []byte) []byte {
|
||||||
|
var res []byte
|
||||||
|
for i := 0; i < len(buf); i++ {
|
||||||
|
skip := len(buf)-i <= len(unindent)
|
||||||
|
if !skip {
|
||||||
|
skip = !bytes.Equal(unindent, buf[i:i+len(unindent)])
|
||||||
|
}
|
||||||
|
if skip {
|
||||||
|
res = append(res, buf[i])
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// We have a marker. we have to backtrace here and clean out
|
||||||
|
// any whitespace ahead of our tombstone up to a \n
|
||||||
|
for j := len(res) - 1; j >= 0; j-- {
|
||||||
|
if res[j] == '\n' {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
res = res[:j]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Skip the entire unindent marker
|
||||||
|
i += len(unindent) - 1
|
||||||
|
}
|
||||||
|
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
|
// heredocIndent marks all the 2nd and further lines as unindentable
|
||||||
|
func (p *printer) heredocIndent(buf []byte) []byte {
|
||||||
|
var res []byte
|
||||||
|
bol := false
|
||||||
|
for _, c := range buf {
|
||||||
|
if bol && c != '\n' {
|
||||||
|
res = append(res, unindent...)
|
||||||
|
}
|
||||||
|
res = append(res, c)
|
||||||
|
bol = c == '\n'
|
||||||
|
}
|
||||||
|
return res
|
||||||
|
}
|
||||||
|
|
||||||
|
// isSingleLineObject tells whether the given object item is a single
|
||||||
|
// line object such as "obj {}".
|
||||||
|
//
|
||||||
|
// A single line object:
|
||||||
|
//
|
||||||
|
// * has no lead comments (hence multi-line)
|
||||||
|
// * has no assignment
|
||||||
|
// * has no values in the stanza (within {})
|
||||||
|
//
|
||||||
|
func (p *printer) isSingleLineObject(val *ast.ObjectItem) bool {
|
||||||
|
// If there is a lead comment, can't be one line
|
||||||
|
if val.LeadComment != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// If there is assignment, we always break by line
|
||||||
|
if val.Assign.IsValid() {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// If it isn't an object type, then its not a single line object
|
||||||
|
ot, ok := val.Val.(*ast.ObjectType)
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the object has no items, it is single line!
|
||||||
|
return len(ot.List.Items) == 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func lines(txt string) int {
|
||||||
|
endline := 1
|
||||||
|
for i := 0; i < len(txt); i++ {
|
||||||
|
if txt[i] == '\n' {
|
||||||
|
endline++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return endline
|
||||||
|
}
|
||||||
|
|
||||||
|
// ----------------------------------------------------------------------------
|
||||||
|
// Tracing support
|
||||||
|
|
||||||
|
func (p *printer) printTrace(a ...interface{}) {
|
||||||
|
if !p.enableTrace {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
|
||||||
|
const n = len(dots)
|
||||||
|
i := 2 * p.indentTrace
|
||||||
|
for i > n {
|
||||||
|
fmt.Print(dots)
|
||||||
|
i -= n
|
||||||
|
}
|
||||||
|
// i <= n
|
||||||
|
fmt.Print(dots[0:i])
|
||||||
|
fmt.Println(a...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func trace(p *printer, msg string) *printer {
|
||||||
|
p.printTrace(msg, "(")
|
||||||
|
p.indentTrace++
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
|
// Usage pattern: defer un(trace(p, "..."))
|
||||||
|
func un(p *printer) {
|
||||||
|
p.indentTrace--
|
||||||
|
p.printTrace(")")
|
||||||
|
}
|
66  vendor/github.com/hashicorp/hcl/hcl/printer/printer.go  generated vendored Normal file
@@ -0,0 +1,66 @@
+// Package printer implements printing of AST nodes to HCL format.
+package printer
+
+import (
+	"bytes"
+	"io"
+	"text/tabwriter"
+
+	"github.com/hashicorp/hcl/hcl/ast"
+	"github.com/hashicorp/hcl/hcl/parser"
+)
+
+var DefaultConfig = Config{
+	SpacesWidth: 2,
+}
+
+// A Config node controls the output of Fprint.
+type Config struct {
+	SpacesWidth int // if set, it will use spaces instead of tabs for alignment
+}
+
+func (c *Config) Fprint(output io.Writer, node ast.Node) error {
+	p := &printer{
+		cfg:                *c,
+		comments:           make([]*ast.CommentGroup, 0),
+		standaloneComments: make([]*ast.CommentGroup, 0),
+		// enableTrace: true,
+	}
+
+	p.collectComments(node)
+
+	if _, err := output.Write(p.unindent(p.output(node))); err != nil {
+		return err
+	}
+
+	// flush tabwriter, if any
+	var err error
+	if tw, _ := output.(*tabwriter.Writer); tw != nil {
+		err = tw.Flush()
+	}
+
+	return err
+}
+
+// Fprint "pretty-prints" an HCL node to output
+// It calls Config.Fprint with default settings.
+func Fprint(output io.Writer, node ast.Node) error {
+	return DefaultConfig.Fprint(output, node)
+}
+
+// Format formats src HCL and returns the result.
+func Format(src []byte) ([]byte, error) {
+	node, err := parser.Parse(src)
+	if err != nil {
+		return nil, err
+	}
+
+	var buf bytes.Buffer
+	if err := DefaultConfig.Fprint(&buf, node); err != nil {
+		return nil, err
+	}
+
+	// Add trailing newline to result
+	buf.WriteString("\n")
+	return buf.Bytes(), nil
+}
21  vendor/github.com/morikuni/aec/LICENSE  generated vendored Normal file
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Taihei Morikuni
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
178
vendor/github.com/morikuni/aec/README.md
generated
vendored
Normal file
178
vendor/github.com/morikuni/aec/README.md
generated
vendored
Normal file
|
@ -0,0 +1,178 @@
|
||||||
|
# aec
|
||||||
|
|
||||||
|
[![GoDoc](https://godoc.org/github.com/morikuni/aec?status.svg)](https://godoc.org/github.com/morikuni/aec)
|
||||||
|
|
||||||
|
Go wrapper for ANSI escape code.
|
||||||
|
|
||||||
|
## Install
|
||||||
|
|
||||||
|
```bash
|
||||||
|
go get github.com/morikuni/aec
|
||||||
|
```
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
ANSI escape codes depend on terminal environment.
|
||||||
|
Some of these features may not work.
|
||||||
|
Check supported Font-Style/Font-Color features with [checkansi](./checkansi).
|
||||||
|
|
||||||
|
[Wikipedia](https://en.wikipedia.org/wiki/ANSI_escape_code) for more detail.
|
||||||
|
|
||||||
|
### Cursor
|
||||||
|
|
||||||
|
- `Up(n)`
|
||||||
|
- `Down(n)`
|
||||||
|
- `Right(n)`
|
||||||
|
- `Left(n)`
|
||||||
|
- `NextLine(n)`
|
||||||
|
- `PreviousLine(n)`
|
||||||
|
- `Column(col)`
|
||||||
|
- `Position(row, col)`
|
||||||
|
- `Save`
|
||||||
|
- `Restore`
|
||||||
|
- `Hide`
|
||||||
|
- `Show`
|
||||||
|
- `Report`
|
||||||
|
|
||||||
|
### Erase
|
||||||
|
|
||||||
|
- `EraseDisplay(mode)`
|
||||||
|
- `EraseLine(mode)`
|
||||||
|
|
||||||
|
### Scroll
|
||||||
|
|
||||||
|
- `ScrollUp(n)`
|
||||||
|
- `ScrollDown(n)`
|
||||||
|
|
||||||
|
### Font Style
|
||||||
|
|
||||||
|
- `Bold`
|
||||||
|
- `Faint`
|
||||||
|
- `Italic`
|
||||||
|
- `Underline`
|
||||||
|
- `BlinkSlow`
|
||||||
|
- `BlinkRapid`
|
||||||
|
- `Inverse`
|
||||||
|
- `Conceal`
|
||||||
|
- `CrossOut`
|
||||||
|
- `Frame`
|
||||||
|
- `Encircle`
|
||||||
|
- `Overline`
|
||||||
|
|
||||||
|
### Font Color
|
||||||
|
|
||||||
|
Foreground color.
|
||||||
|
|
||||||
|
- `DefaultF`
|
||||||
|
- `BlackF`
|
||||||
|
- `RedF`
|
||||||
|
- `GreenF`
|
||||||
|
- `YellowF`
|
||||||
|
- `BlueF`
|
||||||
|
- `MagentaF`
|
||||||
|
- `CyanF`
|
||||||
|
- `WhiteF`
|
||||||
|
- `LightBlackF`
|
||||||
|
- `LightRedF`
|
||||||
|
- `LightGreenF`
|
||||||
|
- `LightYellowF`
|
||||||
|
- `LightBlueF`
|
||||||
|
- `LightMagentaF`
|
||||||
|
- `LightCyanF`
|
||||||
|
- `LightWhiteF`
|
||||||
|
- `Color3BitF(color)`
|
||||||
|
- `Color8BitF(color)`
|
||||||
|
- `FullColorF(r, g, b)`
|
||||||
|
|
||||||
|
Background color.
|
||||||
|
|
||||||
|
- `DefaultB`
|
||||||
|
- `BlackB`
|
||||||
|
- `RedB`
|
||||||
|
- `GreenB`
|
||||||
|
- `YellowB`
|
||||||
|
- `BlueB`
|
||||||
|
- `MagentaB`
|
||||||
|
- `CyanB`
|
||||||
|
- `WhiteB`
|
||||||
|
- `LightBlackB`
|
||||||
|
- `LightRedB`
|
||||||
|
- `LightGreenB`
|
||||||
|
- `LightYellowB`
|
||||||
|
- `LightBlueB`
|
||||||
|
- `LightMagentaB`
|
||||||
|
- `LightCyanB`
|
||||||
|
- `LightWhiteB`
|
||||||
|
- `Color3BitB(color)`
|
||||||
|
- `Color8BitB(color)`
|
||||||
|
- `FullColorB(r, g, b)`
|
||||||
|
|
||||||
|
### Color Converter
|
||||||
|
|
||||||
|
24bit RGB color to ANSI color.
|
||||||
|
|
||||||
|
- `NewRGB3Bit(r, g, b)`
|
||||||
|
- `NewRGB8Bit(r, g, b)`
|
||||||
|
|
||||||
|
### Builder
|
||||||
|
|
||||||
|
To mix these features.
|
||||||
|
|
||||||
|
```go
|
||||||
|
custom := aec.EmptyBuilder.Right(2).RGB8BitF(128, 255, 64).RedB().ANSI
|
||||||
|
custom.Apply("Hello World")
|
||||||
|
```
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
|
||||||
|
1. Create ANSI by `aec.XXX().With(aec.YYY())` or `aec.EmptyBuilder.XXX().YYY().ANSI`
|
||||||
|
2. Print ANSI by `fmt.Print(ansi, "some string", aec.Reset)` or `fmt.Print(ansi.Apply("some string"))`
|
||||||
|
|
||||||
|
`aec.Reset` should be added when using font style or font color features.
|
||||||
|
|
||||||
|
## Example
|
||||||
|
|
||||||
|
Simple progressbar.
|
||||||
|
|
||||||
|
![sample](./sample.gif)
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/morikuni/aec"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
const n = 20
|
||||||
|
builder := aec.EmptyBuilder
|
||||||
|
|
||||||
|
up2 := aec.Up(2)
|
||||||
|
col := aec.Column(n + 2)
|
||||||
|
bar := aec.Color8BitF(aec.NewRGB8Bit(64, 255, 64))
|
||||||
|
label := builder.LightRedF().Underline().With(col).Right(1).ANSI
|
||||||
|
|
||||||
|
// for up2
|
||||||
|
fmt.Println()
|
||||||
|
fmt.Println()
|
||||||
|
|
||||||
|
for i := 0; i <= n; i++ {
|
||||||
|
fmt.Print(up2)
|
||||||
|
fmt.Println(label.Apply(fmt.Sprint(i, "/", n)))
|
||||||
|
fmt.Print("[")
|
||||||
|
fmt.Print(bar.Apply(strings.Repeat("=", i)))
|
||||||
|
fmt.Println(col.Apply("]"))
|
||||||
|
time.Sleep(100 * time.Millisecond)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
[MIT](./LICENSE)
|
||||||
|
|
||||||
|
|
137
vendor/github.com/morikuni/aec/aec.go
generated
vendored
Normal file
137
vendor/github.com/morikuni/aec/aec.go
generated
vendored
Normal file
|
@ -0,0 +1,137 @@
|
||||||
|
package aec
|
||||||
|
|
||||||
|
import "fmt"
|
||||||
|
|
||||||
|
// EraseMode is listed in a variable EraseModes.
|
||||||
|
type EraseMode uint
|
||||||
|
|
||||||
|
var (
|
||||||
|
// EraseModes is a list of EraseMode.
|
||||||
|
EraseModes struct {
|
||||||
|
// All erase all.
|
||||||
|
All EraseMode
|
||||||
|
|
||||||
|
// Head erase to head.
|
||||||
|
Head EraseMode
|
||||||
|
|
||||||
|
// Tail erase to tail.
|
||||||
|
Tail EraseMode
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save saves the cursor position.
|
||||||
|
Save ANSI
|
||||||
|
|
||||||
|
// Restore restores the cursor position.
|
||||||
|
Restore ANSI
|
||||||
|
|
||||||
|
// Hide hides the cursor.
|
||||||
|
Hide ANSI
|
||||||
|
|
||||||
|
// Show shows the cursor.
|
||||||
|
Show ANSI
|
||||||
|
|
||||||
|
// Report reports the cursor position.
|
||||||
|
Report ANSI
|
||||||
|
)
|
||||||
|
|
||||||
|
// Up moves up the cursor.
|
||||||
|
func Up(n uint) ANSI {
|
||||||
|
if n == 0 {
|
||||||
|
return empty
|
||||||
|
}
|
||||||
|
return newAnsi(fmt.Sprintf(esc+"%dA", n))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Down moves down the cursor.
|
||||||
|
func Down(n uint) ANSI {
|
||||||
|
if n == 0 {
|
||||||
|
return empty
|
||||||
|
}
|
||||||
|
return newAnsi(fmt.Sprintf(esc+"%dB", n))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Right moves right the cursor.
|
||||||
|
func Right(n uint) ANSI {
|
||||||
|
if n == 0 {
|
||||||
|
return empty
|
||||||
|
}
|
||||||
|
return newAnsi(fmt.Sprintf(esc+"%dC", n))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Left moves left the cursor.
|
||||||
|
func Left(n uint) ANSI {
|
||||||
|
if n == 0 {
|
||||||
|
return empty
|
||||||
|
}
|
||||||
|
return newAnsi(fmt.Sprintf(esc+"%dD", n))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NextLine moves down the cursor to head of a line.
|
||||||
|
func NextLine(n uint) ANSI {
|
||||||
|
if n == 0 {
|
||||||
|
return empty
|
||||||
|
}
|
||||||
|
return newAnsi(fmt.Sprintf(esc+"%dE", n))
|
||||||
|
}
|
||||||
|
|
||||||
|
// PreviousLine moves up the cursor to head of a line.
|
||||||
|
func PreviousLine(n uint) ANSI {
|
||||||
|
if n == 0 {
|
||||||
|
return empty
|
||||||
|
}
|
||||||
|
return newAnsi(fmt.Sprintf(esc+"%dF", n))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Column set the cursor position to a given column.
|
||||||
|
func Column(col uint) ANSI {
|
||||||
|
return newAnsi(fmt.Sprintf(esc+"%dG", col))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Position set the cursor position to a given absolute position.
|
||||||
|
func Position(row, col uint) ANSI {
|
||||||
|
return newAnsi(fmt.Sprintf(esc+"%d;%dH", row, col))
|
||||||
|
}
|
||||||
|
|
||||||
|
// EraseDisplay erases display by given EraseMode.
|
||||||
|
func EraseDisplay(m EraseMode) ANSI {
|
||||||
|
return newAnsi(fmt.Sprintf(esc+"%dJ", m))
|
||||||
|
}
|
||||||
|
|
||||||
|
// EraseLine erases lines by given EraseMode.
|
||||||
|
func EraseLine(m EraseMode) ANSI {
|
||||||
|
return newAnsi(fmt.Sprintf(esc+"%dK", m))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScrollUp scrolls up the page.
|
||||||
|
func ScrollUp(n int) ANSI {
|
||||||
|
if n == 0 {
|
||||||
|
return empty
|
||||||
|
}
|
||||||
|
return newAnsi(fmt.Sprintf(esc+"%dS", n))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScrollDown scrolls down the page.
|
||||||
|
func ScrollDown(n int) ANSI {
|
||||||
|
if n == 0 {
|
||||||
|
return empty
|
||||||
|
}
|
||||||
|
return newAnsi(fmt.Sprintf(esc+"%dT", n))
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
EraseModes = struct {
|
||||||
|
All EraseMode
|
||||||
|
Head EraseMode
|
||||||
|
Tail EraseMode
|
||||||
|
}{
|
||||||
|
Tail: 0,
|
||||||
|
Head: 1,
|
||||||
|
All: 2,
|
||||||
|
}
|
||||||
|
|
||||||
|
Save = newAnsi(esc + "s")
|
||||||
|
Restore = newAnsi(esc + "u")
|
||||||
|
Hide = newAnsi(esc + "?25l")
|
||||||
|
Show = newAnsi(esc + "?25h")
|
||||||
|
Report = newAnsi(esc + "6n")
|
||||||
|
}
|
59  vendor/github.com/morikuni/aec/ansi.go  generated vendored Normal file
@@ -0,0 +1,59 @@
+package aec
+
+import (
+	"fmt"
+	"strings"
+)
+
+const esc = "\x1b["
+
+// Reset resets SGR effect.
+const Reset string = "\x1b[0m"
+
+var empty = newAnsi("")
+
+// ANSI represents ANSI escape code.
+type ANSI interface {
+	fmt.Stringer
+
+	// With adapts given ANSIs.
+	With(...ANSI) ANSI
+
+	// Apply wraps given string in ANSI.
+	Apply(string) string
+}
+
+type ansiImpl string
+
+func newAnsi(s string) *ansiImpl {
+	r := ansiImpl(s)
+	return &r
+}
+
+func (a *ansiImpl) With(ansi ...ANSI) ANSI {
+	return concat(append([]ANSI{a}, ansi...))
+}
+
+func (a *ansiImpl) Apply(s string) string {
+	return a.String() + s + Reset
+}
+
+func (a *ansiImpl) String() string {
+	return string(*a)
+}
+
+// Apply wraps given string in ANSIs.
+func Apply(s string, ansi ...ANSI) string {
+	if len(ansi) == 0 {
+		return s
+	}
+	return concat(ansi).Apply(s)
+}
+
+func concat(ansi []ANSI) ANSI {
+	strs := make([]string, 0, len(ansi))
+	for _, p := range ansi {
+		strs = append(strs, p.String())
+	}
+	return newAnsi(strings.Join(strs, ""))
+}
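Putting the aec pieces together: With composes escape codes, Apply wraps a string and appends Reset, and Up/Down are the cursor moves used by the jsonmessage changes at the top of this diff. A short sketch; the colour and style constants used here (RedF, Bold) live in parts of the package not shown in this excerpt.

```go
package main

import (
	"fmt"

	"github.com/morikuni/aec"
)

func main() {
	// Compose SGR codes, then wrap a string; Apply appends aec.Reset for us.
	warn := aec.RedF.With(aec.Bold)
	fmt.Println(warn.Apply("build failed"))

	// Cursor movement as used for progress redraws: jump up two lines and
	// rewrite them in place.
	fmt.Print(aec.Up(2))
	fmt.Println(warn.Apply("step 1/2: retried"))
	fmt.Println("step 2/2: done")
}
```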
388
vendor/github.com/morikuni/aec/builder.go
generated
vendored
Normal file
388
vendor/github.com/morikuni/aec/builder.go
generated
vendored
Normal file
|
@ -0,0 +1,388 @@
|
||||||
|
package aec
|
||||||
|
|
||||||
|
// Builder is a lightweight syntax to construct customized ANSI.
|
||||||
|
type Builder struct {
|
||||||
|
ANSI ANSI
|
||||||
|
}
|
||||||
|
|
||||||
|
// EmptyBuilder is an initialized Builder.
|
||||||
|
var EmptyBuilder *Builder
|
||||||
|
|
||||||
|
// NewBuilder creates a Builder from existing ANSI.
|
||||||
|
func NewBuilder(a ...ANSI) *Builder {
|
||||||
|
return &Builder{concat(a)}
|
||||||
|
}
|
||||||
|
|
||||||
|
// With is a syntax for With.
|
||||||
|
func (builder *Builder) With(a ...ANSI) *Builder {
|
||||||
|
return NewBuilder(builder.ANSI.With(a...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Up is a syntax for Up.
|
||||||
|
func (builder *Builder) Up(n uint) *Builder {
|
||||||
|
return builder.With(Up(n))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Down is a syntax for Down.
|
||||||
|
func (builder *Builder) Down(n uint) *Builder {
|
||||||
|
return builder.With(Down(n))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Right is a syntax for Right.
|
||||||
|
func (builder *Builder) Right(n uint) *Builder {
|
||||||
|
return builder.With(Right(n))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Left is a syntax for Left.
|
||||||
|
func (builder *Builder) Left(n uint) *Builder {
|
||||||
|
return builder.With(Left(n))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NextLine is a syntax for NextLine.
|
||||||
|
func (builder *Builder) NextLine(n uint) *Builder {
|
||||||
|
return builder.With(NextLine(n))
|
||||||
|
}
|
||||||
|
|
||||||
|
// PreviousLine is a syntax for PreviousLine.
|
||||||
|
func (builder *Builder) PreviousLine(n uint) *Builder {
|
||||||
|
return builder.With(PreviousLine(n))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Column is a syntax for Column.
|
||||||
|
func (builder *Builder) Column(col uint) *Builder {
|
||||||
|
return builder.With(Column(col))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Position is a syntax for Position.
|
||||||
|
func (builder *Builder) Position(row, col uint) *Builder {
|
||||||
|
return builder.With(Position(row, col))
|
||||||
|
}
|
||||||
|
|
||||||
|
// EraseDisplay is a syntax for EraseDisplay.
|
||||||
|
func (builder *Builder) EraseDisplay(m EraseMode) *Builder {
|
||||||
|
return builder.With(EraseDisplay(m))
|
||||||
|
}
|
||||||
|
|
||||||
|
// EraseLine is a syntax for EraseLine.
|
||||||
|
func (builder *Builder) EraseLine(m EraseMode) *Builder {
|
||||||
|
return builder.With(EraseLine(m))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScrollUp is a syntax for ScrollUp.
|
||||||
|
func (builder *Builder) ScrollUp(n int) *Builder {
|
||||||
|
return builder.With(ScrollUp(n))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScrollDown is a syntax for ScrollDown.
|
||||||
|
func (builder *Builder) ScrollDown(n int) *Builder {
|
||||||
|
return builder.With(ScrollDown(n))
|
||||||
|
}

// Save is a syntax for Save.
func (builder *Builder) Save() *Builder {
	return builder.With(Save)
}

// Restore is a syntax for Restore.
func (builder *Builder) Restore() *Builder {
	return builder.With(Restore)
}

// Hide is a syntax for Hide.
func (builder *Builder) Hide() *Builder {
	return builder.With(Hide)
}

// Show is a syntax for Show.
func (builder *Builder) Show() *Builder {
	return builder.With(Show)
}

// Report is a syntax for Report.
func (builder *Builder) Report() *Builder {
	return builder.With(Report)
}

// Bold is a syntax for Bold.
func (builder *Builder) Bold() *Builder {
	return builder.With(Bold)
}

// Faint is a syntax for Faint.
func (builder *Builder) Faint() *Builder {
	return builder.With(Faint)
}

// Italic is a syntax for Italic.
func (builder *Builder) Italic() *Builder {
	return builder.With(Italic)
}

// Underline is a syntax for Underline.
func (builder *Builder) Underline() *Builder {
	return builder.With(Underline)
}

// BlinkSlow is a syntax for BlinkSlow.
func (builder *Builder) BlinkSlow() *Builder {
	return builder.With(BlinkSlow)
}

// BlinkRapid is a syntax for BlinkRapid.
func (builder *Builder) BlinkRapid() *Builder {
	return builder.With(BlinkRapid)
}

// Inverse is a syntax for Inverse.
func (builder *Builder) Inverse() *Builder {
	return builder.With(Inverse)
}

// Conceal is a syntax for Conceal.
func (builder *Builder) Conceal() *Builder {
	return builder.With(Conceal)
}

// CrossOut is a syntax for CrossOut.
func (builder *Builder) CrossOut() *Builder {
	return builder.With(CrossOut)
}

// BlackF is a syntax for BlackF.
func (builder *Builder) BlackF() *Builder {
	return builder.With(BlackF)
}

// RedF is a syntax for RedF.
func (builder *Builder) RedF() *Builder {
	return builder.With(RedF)
}

// GreenF is a syntax for GreenF.
func (builder *Builder) GreenF() *Builder {
	return builder.With(GreenF)
}

// YellowF is a syntax for YellowF.
func (builder *Builder) YellowF() *Builder {
	return builder.With(YellowF)
}

// BlueF is a syntax for BlueF.
func (builder *Builder) BlueF() *Builder {
	return builder.With(BlueF)
}

// MagentaF is a syntax for MagentaF.
func (builder *Builder) MagentaF() *Builder {
	return builder.With(MagentaF)
}

// CyanF is a syntax for CyanF.
func (builder *Builder) CyanF() *Builder {
	return builder.With(CyanF)
}

// WhiteF is a syntax for WhiteF.
func (builder *Builder) WhiteF() *Builder {
	return builder.With(WhiteF)
}

// DefaultF is a syntax for DefaultF.
func (builder *Builder) DefaultF() *Builder {
	return builder.With(DefaultF)
}

// BlackB is a syntax for BlackB.
func (builder *Builder) BlackB() *Builder {
	return builder.With(BlackB)
}

// RedB is a syntax for RedB.
func (builder *Builder) RedB() *Builder {
	return builder.With(RedB)
}

// GreenB is a syntax for GreenB.
func (builder *Builder) GreenB() *Builder {
	return builder.With(GreenB)
}

// YellowB is a syntax for YellowB.
func (builder *Builder) YellowB() *Builder {
	return builder.With(YellowB)
}

// BlueB is a syntax for BlueB.
func (builder *Builder) BlueB() *Builder {
	return builder.With(BlueB)
}

// MagentaB is a syntax for MagentaB.
func (builder *Builder) MagentaB() *Builder {
	return builder.With(MagentaB)
}

// CyanB is a syntax for CyanB.
func (builder *Builder) CyanB() *Builder {
	return builder.With(CyanB)
}

// WhiteB is a syntax for WhiteB.
func (builder *Builder) WhiteB() *Builder {
	return builder.With(WhiteB)
}

// DefaultB is a syntax for DefaultB.
func (builder *Builder) DefaultB() *Builder {
	return builder.With(DefaultB)
}

// Frame is a syntax for Frame.
func (builder *Builder) Frame() *Builder {
	return builder.With(Frame)
}

// Encircle is a syntax for Encircle.
func (builder *Builder) Encircle() *Builder {
	return builder.With(Encircle)
}

// Overline is a syntax for Overline.
func (builder *Builder) Overline() *Builder {
	return builder.With(Overline)
}

// LightBlackF is a syntax for LightBlackF.
func (builder *Builder) LightBlackF() *Builder {
	return builder.With(LightBlackF)
}

// LightRedF is a syntax for LightRedF.
func (builder *Builder) LightRedF() *Builder {
	return builder.With(LightRedF)
}

// LightGreenF is a syntax for LightGreenF.
func (builder *Builder) LightGreenF() *Builder {
	return builder.With(LightGreenF)
}

// LightYellowF is a syntax for LightYellowF.
func (builder *Builder) LightYellowF() *Builder {
	return builder.With(LightYellowF)
}

// LightBlueF is a syntax for LightBlueF.
func (builder *Builder) LightBlueF() *Builder {
	return builder.With(LightBlueF)
}

// LightMagentaF is a syntax for LightMagentaF.
func (builder *Builder) LightMagentaF() *Builder {
	return builder.With(LightMagentaF)
}

// LightCyanF is a syntax for LightCyanF.
func (builder *Builder) LightCyanF() *Builder {
	return builder.With(LightCyanF)
}

// LightWhiteF is a syntax for LightWhiteF.
func (builder *Builder) LightWhiteF() *Builder {
	return builder.With(LightWhiteF)
}

// LightBlackB is a syntax for LightBlackB.
func (builder *Builder) LightBlackB() *Builder {
	return builder.With(LightBlackB)
}

// LightRedB is a syntax for LightRedB.
func (builder *Builder) LightRedB() *Builder {
	return builder.With(LightRedB)
}

// LightGreenB is a syntax for LightGreenB.
func (builder *Builder) LightGreenB() *Builder {
	return builder.With(LightGreenB)
}

// LightYellowB is a syntax for LightYellowB.
func (builder *Builder) LightYellowB() *Builder {
	return builder.With(LightYellowB)
}

// LightBlueB is a syntax for LightBlueB.
func (builder *Builder) LightBlueB() *Builder {
	return builder.With(LightBlueB)
}

// LightMagentaB is a syntax for LightMagentaB.
func (builder *Builder) LightMagentaB() *Builder {
	return builder.With(LightMagentaB)
}

// LightCyanB is a syntax for LightCyanB.
func (builder *Builder) LightCyanB() *Builder {
	return builder.With(LightCyanB)
}

// LightWhiteB is a syntax for LightWhiteB.
func (builder *Builder) LightWhiteB() *Builder {
	return builder.With(LightWhiteB)
}

// Color3BitF is a syntax for Color3BitF.
func (builder *Builder) Color3BitF(c RGB3Bit) *Builder {
	return builder.With(Color3BitF(c))
}

// Color3BitB is a syntax for Color3BitB.
func (builder *Builder) Color3BitB(c RGB3Bit) *Builder {
	return builder.With(Color3BitB(c))
}

// Color8BitF is a syntax for Color8BitF.
func (builder *Builder) Color8BitF(c RGB8Bit) *Builder {
	return builder.With(Color8BitF(c))
}

// Color8BitB is a syntax for Color8BitB.
func (builder *Builder) Color8BitB(c RGB8Bit) *Builder {
	return builder.With(Color8BitB(c))
}

// FullColorF is a syntax for FullColorF.
func (builder *Builder) FullColorF(r, g, b uint8) *Builder {
	return builder.With(FullColorF(r, g, b))
}

// FullColorB is a syntax for FullColorB.
func (builder *Builder) FullColorB(r, g, b uint8) *Builder {
	return builder.With(FullColorB(r, g, b))
}

// RGB3BitF is a syntax for Color3BitF with NewRGB3Bit.
func (builder *Builder) RGB3BitF(r, g, b uint8) *Builder {
	return builder.Color3BitF(NewRGB3Bit(r, g, b))
}

// RGB3BitB is a syntax for Color3BitB with NewRGB3Bit.
func (builder *Builder) RGB3BitB(r, g, b uint8) *Builder {
	return builder.Color3BitB(NewRGB3Bit(r, g, b))
}

// RGB8BitF is a syntax for Color8BitF with NewRGB8Bit.
func (builder *Builder) RGB8BitF(r, g, b uint8) *Builder {
	return builder.Color8BitF(NewRGB8Bit(r, g, b))
}

// RGB8BitB is a syntax for Color8BitB with NewRGB8Bit.
func (builder *Builder) RGB8BitB(r, g, b uint8) *Builder {
	return builder.Color8BitB(NewRGB8Bit(r, g, b))
}

func init() {
	EmptyBuilder = &Builder{empty}
}
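
A quick usage sketch of the builder API above, not something this commit adds: it assumes the vendored package behaves like upstream morikuni/aec, where Builder exposes its accumulated escape code through the exported ANSI field and ANSI values implement Apply(string) string.

package main

import (
	"fmt"

	"github.com/morikuni/aec"
)

func main() {
	// Chain syntaxes fluently; every call just wraps Builder.With,
	// starting from the EmptyBuilder set up in the init above.
	warn := aec.EmptyBuilder.Bold().LightYellowF().ANSI
	fmt.Println(warn.Apply("pulling image layers..."))

	// RGB8BitF quantizes 24-bit RGB down to the 256-color palette.
	accent := aec.EmptyBuilder.RGB8BitF(95, 135, 255).ANSI
	fmt.Println(accent.Apply("vendored at v19.03.8"))
}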
BIN vendor/github.com/morikuni/aec/sample.gif (Stored with Git LFS) generated vendored Normal file
Binary file not shown.
202 vendor/github.com/morikuni/aec/sgr.go generated vendored Normal file
@ -0,0 +1,202 @@
package aec

import (
	"fmt"
)

// RGB3Bit is a 3bit RGB color.
type RGB3Bit uint8

// RGB8Bit is a 8bit RGB color.
type RGB8Bit uint8

func newSGR(n uint) ANSI {
	return newAnsi(fmt.Sprintf(esc+"%dm", n))
}

// NewRGB3Bit create a RGB3Bit from given RGB.
func NewRGB3Bit(r, g, b uint8) RGB3Bit {
	return RGB3Bit((r >> 7) | ((g >> 6) & 0x2) | ((b >> 5) & 0x4))
}

// NewRGB8Bit create a RGB8Bit from given RGB.
func NewRGB8Bit(r, g, b uint8) RGB8Bit {
	return RGB8Bit(16 + 36*(r/43) + 6*(g/43) + b/43)
}

// Color3BitF set the foreground color of text.
func Color3BitF(c RGB3Bit) ANSI {
	return newAnsi(fmt.Sprintf(esc+"%dm", c+30))
}

// Color3BitB set the background color of text.
func Color3BitB(c RGB3Bit) ANSI {
	return newAnsi(fmt.Sprintf(esc+"%dm", c+40))
}

// Color8BitF set the foreground color of text.
func Color8BitF(c RGB8Bit) ANSI {
	return newAnsi(fmt.Sprintf(esc+"38;5;%dm", c))
}

// Color8BitB set the background color of text.
func Color8BitB(c RGB8Bit) ANSI {
	return newAnsi(fmt.Sprintf(esc+"48;5;%dm", c))
}

// FullColorF set the foreground color of text.
func FullColorF(r, g, b uint8) ANSI {
	return newAnsi(fmt.Sprintf(esc+"38;2;%d;%d;%dm", r, g, b))
}

// FullColorB set the background color of text.
func FullColorB(r, g, b uint8) ANSI {
	return newAnsi(fmt.Sprintf(esc+"48;2;%d;%d;%dm", r, g, b))
}

// Style
var (
	// Bold set the text style to bold or increased intensity.
	Bold ANSI

	// Faint set the text style to faint.
	Faint ANSI

	// Italic set the text style to italic.
	Italic ANSI

	// Underline set the text style to underline.
	Underline ANSI

	// BlinkSlow set the text style to slow blink.
	BlinkSlow ANSI

	// BlinkRapid set the text style to rapid blink.
	BlinkRapid ANSI

	// Inverse swap the foreground color and background color.
	Inverse ANSI

	// Conceal set the text style to conceal.
	Conceal ANSI

	// CrossOut set the text style to crossed out.
	CrossOut ANSI

	// Frame set the text style to framed.
	Frame ANSI

	// Encircle set the text style to encircled.
	Encircle ANSI

	// Overline set the text style to overlined.
	Overline ANSI
)

// Foreground color of text.
var (
	// DefaultF is the default color of foreground.
	DefaultF ANSI

	// Normal color
	BlackF   ANSI
	RedF     ANSI
	GreenF   ANSI
	YellowF  ANSI
	BlueF    ANSI
	MagentaF ANSI
	CyanF    ANSI
	WhiteF   ANSI

	// Light color
	LightBlackF   ANSI
	LightRedF     ANSI
	LightGreenF   ANSI
	LightYellowF  ANSI
	LightBlueF    ANSI
	LightMagentaF ANSI
	LightCyanF    ANSI
	LightWhiteF   ANSI
)

// Background color of text.
var (
	// DefaultB is the default color of background.
	DefaultB ANSI

	// Normal color
	BlackB   ANSI
	RedB     ANSI
	GreenB   ANSI
	YellowB  ANSI
	BlueB    ANSI
	MagentaB ANSI
	CyanB    ANSI
	WhiteB   ANSI

	// Light color
	LightBlackB   ANSI
	LightRedB     ANSI
	LightGreenB   ANSI
	LightYellowB  ANSI
	LightBlueB    ANSI
	LightMagentaB ANSI
	LightCyanB    ANSI
	LightWhiteB   ANSI
)

func init() {
	Bold = newSGR(1)
	Faint = newSGR(2)
	Italic = newSGR(3)
	Underline = newSGR(4)
	BlinkSlow = newSGR(5)
	BlinkRapid = newSGR(6)
	Inverse = newSGR(7)
	Conceal = newSGR(8)
	CrossOut = newSGR(9)

	BlackF = newSGR(30)
	RedF = newSGR(31)
	GreenF = newSGR(32)
	YellowF = newSGR(33)
	BlueF = newSGR(34)
	MagentaF = newSGR(35)
	CyanF = newSGR(36)
	WhiteF = newSGR(37)

	DefaultF = newSGR(39)

	BlackB = newSGR(40)
	RedB = newSGR(41)
	GreenB = newSGR(42)
	YellowB = newSGR(43)
	BlueB = newSGR(44)
	MagentaB = newSGR(45)
	CyanB = newSGR(46)
	WhiteB = newSGR(47)

	DefaultB = newSGR(49)

	Frame = newSGR(51)
	Encircle = newSGR(52)
	Overline = newSGR(53)

	LightBlackF = newSGR(90)
	LightRedF = newSGR(91)
	LightGreenF = newSGR(92)
	LightYellowF = newSGR(93)
	LightBlueF = newSGR(94)
	LightMagentaF = newSGR(95)
	LightCyanF = newSGR(96)
	LightWhiteF = newSGR(97)

	LightBlackB = newSGR(100)
	LightRedB = newSGR(101)
	LightGreenB = newSGR(102)
	LightYellowB = newSGR(103)
	LightBlueB = newSGR(104)
	LightMagentaB = newSGR(105)
	LightCyanB = newSGR(106)
	LightWhiteB = newSGR(107)
}
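
For reference, a short sketch (not part of the committed file) of how the SGR constructors above compose, assuming upstream aec's ANSI interface with With and Apply. NewRGB8Bit maps each channel into a 6x6x6 cube, so (255, 128, 0) quantizes to palette index 16 + 36*5 + 6*2 + 0 = 208.

package main

import (
	"fmt"

	"github.com/morikuni/aec"
)

func main() {
	// 24-bit foreground color: emits ESC[38;2;255;128;0m.
	orangeTrue := aec.FullColorF(255, 128, 0)

	// The same color reduced to the 256-color palette: ESC[38;5;208m.
	orange256 := aec.Color8BitF(aec.NewRGB8Bit(255, 128, 0))

	fmt.Println(orangeTrue.Apply("true-color text"))
	fmt.Println(aec.Bold.With(orange256).Apply("bold 256-color text"))
}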
78 vendor/vendor.json vendored
|
||||||
{"path":"github.com/coreos/pkg/dlopen","checksumSHA1":"O8c/VKtW34XPJNNlyeb/im8vWSI=","revision":"399ea9e2e55f791b6e3d920860dbecb99c3692f0","revisionTime":"2018-09-28T19:01:04Z"},
|
{"path":"github.com/coreos/pkg/dlopen","checksumSHA1":"O8c/VKtW34XPJNNlyeb/im8vWSI=","revision":"399ea9e2e55f791b6e3d920860dbecb99c3692f0","revisionTime":"2018-09-28T19:01:04Z"},
|
||||||
{"path":"github.com/cyphar/filepath-securejoin","checksumSHA1":"Ov+ja3hmONKxbhq9HAgK/vHSi/0=","revision":"7efe413b52e1bceaaee7efafebe396f9d648f258","revisionTime":"2019-02-05T14:40:30Z"},
|
{"path":"github.com/cyphar/filepath-securejoin","checksumSHA1":"Ov+ja3hmONKxbhq9HAgK/vHSi/0=","revision":"7efe413b52e1bceaaee7efafebe396f9d648f258","revisionTime":"2019-02-05T14:40:30Z"},
|
||||||
{"path":"github.com/davecgh/go-spew/spew","checksumSHA1":"mrz/kicZiUaHxkyfvC/DyQcr8Do=","revision":"ecdeabc65495df2dec95d7c4a4c3e021903035e5","revisionTime":"2017-10-02T20:02:53Z"},
|
{"path":"github.com/davecgh/go-spew/spew","checksumSHA1":"mrz/kicZiUaHxkyfvC/DyQcr8Do=","revision":"ecdeabc65495df2dec95d7c4a4c3e021903035e5","revisionTime":"2017-10-02T20:02:53Z"},
|
||||||
{"path":"github.com/docker/cli/cli/config/configfile","checksumSHA1":"wf9Rn3a9cPag5B9Dd+qHHEink+I=","revision":"67f9a3912cf944cf71b31f3fc14e3f2a18d95802","revisionTime":"2018-08-14T14:54:37Z","version":"v18.06.1-ce","versionExact":"v18.06.1-ce"},
|
{"path":"github.com/docker/cli/cli/config/configfile","checksumSHA1":"7B3J+qGjLaXOfpdpBqP2jlye8EA=","revision":"eb310fca49568dccd87c6136f774ef6fff2a1b51","revisionTime":"2020-03-03T21:59:52Z","version":"v19.03.8","versionExact":"v19.03.8"},
|
||||||
{"path":"github.com/docker/cli/cli/config/credentials","checksumSHA1":"fJpuGdxgATGNHm+INOPNVIhBnj0=","revision":"deb84a9e4e10b590e6de6aa6081532c87a5a2cfe","revisionTime":"2018-08-29T13:09:58Z"},
|
{"path":"github.com/docker/cli/cli/config/credentials","checksumSHA1":"2k151rs1rXrCrv3TK6GugXAd2h0=","revision":"eb310fca49568dccd87c6136f774ef6fff2a1b51","revisionTime":"2020-03-03T21:59:52Z","version":"v19.03.8","versionExact":"v19.03.8"},
|
||||||
{"path":"github.com/docker/cli/opts","checksumSHA1":"+yq5Rc1QTapDrr151x0m5ANZZeY=","revision":"67f9a3912cf944cf71b31f3fc14e3f2a18d95802","revisionTime":"2018-08-14T14:54:37Z","version":"v18.06.1-ce","versionExact":"v18.06.1-ce"},
|
{"path":"github.com/docker/cli/cli/config/types","checksumSHA1":"zNiAwPvs7/RpI1cIw25lNhqLAsI=","revision":"eb310fca49568dccd87c6136f774ef6fff2a1b51","revisionTime":"2020-03-03T21:59:52Z","version":"v19.03.8","versionExact":"v19.03.8"},
|
||||||
{"path":"github.com/docker/distribution","checksumSHA1":"ae06MP/1OVwQ/s/PsEp9wxfnBXM=","revision":"b12bd4004afc203f1cbd2072317c8fda30b89710","revisionTime":"2018-08-28T23:03:05Z"},
|
{"path":"github.com/docker/distribution","checksumSHA1":"ae06MP/1OVwQ/s/PsEp9wxfnBXM=","revision":"b12bd4004afc203f1cbd2072317c8fda30b89710","revisionTime":"2018-08-28T23:03:05Z"},
|
||||||
{"path":"github.com/docker/distribution/digestset","checksumSHA1":"Gj+xR1VgFKKmFXYOJMnAczC3Znk=","revision":"83389a148052d74ac602f5f1d62f86ff2f3c4aa5","revisionTime":"2018-03-27T20:24:08Z"},
|
{"path":"github.com/docker/distribution/digestset","checksumSHA1":"Gj+xR1VgFKKmFXYOJMnAczC3Znk=","revision":"83389a148052d74ac602f5f1d62f86ff2f3c4aa5","revisionTime":"2018-03-27T20:24:08Z"},
|
||||||
{"path":"github.com/docker/distribution/metrics","checksumSHA1":"yqCaL8oUi3OlR/Mr4oHB5HKpstc=","revision":"b12bd4004afc203f1cbd2072317c8fda30b89710","revisionTime":"2018-08-28T23:03:05Z"},
|
{"path":"github.com/docker/distribution/metrics","checksumSHA1":"yqCaL8oUi3OlR/Mr4oHB5HKpstc=","revision":"b12bd4004afc203f1cbd2072317c8fda30b89710","revisionTime":"2018-08-28T23:03:05Z"},
|
||||||
|
@ -116,42 +116,43 @@
|
||||||
{"path":"github.com/docker/distribution/registry/storage/cache/memory","checksumSHA1":"T8G3A63WALmJ3JT/A0r01LG4KI0=","revision":"b12bd4004afc203f1cbd2072317c8fda30b89710","revisionTime":"2018-08-28T23:03:05Z"},
|
{"path":"github.com/docker/distribution/registry/storage/cache/memory","checksumSHA1":"T8G3A63WALmJ3JT/A0r01LG4KI0=","revision":"b12bd4004afc203f1cbd2072317c8fda30b89710","revisionTime":"2018-08-28T23:03:05Z"},
|
||||||
{"path":"github.com/docker/docker-credential-helpers/client","checksumSHA1":"zcDmNPSzI1wVokOiHis5+JSg2Rk=","revision":"73e5f5dbfea31ee3b81111ebbf189785fa69731c","revisionTime":"2018-07-19T07:47:51Z"},
|
{"path":"github.com/docker/docker-credential-helpers/client","checksumSHA1":"zcDmNPSzI1wVokOiHis5+JSg2Rk=","revision":"73e5f5dbfea31ee3b81111ebbf189785fa69731c","revisionTime":"2018-07-19T07:47:51Z"},
|
||||||
{"path":"github.com/docker/docker-credential-helpers/credentials","checksumSHA1":"4u6EMQqD1zIqOHp76zQFLVH5V8U=","revision":"73e5f5dbfea31ee3b81111ebbf189785fa69731c","revisionTime":"2018-07-19T07:47:51Z"},
|
{"path":"github.com/docker/docker-credential-helpers/credentials","checksumSHA1":"4u6EMQqD1zIqOHp76zQFLVH5V8U=","revision":"73e5f5dbfea31ee3b81111ebbf189785fa69731c","revisionTime":"2018-07-19T07:47:51Z"},
|
||||||
{"path":"github.com/docker/docker/api/types","checksumSHA1":"3l48PzQYRyVBD3qRJ0x84/q5C2E=","origin":"github.com/docker/engine/api/types","revision":"9552f2b2fddeb0c2537b350f4b159ffe525d7a42","revisionTime":"2019-08-22T18:07:41Z","version":"v18.09.9","versionExact":"v18.09.9"},
|
{"path":"github.com/docker/docker/api/types","checksumSHA1":"mFZrlxHiQuq52BGccWNl/bO1GN0=","origin":"github.com/moby/moby/api/types","revision":"aa6a9891b09cce3d9004121294301a30d45d998d","revisionTime":"2020-01-17T19:55:42Z","version":"v19.03.8","versionExact":"v19.03.8"},
|
||||||
{"path":"github.com/docker/docker/api/types/blkiodev","checksumSHA1":"/jF0HVFiLzUUuywSjp4F/piM7BM=","origin":"github.com/docker/engine/api/types/blkiodev","revision":"9552f2b2fddeb0c2537b350f4b159ffe525d7a42","revisionTime":"2019-08-22T18:07:41Z","version":"v18.09.9","versionExact":"v18.09.9"},
|
{"path":"github.com/docker/docker/api/types/blkiodev","checksumSHA1":"/jF0HVFiLzUUuywSjp4F/piM7BM=","origin":"github.com/moby/moby/api/types/blkiodev","revision":"aa6a9891b09cce3d9004121294301a30d45d998d","revisionTime":"2020-01-17T19:55:42Z","version":"v19.03.8","versionExact":"v19.03.8"},
|
||||||
{"path":"github.com/docker/docker/api/types/container","checksumSHA1":"lsxFU6qegOtXClSTthOvfPtly5I=","origin":"github.com/docker/engine/api/types/container","revision":"9552f2b2fddeb0c2537b350f4b159ffe525d7a42","revisionTime":"2019-08-22T18:07:41Z","version":"v18.09.9","versionExact":"v18.09.9"},
|
{"path":"github.com/docker/docker/api/types/container","checksumSHA1":"rBGoI39KB5EQNaYMa3atjIa2LcY=","origin":"github.com/moby/moby/api/types/container","revision":"aa6a9891b09cce3d9004121294301a30d45d998d","revisionTime":"2020-01-17T19:55:42Z","version":"v19.03.8","versionExact":"v19.03.8"},
|
||||||
{"path":"github.com/docker/docker/api/types/filters","checksumSHA1":"y9EA6+kZQLx6kCM277CFHTm4eiw=","origin":"github.com/docker/engine/api/types/filters","revision":"9552f2b2fddeb0c2537b350f4b159ffe525d7a42","revisionTime":"2019-08-22T18:07:41Z","version":"v18.09.9","versionExact":"v18.09.9"},
|
{"path":"github.com/docker/docker/api/types/filters","checksumSHA1":"XcXpxlu8Ewt+vbVjZuMnDXG/Z8M=","origin":"github.com/moby/moby/api/types/filters","revision":"aa6a9891b09cce3d9004121294301a30d45d998d","revisionTime":"2020-01-17T19:55:42Z","version":"v19.03.8","versionExact":"v19.03.8"},
|
||||||
{"path":"github.com/docker/docker/api/types/mount","checksumSHA1":"k9CaJVvYL7SxcIP72ng/YcOuF9k=","origin":"github.com/docker/engine/api/types/mount","revision":"9552f2b2fddeb0c2537b350f4b159ffe525d7a42","revisionTime":"2019-08-22T18:07:41Z","version":"v18.09.9","versionExact":"v18.09.9"},
|
{"path":"github.com/docker/docker/api/types/mount","checksumSHA1":"9OClWW7OCikgz4QCS/sAVcvqcWk=","origin":"github.com/moby/moby/api/types/mount","revision":"aa6a9891b09cce3d9004121294301a30d45d998d","revisionTime":"2020-01-17T19:55:42Z","version":"v19.03.8","versionExact":"v19.03.8"},
|
||||||
{"path":"github.com/docker/docker/api/types/network","checksumSHA1":"qZNE4g8YWfV6ryZp8kN9BwWYCeM=","origin":"github.com/docker/engine/api/types/network","revision":"9552f2b2fddeb0c2537b350f4b159ffe525d7a42","revisionTime":"2019-08-22T18:07:41Z","version":"v18.09.9","versionExact":"v18.09.9"},
|
{"path":"github.com/docker/docker/api/types/network","checksumSHA1":"00k6FhkdRZ+TEiPPsUPAY594bCw=","origin":"github.com/moby/moby/api/types/network","revision":"aa6a9891b09cce3d9004121294301a30d45d998d","revisionTime":"2020-01-17T19:55:42Z","version":"v19.03.8","versionExact":"v19.03.8"},
|
||||||
{"path":"github.com/docker/docker/api/types/registry","checksumSHA1":"m4Jg5WnW75I65nvkEno8PElSXik=","origin":"github.com/docker/engine/api/types/registry","revision":"9552f2b2fddeb0c2537b350f4b159ffe525d7a42","revisionTime":"2019-08-22T18:07:41Z","version":"v18.09.9","versionExact":"v18.09.9"},
|
{"path":"github.com/docker/docker/api/types/registry","checksumSHA1":"m4Jg5WnW75I65nvkEno8PElSXik=","origin":"github.com/moby/moby/api/types/registry","revision":"aa6a9891b09cce3d9004121294301a30d45d998d","revisionTime":"2020-01-17T19:55:42Z","version":"v19.03.8","versionExact":"v19.03.8"},
|
||||||
{"path":"github.com/docker/docker/api/types/strslice","checksumSHA1":"OQEUS/2J2xVHpfvcsxcXzYqBSeY=","origin":"github.com/docker/engine/api/types/strslice","revision":"9552f2b2fddeb0c2537b350f4b159ffe525d7a42","revisionTime":"2019-08-22T18:07:41Z","version":"v18.09.9","versionExact":"v18.09.9"},
|
{"path":"github.com/docker/docker/api/types/strslice","checksumSHA1":"OQEUS/2J2xVHpfvcsxcXzYqBSeY=","origin":"github.com/moby/moby/api/types/strslice","revision":"aa6a9891b09cce3d9004121294301a30d45d998d","revisionTime":"2020-01-17T19:55:42Z","version":"v19.03.8","versionExact":"v19.03.8"},
|
||||||
{"path":"github.com/docker/docker/api/types/swarm","checksumSHA1":"hHIt7htGk3uVtYCQid713a752Ik=","origin":"github.com/docker/engine/api/types/swarm","revision":"9552f2b2fddeb0c2537b350f4b159ffe525d7a42","revisionTime":"2019-08-22T18:07:41Z","version":"v18.09.9","versionExact":"v18.09.9"},
|
{"path":"github.com/docker/docker/api/types/swarm","checksumSHA1":"lyByEOaPKxCLcBvrXmt3VRw1PAI=","origin":"github.com/moby/moby/api/types/swarm","revision":"aa6a9891b09cce3d9004121294301a30d45d998d","revisionTime":"2020-01-17T19:55:42Z","version":"v19.03.8","versionExact":"v19.03.8"},
|
||||||
{"path":"github.com/docker/docker/api/types/swarm/runtime","checksumSHA1":"txs5EKTbKgVyKmKKSnaH3fr+odA=","origin":"github.com/docker/engine/api/types/swarm/runtime","revision":"9552f2b2fddeb0c2537b350f4b159ffe525d7a42","revisionTime":"2019-08-22T18:07:41Z","version":"v18.09.9","versionExact":"v18.09.9"},
|
{"path":"github.com/docker/docker/api/types/swarm/runtime","checksumSHA1":"txs5EKTbKgVyKmKKSnaH3fr+odA=","origin":"github.com/moby/moby/api/types/swarm/runtime","revision":"aa6a9891b09cce3d9004121294301a30d45d998d","revisionTime":"2020-01-17T19:55:42Z","version":"v19.03.8","versionExact":"v19.03.8"},
|
||||||
{"path":"github.com/docker/docker/api/types/versions","checksumSHA1":"MZsgRjJJ0D/gAsXfKiEys+op6dE=","origin":"github.com/docker/engine/api/types/versions","revision":"9552f2b2fddeb0c2537b350f4b159ffe525d7a42","revisionTime":"2019-08-22T18:07:41Z","version":"v18.09.9","versionExact":"v18.09.9"},
|
{"path":"github.com/docker/docker/api/types/versions","checksumSHA1":"MZsgRjJJ0D/gAsXfKiEys+op6dE=","origin":"github.com/moby/moby/api/types/versions","revision":"aa6a9891b09cce3d9004121294301a30d45d998d","revisionTime":"2020-01-17T19:55:42Z","version":"v19.03.8","versionExact":"v19.03.8"},
|
||||||
{"path":"github.com/docker/docker/daemon/caps","checksumSHA1":"rzKSF5uuCNgizeLUf5FkUpIXOVk=","origin":"github.com/docker/engine/daemon/caps","revision":"9552f2b2fddeb0c2537b350f4b159ffe525d7a42","revisionTime":"2019-08-22T18:07:41Z","version":"v18.09.9","versionExact":"v18.09.9"},
|
{"path":"github.com/docker/docker/errdefs","checksumSHA1":"q4R77xtScr+W3m77Otw6kr34ktg=","revision":"82e3c0c30336310de2fce7a8949dfb10bc082edd","revisionTime":"2020-03-26T18:48:34Z"},
|
||||||
{"path":"github.com/docker/docker/errdefs","checksumSHA1":"q4R77xtScr+W3m77Otw6kr34ktg=","revision":"37defbfd9b968f38e8e15dfa5f06d9f878bd65ba","revisionTime":"2020-03-13T22:45:19Z"},
|
{"path":"github.com/docker/docker/oci/caps","checksumSHA1":"xUqupdS1MfBMyhwTDQGjxOq/Bug=","origin":"github.com/moby/moby/oci/caps","revision":"aa6a9891b09cce3d9004121294301a30d45d998d","revisionTime":"2020-01-17T19:55:42Z","version":"v19.03.8","versionExact":"v19.03.8"},
|
||||||
{"path":"github.com/docker/docker/opts","checksumSHA1":"u6EOrZRfhdjr4up14b2JJ7MMMaY=","origin":"github.com/docker/engine/opts","revision":"9552f2b2fddeb0c2537b350f4b159ffe525d7a42","revisionTime":"2019-08-22T18:07:41Z","version":"v18.09.9","versionExact":"v18.09.9"},
|
{"path":"github.com/docker/docker/opts","checksumSHA1":"dFf9rWD7Ous9YKO0udunqNZEaXw=","origin":"github.com/moby/moby/opts","revision":"aa6a9891b09cce3d9004121294301a30d45d998d","revisionTime":"2020-01-17T19:55:42Z","version":"v19.03.8","versionExact":"v19.03.8"},
|
||||||
{"path":"github.com/docker/docker/pkg/archive","checksumSHA1":"398RRnhPhHTnar6mONbDm0Qw44U=","origin":"github.com/docker/engine/pkg/archive","revision":"9552f2b2fddeb0c2537b350f4b159ffe525d7a42","revisionTime":"2019-08-22T18:07:41Z","version":"v18.09.9","versionExact":"v18.09.9"},
|
{"path":"github.com/docker/docker/pkg/archive","checksumSHA1":"R4EHRKN+Xu8AhENs0xak3mZm4vg=","origin":"github.com/moby/moby/pkg/archive","revision":"aa6a9891b09cce3d9004121294301a30d45d998d","revisionTime":"2020-01-17T19:55:42Z","version":"v19.03.8","versionExact":"v19.03.8"},
|
||||||
{"path":"github.com/docker/docker/pkg/fileutils","checksumSHA1":"eMoRb/diYeuYLojU7ChN5DaETHc=","revision":"37defbfd9b968f38e8e15dfa5f06d9f878bd65ba","revisionTime":"2020-03-13T22:45:19Z"},
|
{"path":"github.com/docker/docker/pkg/fileutils","checksumSHA1":"eMoRb/diYeuYLojU7ChN5DaETHc=","revision":"82e3c0c30336310de2fce7a8949dfb10bc082edd","revisionTime":"2020-03-26T18:48:34Z"},
|
||||||
{"path":"github.com/docker/docker/pkg/homedir","checksumSHA1":"y37I+5AS96wQSiAxOayiMgnZawA=","origin":"github.com/docker/engine/pkg/homedir","revision":"9552f2b2fddeb0c2537b350f4b159ffe525d7a42","revisionTime":"2019-08-22T18:07:41Z","version":"v18.09.9","versionExact":"v18.09.9"},
|
{"path":"github.com/docker/docker/pkg/homedir","checksumSHA1":"mNR92hhd6LtKaSFtglL/rfl9dDo=","revision":"82e3c0c30336310de2fce7a8949dfb10bc082edd","revisionTime":"2020-03-26T18:48:34Z"},
|
||||||
{"path":"github.com/docker/docker/pkg/idtools","checksumSHA1":"K9OcyoMKNt/w7u4FzhegR1rjnz8=","revision":"37defbfd9b968f38e8e15dfa5f06d9f878bd65ba","revisionTime":"2020-03-13T22:45:19Z"},
|
{"path":"github.com/docker/docker/pkg/idtools","checksumSHA1":"K9OcyoMKNt/w7u4FzhegR1rjnz8=","revision":"82e3c0c30336310de2fce7a8949dfb10bc082edd","revisionTime":"2020-03-26T18:48:34Z"},
|
||||||
{"path":"github.com/docker/docker/pkg/ioutils","checksumSHA1":"vZk7/lVjHDlRDDf5XJbNMock1WI=","revision":"37defbfd9b968f38e8e15dfa5f06d9f878bd65ba","revisionTime":"2020-03-13T22:45:19Z"},
|
{"path":"github.com/docker/docker/pkg/ioutils","checksumSHA1":"vZk7/lVjHDlRDDf5XJbNMock1WI=","revision":"82e3c0c30336310de2fce7a8949dfb10bc082edd","revisionTime":"2020-03-26T18:48:34Z"},
|
||||||
{"path":"github.com/docker/docker/pkg/jsonmessage","checksumSHA1":"KQflv+x9hoJywgvxMwWcJqrmdkQ=","origin":"github.com/docker/engine/pkg/jsonmessage","revision":"9552f2b2fddeb0c2537b350f4b159ffe525d7a42","revisionTime":"2019-08-22T18:07:41Z","version":"v18.09.9","versionExact":"v18.09.9"},
|
{"path":"github.com/docker/docker/pkg/jsonmessage","checksumSHA1":"xX1+9qXSGHg3P/SllPGeAAhlBcE=","origin":"github.com/moby/moby/pkg/jsonmessage","revision":"aa6a9891b09cce3d9004121294301a30d45d998d","revisionTime":"2020-01-17T19:55:42Z","version":"v19.03.8","versionExact":"v19.03.8"},
|
||||||
{"path":"github.com/docker/docker/pkg/longpath","checksumSHA1":"EXiIm2xIL7Ds+YsQUx8Z3eUYPtI=","revision":"37defbfd9b968f38e8e15dfa5f06d9f878bd65ba","revisionTime":"2020-03-13T22:45:19Z"},
|
{"path":"github.com/docker/docker/pkg/longpath","checksumSHA1":"EXiIm2xIL7Ds+YsQUx8Z3eUYPtI=","revision":"82e3c0c30336310de2fce7a8949dfb10bc082edd","revisionTime":"2020-03-26T18:48:34Z"},
|
||||||
{"path":"github.com/docker/docker/pkg/mount","checksumSHA1":"LcjCcFuNe42Dxur5Um1uEp8pq5k=","revision":"37defbfd9b968f38e8e15dfa5f06d9f878bd65ba","revisionTime":"2020-03-13T22:45:19Z"},
|
{"path":"github.com/docker/docker/pkg/mount","checksumSHA1":"LcjCcFuNe42Dxur5Um1uEp8pq5k=","revision":"82e3c0c30336310de2fce7a8949dfb10bc082edd","revisionTime":"2020-03-26T18:48:34Z"},
|
||||||
{"path":"github.com/docker/docker/pkg/pools","checksumSHA1":"Yl6cD918tLOXa0I/iuGiovmszQU=","revision":"37defbfd9b968f38e8e15dfa5f06d9f878bd65ba","revisionTime":"2020-03-13T22:45:19Z"},
|
{"path":"github.com/docker/docker/pkg/pools","checksumSHA1":"Yl6cD918tLOXa0I/iuGiovmszQU=","revision":"82e3c0c30336310de2fce7a8949dfb10bc082edd","revisionTime":"2020-03-26T18:48:34Z"},
|
||||||
{"path":"github.com/docker/docker/pkg/stdcopy","checksumSHA1":"w0waeTRJ1sFygI0dZXH6l9E1c60=","origin":"github.com/docker/engine/pkg/stdcopy","revision":"9552f2b2fddeb0c2537b350f4b159ffe525d7a42","revisionTime":"2019-08-22T18:07:41Z","version":"v18.09.9","versionExact":"v18.09.9"},
|
{"path":"github.com/docker/docker/pkg/stdcopy","checksumSHA1":"w0waeTRJ1sFygI0dZXH6l9E1c60=","origin":"github.com/moby/moby/pkg/stdcopy","revision":"aa6a9891b09cce3d9004121294301a30d45d998d","revisionTime":"2020-01-17T19:55:42Z","version":"v19.03.8","versionExact":"v19.03.8"},
|
||||||
{"path":"github.com/docker/docker/pkg/stringid","checksumSHA1":"THVhMDu12TT7TpGJkazOSxQhmRs=","origin":"github.com/docker/engine/pkg/stringid","revision":"9552f2b2fddeb0c2537b350f4b159ffe525d7a42","revisionTime":"2019-08-22T18:07:41Z","version":"v18.09.9","versionExact":"v18.09.9"},
|
{"path":"github.com/docker/docker/pkg/stringid","checksumSHA1":"THVhMDu12TT7TpGJkazOSxQhmRs=","origin":"github.com/moby/moby/pkg/stringid","revision":"aa6a9891b09cce3d9004121294301a30d45d998d","revisionTime":"2020-01-17T19:55:42Z","version":"v19.03.8","versionExact":"v19.03.8"},
|
||||||
{"path":"github.com/docker/docker/pkg/system","checksumSHA1":"oh3sJYwwHBwqdIqhjK2jwxgrD+I=","revision":"37defbfd9b968f38e8e15dfa5f06d9f878bd65ba","revisionTime":"2020-03-13T22:45:19Z"},
|
{"path":"github.com/docker/docker/pkg/system","checksumSHA1":"oh3sJYwwHBwqdIqhjK2jwxgrD+I=","revision":"82e3c0c30336310de2fce7a8949dfb10bc082edd","revisionTime":"2020-03-26T18:48:34Z"},
|
||||||
{"path":"github.com/docker/docker/pkg/tarsum","checksumSHA1":"I6mTgOFa7NeZpYw2S5342eenRLY=","origin":"github.com/docker/engine/pkg/tarsum","revision":"9552f2b2fddeb0c2537b350f4b159ffe525d7a42","revisionTime":"2019-08-22T18:07:41Z","version":"v18.09.9","versionExact":"v18.09.9"},
|
{"path":"github.com/docker/docker/pkg/tarsum","checksumSHA1":"I6mTgOFa7NeZpYw2S5342eenRLY=","origin":"github.com/moby/moby/pkg/tarsum","revision":"aa6a9891b09cce3d9004121294301a30d45d998d","revisionTime":"2020-01-17T19:55:42Z","version":"v19.03.8","versionExact":"v19.03.8"},
|
||||||
{"path":"github.com/docker/docker/pkg/term","checksumSHA1":"XRmmAW8XT4s/T5aWgyJ/zoZ6UDY=","revision":"37defbfd9b968f38e8e15dfa5f06d9f878bd65ba","revisionTime":"2020-03-13T22:45:19Z"},
|
{"path":"github.com/docker/docker/pkg/term","checksumSHA1":"XRmmAW8XT4s/T5aWgyJ/zoZ6UDY=","revision":"82e3c0c30336310de2fce7a8949dfb10bc082edd","revisionTime":"2020-03-26T18:48:34Z"},
|
||||||
{"path":"github.com/docker/docker/pkg/term/windows","checksumSHA1":"TcF/eOWcG/6Knb6pbJgcwLoIwaw=","revision":"37defbfd9b968f38e8e15dfa5f06d9f878bd65ba","revisionTime":"2020-03-13T22:45:19Z"},
|
{"path":"github.com/docker/docker/pkg/term/windows","checksumSHA1":"TcF/eOWcG/6Knb6pbJgcwLoIwaw=","revision":"82e3c0c30336310de2fce7a8949dfb10bc082edd","revisionTime":"2020-03-26T18:48:34Z"},
|
||||||
{"path":"github.com/docker/docker/registry","checksumSHA1":"8h7Jtudbhfhp/fEhkcAcod54i1E=","origin":"github.com/docker/engine/registry","revision":"9552f2b2fddeb0c2537b350f4b159ffe525d7a42","revisionTime":"2019-08-22T18:07:41Z","version":"v18.09.9","versionExact":"v18.09.9"},
|
{"path":"github.com/docker/docker/registry","checksumSHA1":"su5cICFANqJpcZnKUtD457fVtqc=","origin":"github.com/moby/moby/registry","revision":"aa6a9891b09cce3d9004121294301a30d45d998d","revisionTime":"2020-01-17T19:55:42Z","version":"v19.03.8","versionExact":"v19.03.8"},
|
||||||
{"path":"github.com/docker/docker/registry/resumable","checksumSHA1":"jH7uQnDehFQygPP3zLC/mLSqgOk=","origin":"github.com/docker/engine/registry/resumable","revision":"9552f2b2fddeb0c2537b350f4b159ffe525d7a42","revisionTime":"2019-08-22T18:07:41Z","version":"v18.09.9","versionExact":"v18.09.9"},
|
{"path":"github.com/docker/docker/registry/resumable","checksumSHA1":"jH7uQnDehFQygPP3zLC/mLSqgOk=","origin":"github.com/moby/moby/registry/resumable","revision":"aa6a9891b09cce3d9004121294301a30d45d998d","revisionTime":"2020-01-17T19:55:42Z","version":"v19.03.8","versionExact":"v19.03.8"},
|
||||||
{"path":"github.com/docker/docker/volume","checksumSHA1":"Bs344j8rU7oCQyIcIhO9FJyk3ts=","origin":"github.com/docker/engine/volume","revision":"9552f2b2fddeb0c2537b350f4b159ffe525d7a42","revisionTime":"2019-08-22T18:07:41Z","version":"v18.09.9","versionExact":"v18.09.9"},
|
{"path":"github.com/docker/docker/rootless","checksumSHA1":"NgEtGryOwSLJ6QRlMwNcDLp1zIM=","revision":"82e3c0c30336310de2fce7a8949dfb10bc082edd","revisionTime":"2020-03-26T18:48:34Z"},
|
||||||
{"path":"github.com/docker/docker/volume/mounts","checksumSHA1":"wulvV/MmKqzEtgxcN6LO3PM9riQ=","origin":"github.com/docker/engine/volume/mounts","revision":"9552f2b2fddeb0c2537b350f4b159ffe525d7a42","revisionTime":"2019-08-22T18:07:41Z","version":"v18.09.9","versionExact":"v18.09.9"},
|
{"path":"github.com/docker/docker/volume","checksumSHA1":"Bs344j8rU7oCQyIcIhO9FJyk3ts=","origin":"github.com/moby/moby/volume","revision":"aa6a9891b09cce3d9004121294301a30d45d998d","revisionTime":"2020-01-17T19:55:42Z","version":"v19.03.8","versionExact":"v19.03.8"},
|
||||||
|
{"path":"github.com/docker/docker/volume/mounts","checksumSHA1":"PNKeHho5s98kfdQAd+3+3442luU=","origin":"github.com/moby/moby/volume/mounts","revision":"aa6a9891b09cce3d9004121294301a30d45d998d","revisionTime":"2020-01-17T19:55:42Z","version":"v19.03.8","versionExact":"v19.03.8"},
|
||||||
{"path":"github.com/docker/go-connections/nat","checksumSHA1":"1IPGX6/BnX7QN4DjbBk0UafTB2U=","revision":"7395e3f8aa162843a74ed6d48e79627d9792ac55","revisionTime":"2018-02-28T14:10:15Z","version":"v0.4.0","versionExact":"v0.4.0"},
|
{"path":"github.com/docker/go-connections/nat","checksumSHA1":"1IPGX6/BnX7QN4DjbBk0UafTB2U=","revision":"7395e3f8aa162843a74ed6d48e79627d9792ac55","revisionTime":"2018-02-28T14:10:15Z","version":"v0.4.0","versionExact":"v0.4.0"},
|
||||||
{"path":"github.com/docker/go-connections/sockets","checksumSHA1":"jUfDG3VQsA2UZHvvIXncgiddpYA=","revision":"7395e3f8aa162843a74ed6d48e79627d9792ac55","revisionTime":"2018-02-28T14:10:15Z","version":"v0.4.0","versionExact":"v0.4.0"},
|
{"path":"github.com/docker/go-connections/sockets","checksumSHA1":"jUfDG3VQsA2UZHvvIXncgiddpYA=","revision":"7395e3f8aa162843a74ed6d48e79627d9792ac55","revisionTime":"2018-02-28T14:10:15Z","version":"v0.4.0","versionExact":"v0.4.0"},
|
||||||
{"path":"github.com/docker/go-connections/tlsconfig","checksumSHA1":"KGILLnJybU/+xWJu8rgM4CpYT2M=","origin":"github.com/docker/docker/vendor/github.com/docker/go-connections/tlsconfig","revision":"7395e3f8aa162843a74ed6d48e79627d9792ac55","revisionTime":"2018-02-28T14:10:15Z","version":"v0.4.0","versionExact":"v0.4.0"},
|
{"path":"github.com/docker/go-connections/tlsconfig","checksumSHA1":"KGILLnJybU/+xWJu8rgM4CpYT2M=","revision":"7395e3f8aa162843a74ed6d48e79627d9792ac55","revisionTime":"2018-02-28T14:10:15Z","version":"v0.4.0","versionExact":"v0.4.0"},
|
||||||
{"path":"github.com/docker/go-metrics","checksumSHA1":"kHVt4M5Pfby2dhurp+hZJfQhzVU=","revision":"399ea8c73916000c64c2c76e8da00ca82f8387ab","revisionTime":"2018-02-09T01:25:29Z"},
|
{"path":"github.com/docker/go-metrics","checksumSHA1":"kHVt4M5Pfby2dhurp+hZJfQhzVU=","revision":"399ea8c73916000c64c2c76e8da00ca82f8387ab","revisionTime":"2018-02-09T01:25:29Z"},
|
||||||
{"path":"github.com/docker/go-units","checksumSHA1":"18hmvak2Dc9x5cgKeZ2iApviT7w=","comment":"v0.1.0-23-g5d2041e","revision":"5d2041e26a699eaca682e2ea41c8f891e1060444"},
|
{"path":"github.com/docker/go-units","checksumSHA1":"18hmvak2Dc9x5cgKeZ2iApviT7w=","comment":"v0.1.0-23-g5d2041e","revision":"5d2041e26a699eaca682e2ea41c8f891e1060444"},
|
||||||
{"path":"github.com/docker/libnetwork/ipamutils","checksumSHA1":"X07lwsZTwq6wVkKDAPxyTmimwq8=","origin":"github.com/moby/libnetwork/ipamutils","revision":"ef149a924dfde2e506ea3cb3f617d7d0fa96b8ee","revisionTime":"2020-03-18T18:26:00Z"},
|
{"path":"github.com/docker/libnetwork/ipamutils","checksumSHA1":"X07lwsZTwq6wVkKDAPxyTmimwq8=","origin":"github.com/moby/libnetwork/ipamutils","revision":"ef149a924dfde2e506ea3cb3f617d7d0fa96b8ee","revisionTime":"2020-03-18T18:26:00Z"},
|
||||||
|
@ -251,7 +252,7 @@
|
||||||
{"path":"github.com/hashicorp/hcl","checksumSHA1":"vgGv8zuy7q8c5LBAFO1fnnQRRgE=","revision":"914dc3f8dd7c463188c73fc47e9ced82a6e421ca","revisionTime":"2019-10-16T23:15:34Z"},
|
{"path":"github.com/hashicorp/hcl","checksumSHA1":"vgGv8zuy7q8c5LBAFO1fnnQRRgE=","revision":"914dc3f8dd7c463188c73fc47e9ced82a6e421ca","revisionTime":"2019-10-16T23:15:34Z"},
|
||||||
{"path":"github.com/hashicorp/hcl/hcl/ast","checksumSHA1":"XQmjDva9JCGGkIecOgwtBEMCJhU=","revision":"914dc3f8dd7c463188c73fc47e9ced82a6e421ca","revisionTime":"2019-10-16T23:15:34Z"},
|
{"path":"github.com/hashicorp/hcl/hcl/ast","checksumSHA1":"XQmjDva9JCGGkIecOgwtBEMCJhU=","revision":"914dc3f8dd7c463188c73fc47e9ced82a6e421ca","revisionTime":"2019-10-16T23:15:34Z"},
|
||||||
{"path":"github.com/hashicorp/hcl/hcl/parser","checksumSHA1":"1GmX7G0Pgf5XprOh+T3zXMXX0dc=","revision":"914dc3f8dd7c463188c73fc47e9ced82a6e421ca","revisionTime":"2019-10-16T23:15:34Z"},
|
{"path":"github.com/hashicorp/hcl/hcl/parser","checksumSHA1":"1GmX7G0Pgf5XprOh+T3zXMXX0dc=","revision":"914dc3f8dd7c463188c73fc47e9ced82a6e421ca","revisionTime":"2019-10-16T23:15:34Z"},
|
||||||
{"path":"github.com/hashicorp/hcl/hcl/printer","revision":"914dc3f8dd7c463188c73fc47e9ced82a6e421ca","revisionTime":"2019-10-16T23:15:34Z"},
|
{"path":"github.com/hashicorp/hcl/hcl/printer","checksumSHA1":"encY+ZtDf4nJaMvsVL2c+EJ2r3Q=","revision":"914dc3f8dd7c463188c73fc47e9ced82a6e421ca","revisionTime":"2019-10-16T23:15:34Z"},
|
||||||
{"path":"github.com/hashicorp/hcl/hcl/scanner","checksumSHA1":"+qJTCxhkwC7r+VZlPlZz8S74KmU=","revision":"914dc3f8dd7c463188c73fc47e9ced82a6e421ca","revisionTime":"2019-10-16T23:15:34Z"},
|
{"path":"github.com/hashicorp/hcl/hcl/scanner","checksumSHA1":"+qJTCxhkwC7r+VZlPlZz8S74KmU=","revision":"914dc3f8dd7c463188c73fc47e9ced82a6e421ca","revisionTime":"2019-10-16T23:15:34Z"},
|
||||||
{"path":"github.com/hashicorp/hcl/hcl/strconv","checksumSHA1":"oS3SCN9Wd6D8/LG0Yx1fu84a7gI=","revision":"914dc3f8dd7c463188c73fc47e9ced82a6e421ca","revisionTime":"2019-10-16T23:15:34Z"},
|
{"path":"github.com/hashicorp/hcl/hcl/strconv","checksumSHA1":"oS3SCN9Wd6D8/LG0Yx1fu84a7gI=","revision":"914dc3f8dd7c463188c73fc47e9ced82a6e421ca","revisionTime":"2019-10-16T23:15:34Z"},
|
||||||
{"path":"github.com/hashicorp/hcl/hcl/token","checksumSHA1":"c6yprzj06ASwCo18TtbbNNBHljA=","revision":"914dc3f8dd7c463188c73fc47e9ced82a6e421ca","revisionTime":"2019-10-16T23:15:34Z"},
|
{"path":"github.com/hashicorp/hcl/hcl/token","checksumSHA1":"c6yprzj06ASwCo18TtbbNNBHljA=","revision":"914dc3f8dd7c463188c73fc47e9ced82a6e421ca","revisionTime":"2019-10-16T23:15:34Z"},
|
||||||
|
@ -302,6 +303,7 @@
|
||||||
{"path":"github.com/mitchellh/hashstructure","checksumSHA1":"Z3FoiV93oUfDoQYMMiHxWCQPlBw=","revision":"1ef5c71b025aef149d12346356ac5973992860bc"},
|
{"path":"github.com/mitchellh/hashstructure","checksumSHA1":"Z3FoiV93oUfDoQYMMiHxWCQPlBw=","revision":"1ef5c71b025aef149d12346356ac5973992860bc"},
|
||||||
{"path":"github.com/mitchellh/mapstructure","checksumSHA1":"4Js6Jlu93Wa0o6Kjt393L9Z7diE=","revision":"281073eb9eb092240d33ef253c404f1cca550309"},
|
{"path":"github.com/mitchellh/mapstructure","checksumSHA1":"4Js6Jlu93Wa0o6Kjt393L9Z7diE=","revision":"281073eb9eb092240d33ef253c404f1cca550309"},
|
||||||
{"path":"github.com/mitchellh/reflectwalk","checksumSHA1":"KqsMqI+Y+3EFYPhyzafpIneaVCM=","revision":"8d802ff4ae93611b807597f639c19f76074df5c6","revisionTime":"2017-05-08T17:38:06Z"},
|
{"path":"github.com/mitchellh/reflectwalk","checksumSHA1":"KqsMqI+Y+3EFYPhyzafpIneaVCM=","revision":"8d802ff4ae93611b807597f639c19f76074df5c6","revisionTime":"2017-05-08T17:38:06Z"},
|
||||||
|
{"path":"github.com/morikuni/aec","checksumSHA1":"OfTtsGbmK3BBWGvGfXJ1BV3fzmY=","revision":"39771216ff4c63d11f5e604076f9c45e8be1067b","revisionTime":"2017-01-13T03:34:06Z"},
|
||||||
{"path":"github.com/mrunalp/fileutils","checksumSHA1":"WmO6bE5oONg3/8HazAAIshRZAtU=","revision":"7d4729fb36185a7c1719923406c9d40e54fb93c7","revisionTime":"2017-11-03T03:01:05Z"},
|
{"path":"github.com/mrunalp/fileutils","checksumSHA1":"WmO6bE5oONg3/8HazAAIshRZAtU=","revision":"7d4729fb36185a7c1719923406c9d40e54fb93c7","revisionTime":"2017-11-03T03:01:05Z"},
|
||||||
{"path":"github.com/oklog/run","checksumSHA1":"nf3UoPNBIut7BL9nWE8Fw2X2j+Q=","revision":"6934b124db28979da51d3470dadfa34d73d72652","revisionTime":"2018-03-08T00:51:04Z"},
|
{"path":"github.com/oklog/run","checksumSHA1":"nf3UoPNBIut7BL9nWE8Fw2X2j+Q=","revision":"6934b124db28979da51d3470dadfa34d73d72652","revisionTime":"2018-03-08T00:51:04Z"},
|
||||||
{"path":"github.com/onsi/ginkgo","checksumSHA1":"cwbidLG1ET7YSqlwca+nSfYxIbg=","revision":"ba8e856bb854d6771a72ddf6497a42dad3a0c971","revisionTime":"2018-03-12T10:34:14Z"},
|
{"path":"github.com/onsi/ginkgo","checksumSHA1":"cwbidLG1ET7YSqlwca+nSfYxIbg=","revision":"ba8e856bb854d6771a72ddf6497a42dad3a0c971","revisionTime":"2018-03-12T10:34:14Z"},