Merge pull request #2258 from hashicorp/f-vendor-updates

Updates vendored dependencies.
James Phillips 2016-08-09 17:50:07 -07:00 committed by GitHub
commit caab3e739c
471 changed files with 11403 additions and 104807 deletions


@ -167,15 +167,15 @@ func (ct *CompiledTemplate) Render(name string) (*structs.PreparedQuery, error)
return nil
}
- hv, ht, err := hil.Eval(tree, config)
+ res, err := hil.Eval(tree, config)
if err != nil {
return fmt.Errorf("Bad evaluation for '%s' in Service%s: %s", v.String(), path, err)
}
- if ht != ast.TypeString {
- return fmt.Errorf("Expected Service%s field to be a string, got %s", path, ht)
+ if res.Type != hil.TypeString {
+ return fmt.Errorf("Expected Service%s field to be a string, got %s", path, res.Type)
}
- v.SetString(hv.(string))
+ v.SetString(res.Value.(string))
return nil
}
if err := walk(&query.Service, eval); err != nil {
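For context on the hunk above: hil's Eval now returns a single EvaluationResult instead of separate value/type return values. A minimal standalone sketch of the new call pattern (assuming the post-change hil API with `EvaluationResult{Type, Value}` and an `ast.BasicScope` for variables):

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hil"
	"github.com/hashicorp/hil/ast"
)

func main() {
	tree, err := hil.Parse("hello ${var.name}")
	if err != nil {
		log.Fatal(err)
	}

	config := &hil.EvalConfig{
		GlobalScope: &ast.BasicScope{
			VarMap: map[string]ast.Variable{
				"var.name": {Type: ast.TypeString, Value: "world"},
			},
		},
	}

	// A single result struct replaces the old (value, type, error) returns.
	res, err := hil.Eval(tree, config)
	if err != nil {
		log.Fatal(err)
	}
	if res.Type != hil.TypeString {
		log.Fatalf("expected a string, got %s", res.Type)
	}
	fmt.Println(res.Value.(string)) // "hello world"
}
```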

vendor/github.com/DataDog/datadog-go/LICENSE.txt generated vendored Normal file

@ -0,0 +1,19 @@
Copyright (c) 2015 Datadog, Inc
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@ -25,6 +25,7 @@ package statsd
import (
"bytes"
"errors"
"fmt"
"math/rand"
"net"
@ -34,6 +35,25 @@ import (
"time"
)
/*
OptimalPayloadSize defines the optimal payload size for a UDP datagram, 1432 bytes
is optimal for regular networks with an MTU of 1500 so datagrams don't get
fragmented. It's generally recommended not to fragment UDP datagrams as losing
a single fragment will cause the entire datagram to be lost.
This can be increased if your network has a greater MTU or you don't mind UDP
datagrams getting fragmented. The practical limit is MaxUDPPayloadSize
*/
const OptimalPayloadSize = 1432
/*
MaxUDPPayloadSize defines the maximum payload size for a UDP datagram.
Its value comes from the calculation: 65535 bytes (max UDP datagram size) -
8-byte UDP header - 60 bytes of maximum IP headers.
Any payload larger than that will see frames being cut out.
*/
const MaxUDPPayloadSize = 65467
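Taken together, the two constants bound how the buffered client frames datagrams: flushes join queued commands up to OptimalPayloadSize, and any single message over MaxUDPPayloadSize is rejected outright. A minimal sketch of constructing such a buffered client, assuming the package's `NewBuffered` constructor and a local dogstatsd agent on 127.0.0.1:8125:

```go
package main

import (
	"log"

	"github.com/DataDog/datadog-go/statsd"
)

func main() {
	// Buffer up to 128 commands; a flush joins them with "\n" into
	// datagrams of at most OptimalPayloadSize bytes.
	c, err := statsd.NewBuffered("127.0.0.1:8125", 128)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	c.Namespace = "consul."
	if err := c.Incr("requests", []string{"env:dev"}, 1); err != nil {
		log.Println(err)
	}
}
```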
// A Client is a handle for sending udp messages to dogstatsd. It is safe to
// use one Client from multiple goroutines simultaneously.
type Client struct {
@ -46,6 +66,7 @@ type Client struct {
bufferLength int
flushTime time.Duration
commands []string
buffer bytes.Buffer
stop bool
sync.Mutex
}
@ -120,36 +141,100 @@ func (c *Client) watch() {
}
func (c *Client) append(cmd string) error {
c.Lock()
c.commands = append(c.commands, cmd)
// if we should flush, lets do it
if len(c.commands) == c.bufferLength {
if err := c.flush(); err != nil {
c.Unlock()
return err
}
}
c.Unlock()
return nil
}
func (c *Client) joinMaxSize(cmds []string, sep string, maxSize int) ([][]byte, []int) {
c.buffer.Reset() //clear buffer
var frames [][]byte
var ncmds []int
sepBytes := []byte(sep)
sepLen := len(sep)
elem := 0
for _, cmd := range cmds {
needed := len(cmd)
if elem != 0 {
needed = needed + sepLen
}
if c.buffer.Len()+needed <= maxSize {
if elem != 0 {
c.buffer.Write(sepBytes)
}
c.buffer.WriteString(cmd)
elem++
} else {
frames = append(frames, copyAndResetBuffer(&c.buffer))
ncmds = append(ncmds, elem)
// if cmd is bigger than maxSize it will get flushed on next loop
c.buffer.WriteString(cmd)
elem = 1
}
}
//add whatever is left! if there's actually something
if c.buffer.Len() > 0 {
frames = append(frames, copyAndResetBuffer(&c.buffer))
ncmds = append(ncmds, elem)
}
return frames, ncmds
}
func copyAndResetBuffer(buf *bytes.Buffer) []byte {
tmpBuf := make([]byte, buf.Len())
copy(tmpBuf, buf.Bytes())
buf.Reset()
return tmpBuf
}
// flush the commands in the buffer. Lock must be held by caller.
func (c *Client) flush() error {
- data := strings.Join(c.commands, "\n")
- _, err := c.conn.Write([]byte(data))
+ frames, flushable := c.joinMaxSize(c.commands, "\n", OptimalPayloadSize)
+ var err error
+ cmdsFlushed := 0
+ for i, data := range frames {
+ _, e := c.conn.Write(data)
+ if e != nil {
+ err = e
+ break
+ }
+ cmdsFlushed += flushable[i]
+ }
// clear the slice with a slice op, doesn't realloc
- c.commands = c.commands[:0]
+ if cmdsFlushed == len(c.commands) {
+ c.commands = c.commands[:0]
+ } else {
+ //this case will cause a future realloc...
+ // drop problematic command though (sorry).
+ c.commands = c.commands[cmdsFlushed+1:]
+ }
return err
}
func (c *Client) sendMsg(msg string) error {
// if this client is buffered, then we'll just append this
c.Lock()
defer c.Unlock()
if c.bufferLength > 0 {
// return an error if message is bigger than MaxUDPPayloadSize
if len(msg) > MaxUDPPayloadSize {
return errors.New("message size exceeds MaxUDPPayloadSize")
}
return c.append(msg)
}
c.Lock()
_, err := c.conn.Write([]byte(msg))
c.Unlock()
return err
}
@ -296,15 +381,17 @@ func (e Event) Encode(tags ...string) (string, error) {
if err != nil {
return "", err
}
text := e.escapedText()
var buffer bytes.Buffer
buffer.WriteString("_e{")
buffer.WriteString(strconv.FormatInt(int64(len(e.Title)), 10))
buffer.WriteRune(',')
- buffer.WriteString(strconv.FormatInt(int64(len(e.Text)), 10))
+ buffer.WriteString(strconv.FormatInt(int64(len(text)), 10))
buffer.WriteString("}:")
buffer.WriteString(e.Title)
buffer.WriteRune('|')
- buffer.WriteString(e.Text)
+ buffer.WriteString(text)
if !e.Timestamp.IsZero() {
buffer.WriteString("|d:")
@ -351,3 +438,7 @@ func (e Event) Encode(tags ...string) (string, error) {
return buffer.String(), nil
}
func (e Event) escapedText() string {
return strings.Replace(e.Text, "\n", "\\n", -1)
}


@ -1,10 +1,21 @@
- # 0.9.0 (Unreleased)
+ # 0.10.0
* feature: Add a test hook (#180)
* feature: `ParseLevel` is now case-insensitive (#326)
* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
* performance: avoid re-allocations on `WithFields` (#335)
# 0.9.0
* logrus/text_formatter: don't emit empty msg
* logrus/hooks/airbrake: move out of main repository
* logrus/hooks/sentry: move out of main repository
* logrus/hooks/papertrail: move out of main repository
* logrus/hooks/bugsnag: move out of main repository
* logrus/core: run tests with `-race`
* logrus/core: detect TTY based on `stderr`
* logrus/core: support `WithError` on logger
* logrus/core: Solaris support
# 0.8.7


@ -1,4 +1,4 @@
- # Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>&nbsp;[![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus)&nbsp;[![godoc reference](https://godoc.org/github.com/Sirupsen/logrus?status.png)][godoc]
+ # Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>&nbsp;[![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus)&nbsp;[![GoDoc](https://godoc.org/github.com/Sirupsen/logrus?status.svg)](https://godoc.org/github.com/Sirupsen/logrus)
Logrus is a structured logger for Go (golang), completely API compatible with
the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
@ -12,7 +12,7 @@ plain text):
![Colored](http://i.imgur.com/PY7qMwd.png)
- With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash
+ With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
or Splunk:
```json
@ -32,7 +32,7 @@ ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
```
- With the default `log.Formatter = new(&log.TextFormatter{})` when a TTY is not
+ With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
attached, the output is compatible with the
[logfmt](http://godoc.org/github.com/kr/logfmt) format:
@ -218,9 +218,17 @@ Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/v
| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) |
| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
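The table rows above are third-party hooks; wiring any hook in goes through `AddHook`. A minimal sketch of a custom hook, assuming the standard `logrus.Hook` interface (`Levels`/`Fire`) and a hypothetical `alertHook` type:

```go
package main

import (
	log "github.com/Sirupsen/logrus"
)

// alertHook is a hypothetical hook that fires only on Error-level entries.
type alertHook struct{}

func (h *alertHook) Levels() []log.Level { return []log.Level{log.ErrorLevel} }

func (h *alertHook) Fire(e *log.Entry) error {
	// forward e.Message and e.Data to an external system here
	return nil
}

func main() {
	log.AddHook(&alertHook{})
	log.Error("something failed") // also dispatched to alertHook
}
```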
#### Level logging
@ -298,14 +306,10 @@ The built-in logging formatters are:
field to `true`. To force no colored output even if there is a TTY set the
`DisableColors` field to `true`
* `logrus.JSONFormatter`. Logs fields as JSON.
- * `logrus/formatters/logstash.LogstashFormatter`. Logs fields as [Logstash](http://logstash.net) Events.
- ```go
- logrus.SetFormatter(&logstash.LogstashFormatter{Type: "application_name"})
- ```
Third party logging formatters:
+ * [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
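A short sketch of switching formatters through the package-level API referenced above:

```go
package main

import (
	log "github.com/Sirupsen/logrus"
)

func main() {
	log.SetFormatter(&log.JSONFormatter{}) // or &log.TextFormatter{}
	log.WithFields(log.Fields{
		"animal": "walrus",
		"size":   10,
	}).Info("A group of walrus emerges from the ocean")
}
```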
@ -362,4 +366,37 @@ entries. It should not be a feature of the application-level logger.
| ---- | ----------- |
|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.|
[godoc]: https://godoc.org/github.com/Sirupsen/logrus
#### Testing
Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
* decorators for an existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
```go
logger, hook := NewNullLogger()
logger.Error("Hello error")
assert.Equal(1, len(hook.Entries))
assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
assert.Equal("Hello error", hook.LastEntry().Message)
hook.Reset()
assert.Nil(hook.LastEntry())
```
#### Fatal handlers
Logrus can register one or more functions that will be called when any `fatal`
level message is logged. The registered handlers will be executed before
logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
to shut down gracefully. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted.
```
...
handler := func() {
// gracefully shutdown something...
}
logrus.RegisterExitHandler(handler)
...
```

vendor/github.com/Sirupsen/logrus/alt_exit.go generated vendored Normal file

@ -0,0 +1,64 @@
package logrus
// The following code was sourced and modified from the
// https://bitbucket.org/tebeka/atexit package governed by the following license:
//
// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import (
"fmt"
"os"
)
var handlers = []func(){}
func runHandler(handler func()) {
defer func() {
if err := recover(); err != nil {
fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
}
}()
handler()
}
func runHandlers() {
for _, handler := range handlers {
runHandler(handler)
}
}
// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
func Exit(code int) {
runHandlers()
os.Exit(code)
}
// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke
// all handlers. The handlers will also be invoked when any Fatal log entry is
// made.
//
// This method is useful when a caller wishes to use logrus to log a fatal
// message but also needs to shut down gracefully. An example use case could be
// closing database connections, or sending an alert that the application is
// closing.
func RegisterExitHandler(handler func()) {
handlers = append(handlers, handler)
}
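A minimal usage sketch, assuming the package-level `RegisterExitHandler` shown above and the `Fatal` path that now calls `Exit(1)`:

```go
package main

import (
	log "github.com/Sirupsen/logrus"
)

func main() {
	log.RegisterExitHandler(func() {
		// e.g. close database connections, flush buffers
		log.Info("cleaning up before exit")
	})
	log.Fatal("unrecoverable error") // runs the handler, then os.Exit(1)
}
```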


@ -68,7 +68,7 @@ func (entry *Entry) WithField(key string, value interface{}) *Entry {
// Add a map of fields to the Entry.
func (entry *Entry) WithFields(fields Fields) *Entry {
- data := Fields{}
+ data := make(Fields, len(entry.Data)+len(fields))
for k, v := range entry.Data {
data[k] = v
}
@ -150,7 +150,7 @@ func (entry *Entry) Fatal(args ...interface{}) {
if entry.Logger.Level >= FatalLevel {
entry.log(FatalLevel, fmt.Sprint(args...))
}
- os.Exit(1)
+ Exit(1)
}
func (entry *Entry) Panic(args ...interface{}) {
@ -198,7 +198,7 @@ func (entry *Entry) Fatalf(format string, args ...interface{}) {
if entry.Logger.Level >= FatalLevel {
entry.Fatal(fmt.Sprintf(format, args...))
}
- os.Exit(1)
+ Exit(1)
}
func (entry *Entry) Panicf(format string, args ...interface{}) {
@ -245,7 +245,7 @@ func (entry *Entry) Fatalln(args ...interface{}) {
if entry.Logger.Level >= FatalLevel {
entry.Fatal(entry.sprintlnn(args...))
}
- os.Exit(1)
+ Exit(1)
}
func (entry *Entry) Panicln(args ...interface{}) {


@ -31,18 +31,15 @@ type Formatter interface {
// It's not exported because it's still using Data in an opinionated way. It's to
// avoid code duplication between the two default formatters.
func prefixFieldClashes(data Fields) {
- _, ok := data["time"]
- if ok {
- data["fields.time"] = data["time"]
+ if t, ok := data["time"]; ok {
+ data["fields.time"] = t
}
- _, ok = data["msg"]
- if ok {
- data["fields.msg"] = data["msg"]
+ if m, ok := data["msg"]; ok {
+ data["fields.msg"] = m
}
- _, ok = data["level"]
- if ok {
- data["fields.level"] = data["level"]
+ if l, ok := data["level"]; ok {
+ data["fields.level"] = l
}
}


@ -51,7 +51,7 @@ func New() *Logger {
}
}
- // Adds a field to the log entry, note that you it doesn't log until you call
+ // Adds a field to the log entry, note that it doesn't log until you call
// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
// If you want multiple fields, use `WithFields`.
func (logger *Logger) WithField(key string, value interface{}) *Entry {
@ -108,7 +108,7 @@ func (logger *Logger) Fatalf(format string, args ...interface{}) {
if logger.Level >= FatalLevel {
NewEntry(logger).Fatalf(format, args...)
}
- os.Exit(1)
+ Exit(1)
}
func (logger *Logger) Panicf(format string, args ...interface{}) {
@ -155,7 +155,7 @@ func (logger *Logger) Fatal(args ...interface{}) {
if logger.Level >= FatalLevel {
NewEntry(logger).Fatal(args...)
}
- os.Exit(1)
+ Exit(1)
}
func (logger *Logger) Panic(args ...interface{}) {
@ -202,7 +202,7 @@ func (logger *Logger) Fatalln(args ...interface{}) {
if logger.Level >= FatalLevel {
NewEntry(logger).Fatalln(args...)
}
- os.Exit(1)
+ Exit(1)
}
func (logger *Logger) Panicln(args ...interface{}) {


@ -3,6 +3,7 @@ package logrus
import (
"fmt"
"log"
"strings"
)
// Fields type, used to pass to `WithFields`.
@ -33,7 +34,7 @@ func (level Level) String() string {
// ParseLevel takes a string level and returns the Logrus log level constant.
func ParseLevel(lvl string) (Level, error) {
- switch lvl {
+ switch strings.ToLower(lvl) {
case "panic":
return PanicLevel, nil
case "fatal":
@ -52,6 +53,16 @@ func ParseLevel(lvl string) (Level, error) {
return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
}
// A constant exposing all logging levels
var AllLevels = []Level{
PanicLevel,
FatalLevel,
ErrorLevel,
WarnLevel,
InfoLevel,
DebugLevel,
}
// These are the different logging levels. You can set the logging level to log
// on your instance of logger, obtained with `logrus.New()`.
const (
@ -96,3 +107,37 @@ type StdLogger interface {
Panicf(string, ...interface{})
Panicln(...interface{})
}
// The FieldLogger interface generalizes the Entry and Logger types
type FieldLogger interface {
WithField(key string, value interface{}) *Entry
WithFields(fields Fields) *Entry
WithError(err error) *Entry
Debugf(format string, args ...interface{})
Infof(format string, args ...interface{})
Printf(format string, args ...interface{})
Warnf(format string, args ...interface{})
Warningf(format string, args ...interface{})
Errorf(format string, args ...interface{})
Fatalf(format string, args ...interface{})
Panicf(format string, args ...interface{})
Debug(args ...interface{})
Info(args ...interface{})
Print(args ...interface{})
Warn(args ...interface{})
Warning(args ...interface{})
Error(args ...interface{})
Fatal(args ...interface{})
Panic(args ...interface{})
Debugln(args ...interface{})
Infoln(args ...interface{})
Println(args ...interface{})
Warnln(args ...interface{})
Warningln(args ...interface{})
Errorln(args ...interface{})
Fatalln(args ...interface{})
Panicln(args ...interface{})
}
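Since both `*Logger` and `*Entry` satisfy `FieldLogger`, library code can accept either without caring which it got. A small sketch:

```go
package main

import (
	log "github.com/Sirupsen/logrus"
)

// start accepts either a *log.Logger or a *log.Entry.
func start(l log.FieldLogger) {
	l.WithField("component", "demo").Info("starting")
}

func main() {
	start(log.StandardLogger())               // *Logger
	start(log.WithField("request_id", "abc")) // *Entry with a preset field
}
```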


@ -5,7 +5,7 @@ package logrus
import (
"os"
"github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix"
"golang.org/x/sys/unix"
)
// IsTerminal returns true if the given file descriptor is a terminal.


@ -128,10 +128,10 @@ func needsQuoting(text string) bool {
(ch >= 'A' && ch <= 'Z') ||
(ch >= '0' && ch <= '9') ||
ch == '-' || ch == '.') {
- return false
+ return true
}
}
- return true
+ return false
}
func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
@ -141,14 +141,14 @@ func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interf
switch value := value.(type) {
case string:
- if needsQuoting(value) {
+ if !needsQuoting(value) {
b.WriteString(value)
} else {
fmt.Fprintf(b, "%q", value)
}
case error:
errmsg := value.Error()
- if needsQuoting(errmsg) {
+ if !needsQuoting(errmsg) {
b.WriteString(errmsg)
} else {
fmt.Fprintf(b, "%q", value)

vendor/github.com/Sirupsen/logrus/writer.go generated vendored Normal file

@ -0,0 +1,53 @@
package logrus
import (
"bufio"
"io"
"runtime"
)
func (logger *Logger) Writer() *io.PipeWriter {
return logger.WriterLevel(InfoLevel)
}
func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
reader, writer := io.Pipe()
var printFunc func(args ...interface{})
switch level {
case DebugLevel:
printFunc = logger.Debug
case InfoLevel:
printFunc = logger.Info
case WarnLevel:
printFunc = logger.Warn
case ErrorLevel:
printFunc = logger.Error
case FatalLevel:
printFunc = logger.Fatal
case PanicLevel:
printFunc = logger.Panic
default:
printFunc = logger.Print
}
go logger.writerScanner(reader, printFunc)
runtime.SetFinalizer(writer, writerFinalizer)
return writer
}
func (logger *Logger) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
printFunc(scanner.Text())
}
if err := scanner.Err(); err != nil {
logger.Errorf("Error while reading from Writer: %s", err)
}
reader.Close()
}
func writerFinalizer(writer *io.PipeWriter) {
writer.Close()
}
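A typical use of `WriterLevel` is routing the standard library's logger through logrus; a sketch, assuming the package-level `StandardLogger`:

```go
package main

import (
	stdlog "log"

	log "github.com/Sirupsen/logrus"
)

func main() {
	w := log.StandardLogger().WriterLevel(log.WarnLevel)
	defer w.Close()

	// Every line written here is re-emitted as a logrus WARN entry.
	legacy := stdlog.New(w, "", 0)
	legacy.Println("message from the standard library logger")
}
```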


@ -1,22 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe

vendor/github.com/armon/circbuf/LICENSE generated vendored Normal file → Executable file


@ -1,24 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
/metrics.out


@ -1,22 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe


@ -1,3 +0,0 @@
language: go
go:
- tip


@ -1,2 +0,0 @@
example/example
example/example.exe


@ -16,14 +16,14 @@ func Ask(prompt string) (password string, err error) {
// Same as the Ask function, except it is possible to specify the file to write
// the prompt to.
- func FAsk(file *os.File, prompt string) (password string, err error) {
+ func FAsk(wr io.Writer, prompt string) (password string, err error) {
if prompt != "" {
- fmt.Fprint(file, prompt) // Display the prompt.
+ fmt.Fprint(wr, prompt) // Display the prompt.
}
password, err = getPassword()
// Carriage return after the user input.
- fmt.Fprintln(file, "")
+ fmt.Fprintln(wr, "")
return
}
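With the signature widened from `*os.File` to `io.Writer`, the prompt can go to any writer; a usage sketch, assuming the vendored import path `github.com/bgentry/speakeasy`:

```go
package main

import (
	"fmt"
	"os"

	"github.com/bgentry/speakeasy"
)

func main() {
	// Writing the prompt to stderr keeps stdout clean for piping.
	password, err := speakeasy.FAsk(os.Stderr, "Password: ")
	if err != nil {
		panic(err)
	}
	fmt.Println("password length:", len(password))
}
```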


@ -3,7 +3,6 @@
package speakeasy
import (
"os"
"syscall"
)
@ -12,18 +11,17 @@ import (
const ENABLE_ECHO_INPUT = 0x0004
func getPassword() (password string, err error) {
- hStdin := syscall.Handle(os.Stdin.Fd())
var oldMode uint32
- err = syscall.GetConsoleMode(hStdin, &oldMode)
+ err = syscall.GetConsoleMode(syscall.Stdin, &oldMode)
if err != nil {
return
}
var newMode uint32 = (oldMode &^ ENABLE_ECHO_INPUT)
- err = setConsoleMode(hStdin, newMode)
- defer setConsoleMode(hStdin, oldMode)
+ err = setConsoleMode(syscall.Stdin, newMode)
+ defer setConsoleMode(syscall.Stdin, oldMode)
if err != nil {
return
}


@ -1,4 +0,0 @@
*.prof
*.test
*.swp
/bin/


@ -1,2 +0,0 @@
.DS_Store
env.sh


@ -1,69 +0,0 @@
# Setting up dev/test environment
Get go installed and environment configured
```sh
cd $GOPATH
mkdir -pv src/github.com/{hashicorp,armon,circonus-labs}
cd $GOPATH/src/github.com/hashicorp
git clone https://github.com/maier/consul.git
cd $GOPATH/src/github.com/armon
git clone https://github.com/maier/go-metrics.git
cd $GOPATH/src/github.com/circonus-labs
git clone https://github.com/maier/circonus-gometrics.git
cd $GOPATH/src/github.com/hashicorp/consul
make dev
```
In `$GOPATH/src/github.com/hashicorp/consul/bin` is the binary just created.
Create a consul configuration file somewhere (e.g. ~/testconfig.json) and add any applicable configuration settings. As an example:
```json
{
"datacenter": "mcfl",
"server": true,
"log_level": "debug",
"telemetry": {
"statsd_address": "127.0.0.1:8125",
"circonus_api_token": "...",
"circonus_api_host": "..."
}
}
```
StatsD was only used as a cross-check to see what metrics Consul was sending and what metrics Circonus was receiving, so it can safely be omitted.
Fill in the appropriate Circonus-specific settings:
* circonus_api_token - required
* circonus_api_app - optional, default is circonus-gometrics
* circonus_api_host - optional, default is api.circonus.com (for dev stuff you can use "http://..." to circumvent ssl)
* circonus_submission_url - optional
* circonus_submission_interval - optional, seconds, defaults to 10 seconds
* circonus_check_id - optional
* circonus_broker_id - optional (unless you want to use the public one, then add it)
The actual circonus-gometrics package has more configuration options; the above are the ones exposed in the consul configuration.
CirconusMetrics.InstanceId is derived from consul's config.NodeName and config.Datacenter
CirconusMetrics.SearchTag is hardcoded as 'service:consul'
The defaults are taken for other options.
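For reference, a sketch of a fuller `telemetry` block with the optional keys from the list above filled in (values are illustrative placeholders, not defaults):

```json
{
  "telemetry": {
    "circonus_api_token": "...",
    "circonus_api_app": "circonus-gometrics",
    "circonus_api_host": "api.circonus.com",
    "circonus_submission_interval": "10s",
    "circonus_check_id": "12345",
    "circonus_broker_id": "35"
  }
}
```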
---
To run after creating the config:
`$GOPATH/src/github.com/hashicorp/consul/bin/consul agent -dev -config-file <config file>`
or, to add the ui (localhost:8500)
`$GOPATH/src/github.com/hashicorp/consul/bin/consul agent -dev -ui -config-file <config file>`


@ -142,8 +142,6 @@ func main() {
}
```
- # untested
### HTTP Handler wrapping
```
@ -156,14 +154,20 @@ http.HandleFunc("/", metrics.TrackHTTPLatency("/", handler_func))
package main
import (
"os"
"fmt"
"net/http"
metrics "github.com/circonus-labs/circonus-gometrics"
cgm "github.com/circonus-labs/circonus-gometrics"
)
func main() {
- metrics.WithAuthToken("9fdd5432-5308-4691-acd1-6bf1f7a20f73")
- metrics.WithCheckId(115010)
+ cmc := &cgm.Config{}
+ cmc.CheckManager.API.TokenKey = os.Getenv("CIRCONUS_API_TOKEN")
+ metrics, err := cgm.NewCirconusMetrics(cmc)
+ if err != nil {
+ panic(err)
+ }
+ metrics.Start()
http.HandleFunc("/", metrics.TrackHTTPLatency("/", func(w http.ResponseWriter, r *http.Request) {


@ -68,7 +68,7 @@ type CirconusMetrics struct {
counterFuncs map[string]func() uint64
cfm sync.Mutex
- gauges map[string]int64
+ gauges map[string]string
gm sync.Mutex
gaugeFuncs map[string]func() int64
@ -94,7 +94,7 @@ func NewCirconusMetrics(cfg *Config) (*CirconusMetrics, error) {
cm := &CirconusMetrics{
counters: make(map[string]uint64),
counterFuncs: make(map[string]func() uint64),
- gauges: make(map[string]int64),
+ gauges: make(map[string]string),
gaugeFuncs: make(map[string]func() int64),
histograms: make(map[string]*Histogram),
text: make(map[string]string),


@ -15,6 +15,13 @@ func (m *CirconusMetrics) IncrementByValue(metric string, val uint64) {
m.Add(metric, val)
}
// Set sets a counter to a specific value
func (m *CirconusMetrics) Set(metric string, val uint64) {
m.cm.Lock()
defer m.cm.Unlock()
m.counters[metric] = val
}
// Add updates counter by supplied value
func (m *CirconusMetrics) Add(metric string, val uint64) {
m.cm.Lock()


@ -5,16 +5,20 @@ package circonusgometrics
// Use a gauge to track metrics which increase and decrease (e.g., amount of
// free memory).
import (
"fmt"
)
// Gauge sets a gauge to a value
- func (m *CirconusMetrics) Gauge(metric string, val int64) {
+ func (m *CirconusMetrics) Gauge(metric string, val interface{}) {
m.SetGauge(metric, val)
}
// SetGauge sets a gauge to a value
- func (m *CirconusMetrics) SetGauge(metric string, val int64) {
+ func (m *CirconusMetrics) SetGauge(metric string, val interface{}) {
m.gm.Lock()
defer m.gm.Unlock()
- m.gauges[metric] = val
+ m.gauges[metric] = m.gaugeValString(val)
}
// RemoveGauge removes a gauge
@ -37,3 +41,37 @@ func (m *CirconusMetrics) RemoveGaugeFunc(metric string) {
defer m.gfm.Unlock()
delete(m.gaugeFuncs, metric)
}
// gaugeValString converts an interface value (of a supported type) to a string
func (m *CirconusMetrics) gaugeValString(val interface{}) string {
vs := ""
switch v := val.(type) {
default:
// ignore it, unsupported type
case int:
vs = fmt.Sprintf("%d", v)
case int8:
vs = fmt.Sprintf("%d", v)
case int16:
vs = fmt.Sprintf("%d", v)
case int32:
vs = fmt.Sprintf("%d", v)
case int64:
vs = fmt.Sprintf("%d", v)
case uint:
vs = fmt.Sprintf("%d", v)
case uint8:
vs = fmt.Sprintf("%d", v)
case uint16:
vs = fmt.Sprintf("%d", v)
case uint32:
vs = fmt.Sprintf("%d", v)
case uint64:
vs = fmt.Sprintf("%d", v)
case float32:
vs = fmt.Sprintf("%f", v)
case float64:
vs = fmt.Sprintf("%f", v)
}
return vs
}


@ -29,7 +29,7 @@ func (m *CirconusMetrics) Reset() {
m.counters = make(map[string]uint64)
m.counterFuncs = make(map[string]func() uint64)
- m.gauges = make(map[string]int64)
+ m.gauges = make(map[string]string)
m.gaugeFuncs = make(map[string]func() int64)
m.histograms = make(map[string]*Histogram)
m.text = make(map[string]string)
@ -37,7 +37,7 @@ func (m *CirconusMetrics) Reset() {
}
// snapshot returns a copy of the values of all registered counters and gauges.
- func (m *CirconusMetrics) snapshot() (c map[string]uint64, g map[string]int64, h map[string]*circonusllhist.Histogram, t map[string]string) {
+ func (m *CirconusMetrics) snapshot() (c map[string]uint64, g map[string]string, h map[string]*circonusllhist.Histogram, t map[string]string) {
m.cm.Lock()
defer m.cm.Unlock()
@ -68,13 +68,14 @@ func (m *CirconusMetrics) snapshot() (c map[string]uint64, g map[string]int64, h
c[n] = f()
}
- g = make(map[string]int64, len(m.gauges)+len(m.gaugeFuncs))
+ //g = make(map[string]int64, len(m.gauges)+len(m.gaugeFuncs))
+ g = make(map[string]string, len(m.gauges)+len(m.gaugeFuncs))
for n, v := range m.gauges {
g[n] = v
}
for n, f := range m.gaugeFuncs {
- g[n] = f()
+ g[n] = m.gaugeValString(f())
}
h = make(map[string]*circonusllhist.Histogram, len(m.histograms))

vendor/github.com/docker/docker/LICENSE generated vendored Normal file

@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2013-2016 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/docker/docker/NOTICE generated vendored Normal file

@ -0,0 +1,19 @@
Docker
Copyright 2012-2016 Docker, Inc.
This product includes software developed at Docker, Inc. (https://www.docker.com).
This product contains software (https://github.com/kr/pty) developed
by Keith Rarick, licensed under the MIT License.
The following is courtesy of our legal counsel:
Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.
For more information, please see https://www.bis.doc.gov
See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.


@ -4,16 +4,12 @@ import (
"fmt"
"net"
"net/url"
"runtime"
"strconv"
"strings"
)
var (
// DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker daemon -H tcp://
- // TODO Windows. DefaultHTTPPort is only used on Windows if a -H parameter
- // is not supplied. A better longer term solution would be to use a named
- // pipe as the default on the Windows daemon.
+ // These are the IANA registered port numbers for use with Docker
+ // see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker
DefaultHTTPPort = 2375 // Default HTTP Port
@ -26,13 +22,19 @@ var (
DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort)
// DefaultTLSHost constant defines the default host string used by docker for TLS sockets
DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort)
// DefaultNamedPipe defines the default named pipe used by docker on Windows
DefaultNamedPipe = `//./pipe/docker_engine`
)
// ValidateHost validates that the specified string is a valid host and returns it.
func ValidateHost(val string) (string, error) {
- _, err := parseDockerDaemonHost(DefaultTCPHost, DefaultTLSHost, DefaultUnixSocket, "", val)
- if err != nil {
- return val, err
+ host := strings.TrimSpace(val)
+ // The empty string means default and is not handled by parseDockerDaemonHost
+ if host != "" {
+ _, err := parseDockerDaemonHost(host)
+ if err != nil {
+ return val, err
+ }
}
// Note: unlike most flag validators, we don't return the mutated value here
// we need to know what the user entered later (using ParseHost) to adjust for tls
@ -40,39 +42,39 @@ func ValidateHost(val string) (string, error) {
}
// ParseHost and set defaults for a Daemon host string
- func ParseHost(defaultHost, val string) (string, error) {
- host, err := parseDockerDaemonHost(DefaultTCPHost, DefaultTLSHost, DefaultUnixSocket, defaultHost, val)
- if err != nil {
- return val, err
+ func ParseHost(defaultToTLS bool, val string) (string, error) {
+ host := strings.TrimSpace(val)
+ if host == "" {
+ if defaultToTLS {
+ host = DefaultTLSHost
+ } else {
+ host = DefaultHost
+ }
+ } else {
+ var err error
+ host, err = parseDockerDaemonHost(host)
+ if err != nil {
+ return val, err
+ }
+ }
return host, nil
}
// parseDockerDaemonHost parses the specified address and returns an address that will be used as the host.
- // Depending of the address specified, will use the defaultTCPAddr or defaultUnixAddr
- // defaultUnixAddr must be a absolute file path (no `unix://` prefix)
- // defaultTCPAddr must be the full `tcp://host:port` form
- func parseDockerDaemonHost(defaultTCPAddr, defaultTLSHost, defaultUnixAddr, defaultAddr, addr string) (string, error) {
- addr = strings.TrimSpace(addr)
- if addr == "" {
- if defaultAddr == defaultTLSHost {
- return defaultTLSHost, nil
- }
- if runtime.GOOS != "windows" {
- return fmt.Sprintf("unix://%s", defaultUnixAddr), nil
- }
- return defaultTCPAddr, nil
- }
- addrParts := strings.Split(addr, "://")
- if len(addrParts) == 1 {
+ // Depending of the address specified, this may return one of the global Default* strings defined in hosts.go.
+ func parseDockerDaemonHost(addr string) (string, error) {
+ addrParts := strings.SplitN(addr, "://", 2)
+ if len(addrParts) == 1 && addrParts[0] != "" {
addrParts = []string{"tcp", addrParts[0]}
}
switch addrParts[0] {
case "tcp":
- return parseTCPAddr(addrParts[1], defaultTCPAddr)
+ return ParseTCPAddr(addrParts[1], DefaultTCPHost)
case "unix":
- return parseUnixAddr(addrParts[1], defaultUnixAddr)
+ return parseSimpleProtoAddr("unix", addrParts[1], DefaultUnixSocket)
+ case "npipe":
+ return parseSimpleProtoAddr("npipe", addrParts[1], DefaultNamedPipe)
case "fd":
return addr, nil
default:
@ -80,27 +82,27 @@ func parseDockerDaemonHost(defaultTCPAddr, defaultTLSHost, defaultUnixAddr, defa
}
}
- // parseUnixAddr parses and validates that the specified address is a valid UNIX
- // socket address. It returns a formatted UNIX socket address, either using the
- // address parsed from addr, or the contents of defaultAddr if addr is a blank
- // string.
- func parseUnixAddr(addr string, defaultAddr string) (string, error) {
- addr = strings.TrimPrefix(addr, "unix://")
+ // parseSimpleProtoAddr parses and validates that the specified address is a valid
+ // socket address for simple protocols like unix and npipe. It returns a formatted
+ // socket address, either using the address parsed from addr, or the contents of
+ // defaultAddr if addr is a blank string.
+ func parseSimpleProtoAddr(proto, addr, defaultAddr string) (string, error) {
+ addr = strings.TrimPrefix(addr, proto+"://")
if strings.Contains(addr, "://") {
return "", fmt.Errorf("Invalid proto, expected unix: %s", addr)
return "", fmt.Errorf("Invalid proto, expected %s: %s", proto, addr)
}
if addr == "" {
addr = defaultAddr
}
return fmt.Sprintf("unix://%s", addr), nil
return fmt.Sprintf("%s://%s", proto, addr), nil
}
- // parseTCPAddr parses and validates that the specified address is a valid TCP
+ // ParseTCPAddr parses and validates that the specified address is a valid TCP
// address. It returns a formatted TCP address, either using the address parsed
// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string.
// tryAddr is expected to have already been Trim()'d
// defaultAddr must be in the full `tcp://host:port` form
- func parseTCPAddr(tryAddr string, defaultAddr string) (string, error) {
+ func ParseTCPAddr(tryAddr string, defaultAddr string) (string, error) {
if tryAddr == "" || tryAddr == "tcp://" {
return defaultAddr, nil
}
@ -125,8 +127,11 @@ func parseTCPAddr(tryAddr string, defaultAddr string) (string, error) {
if err != nil {
return "", err
}
host, port, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ // try port addition once
+ host, port, err = net.SplitHostPort(net.JoinHostPort(u.Host, defaultPort))
+ }
if err != nil {
return "", fmt.Errorf("Invalid bind address format: %s", tryAddr)
}
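The port-addition retry above works because `net.SplitHostPort` fails on an address that has no port; a standalone sketch of the same fallback, independent of the docker package:

```go
package main

import (
	"fmt"
	"net"
)

// splitWithDefaultPort mimics the fallback above: if the address has
// no port, retry after joining it with a default one.
func splitWithDefaultPort(hostport, defaultPort string) (host, port string, err error) {
	host, port, err = net.SplitHostPort(hostport)
	if err != nil {
		host, port, err = net.SplitHostPort(net.JoinHostPort(hostport, defaultPort))
	}
	return host, port, err
}

func main() {
	fmt.Println(splitWithDefaultPort("127.0.0.1", "2375"))      // 127.0.0.1 2375 <nil>
	fmt.Println(splitWithDefaultPort("127.0.0.1:8080", "2375")) // 127.0.0.1 8080 <nil>
}
```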


@ -3,4 +3,4 @@
package opts
// DefaultHost constant defines the default host string used by docker on Windows
- var DefaultHost = DefaultTCPHost
+ var DefaultHost = "npipe://" + DefaultNamedPipe


@ -3,9 +3,10 @@ package opts
import (
"fmt"
"net"
"os"
"regexp"
"strings"
"github.com/docker/engine-api/types/filters"
)
var (
@ -37,7 +38,7 @@ func (opts *ListOpts) String() string {
return fmt.Sprintf("%v", []string((*opts.values)))
}
// Set validates if needed the input value and add it to the
- // Set validates if needed the input value and add it to the
+ // Set validates if needed the input value and adds it to the
func (opts *ListOpts) Set(value string) error {
if opts.validator != nil {
@ -101,6 +102,40 @@ func (opts *ListOpts) Len() int {
return len((*opts.values))
}
// Type returns a string name for this Option type
func (opts *ListOpts) Type() string {
return "list"
}
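ListOpts, MapOpts, and the FilterOpt type later in this file all follow Go's flag.Value convention (`Set`/`String`, plus `Type` for pflag-style libraries), which is what lets them be registered as repeatable command-line flags. A standalone sketch of the same pattern using only the standard library:

```go
package main

import (
	"flag"
	"fmt"
	"strings"
)

// listValue mirrors the ListOpts idea: a flag that may be passed
// multiple times, accumulating values.
type listValue []string

func (l *listValue) String() string     { return strings.Join(*l, ",") }
func (l *listValue) Set(v string) error { *l = append(*l, v); return nil }

func main() {
	var labels listValue
	flag.Var(&labels, "label", "set a label (repeatable)")
	flag.Parse()
	fmt.Println("labels:", labels)
}
```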
// NamedOption is an interface that list and map options
// with names implement.
type NamedOption interface {
Name() string
}
// NamedListOpts is a ListOpts with a configuration name.
// This struct is useful to keep reference to the assigned
// field name in the internal configuration struct.
type NamedListOpts struct {
name string
ListOpts
}
var _ NamedOption = &NamedListOpts{}
// NewNamedListOptsRef creates a reference to a new NamedListOpts struct.
func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts {
return &NamedListOpts{
name: name,
ListOpts: *NewListOptsRef(values, validator),
}
}
// Name returns the name of the NamedListOpts in the configuration.
func (o *NamedListOpts) Name() string {
return o.name
}
//MapOpts holds a map of values and a validation function.
type MapOpts struct {
values map[string]string
@ -135,6 +170,11 @@ func (opts *MapOpts) String() string {
return fmt.Sprintf("%v", map[string]string((opts.values)))
}
// Type returns a string name for this Option type
func (opts *MapOpts) Type() string {
return "map"
}
// NewMapOpts creates a new MapOpts with the specified map of values and a validator.
func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts {
if values == nil {
@ -146,40 +186,35 @@ func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts {
}
}
// NamedMapOpts is a MapOpts struct with a configuration name.
// This struct is useful to keep reference to the assigned
// field name in the internal configuration struct.
type NamedMapOpts struct {
name string
MapOpts
}
var _ NamedOption = &NamedMapOpts{}
// NewNamedMapOpts creates a reference to a new NamedMapOpts struct.
func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts {
return &NamedMapOpts{
name: name,
MapOpts: *NewMapOpts(values, validator),
}
}
// Name returns the name of the NamedMapOpts in the configuration.
func (o *NamedMapOpts) Name() string {
return o.name
}
// ValidatorFctType defines a validator function that returns a validated string and/or an error.
type ValidatorFctType func(val string) (string, error)
// ValidatorFctListType defines a validator function that returns a validated list of string and/or an error
type ValidatorFctListType func(val string) ([]string, error)
- // ValidateAttach validates that the specified string is a valid attach option.
- func ValidateAttach(val string) (string, error) {
- s := strings.ToLower(val)
- for _, str := range []string{"stdin", "stdout", "stderr"} {
- if s == str {
- return s, nil
- }
- }
- return val, fmt.Errorf("valid streams are STDIN, STDOUT and STDERR")
- }
- // ValidateEnv validates an environment variable and returns it.
- // If no value is specified, it returns the current value using os.Getenv.
- //
- // As on ParseEnvFile and related to #16585, environment variable names
- // are not validate what so ever, it's up to application inside docker
- // to validate them or not.
- func ValidateEnv(val string) (string, error) {
- arr := strings.Split(val, "=")
- if len(arr) > 1 {
- return val, nil
- }
- if !doesEnvExist(val) {
- return val, nil
- }
- return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil
- }
// ValidateIPAddress validates an Ip address.
func ValidateIPAddress(val string) (string, error) {
var ip = net.ParseIP(strings.TrimSpace(val))
@ -189,15 +224,6 @@ func ValidateIPAddress(val string) (string, error) {
return "", fmt.Errorf("%s is not an ip address", val)
}
- // ValidateMACAddress validates a MAC address.
- func ValidateMACAddress(val string) (string, error) {
- _, err := net.ParseMAC(strings.TrimSpace(val))
- if err != nil {
- return "", err
- }
- return val, nil
- }
// ValidateDNSSearch validates domain for resolvconf search configuration.
// A zero length domain is represented by a dot (.).
func ValidateDNSSearch(val string) (string, error) {
@ -218,20 +244,6 @@ func validateDomain(val string) (string, error) {
return "", fmt.Errorf("%s is not a valid domain", val)
}
- // ValidateExtraHost validates that the specified string is a valid extrahost and returns it.
- // ExtraHost are in the form of name:ip where the ip has to be a valid ip (ipv4 or ipv6).
- func ValidateExtraHost(val string) (string, error) {
- // allow for IPv6 addresses in extra hosts by only splitting on first ":"
- arr := strings.SplitN(val, ":", 2)
- if len(arr) != 2 || len(arr[0]) == 0 {
- return "", fmt.Errorf("bad format for add-host: %q", val)
- }
- if _, err := ValidateIPAddress(arr[1]); err != nil {
- return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1])
- }
- return val, nil
- }
// ValidateLabel validates that the specified string is a valid label, and returns it.
// Labels are in the form on key=value.
func ValidateLabel(val string) (string, error) {
@ -241,12 +253,69 @@ func ValidateLabel(val string) (string, error) {
return val, nil
}
- func doesEnvExist(name string) bool {
- for _, entry := range os.Environ() {
- parts := strings.SplitN(entry, "=", 2)
- if parts[0] == name {
- return true
+ // ValidateSysctl validates a sysctl and returns it.
+ func ValidateSysctl(val string) (string, error) {
+ validSysctlMap := map[string]bool{
+ "kernel.msgmax": true,
+ "kernel.msgmnb": true,
+ "kernel.msgmni": true,
+ "kernel.sem": true,
+ "kernel.shmall": true,
+ "kernel.shmmax": true,
+ "kernel.shmmni": true,
+ "kernel.shm_rmid_forced": true,
+ }
+ validSysctlPrefixes := []string{
+ "net.",
+ "fs.mqueue.",
+ }
+ arr := strings.Split(val, "=")
+ if len(arr) < 2 {
+ return "", fmt.Errorf("sysctl '%s' is not whitelisted", val)
+ }
+ if validSysctlMap[arr[0]] {
+ return val, nil
+ }
+ for _, vp := range validSysctlPrefixes {
+ if strings.HasPrefix(arr[0], vp) {
+ return val, nil
+ }
+ }
- return false
+ return "", fmt.Errorf("sysctl '%s' is not whitelisted", val)
}
// FilterOpt is a flag type for validating filters
type FilterOpt struct {
filter filters.Args
}
// NewFilterOpt returns a new FilterOpt
func NewFilterOpt() FilterOpt {
return FilterOpt{filter: filters.NewArgs()}
}
func (o *FilterOpt) String() string {
repr, err := filters.ToParam(o.filter)
if err != nil {
return "invalid filters"
}
return repr
}
// Set sets the value of the opt by parsing the command line value
func (o *FilterOpt) Set(value string) error {
var err error
o.filter, err = filters.ParseFlag(value, o.filter)
return err
}
// Type returns the option type
func (o *FilterOpt) Type() string {
return "filter"
}
// Value returns the value of this option
func (o *FilterOpt) Value() filters.Args {
return o.filter
}


@ -1,10 +1,10 @@
package opts
- // TODO Windows. Identify bug in GOLang 1.5.1 and/or Windows Server 2016 TP4.
+ // TODO Windows. Identify bug in GOLang 1.5.1+ and/or Windows Server 2016 TP5.
// @jhowardmsft, @swernli.
//
// On Windows, this mitigates a problem with the default options of running
- // a docker client against a local docker daemon on TP4.
+ // a docker client against a local docker daemon on TP5.
//
// What was found that if the default host is "localhost", even if the client
// (and daemon as this is local) is not physically on a network, and the DNS
@ -35,7 +35,7 @@ package opts
// time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1"
//
// We suspect this is either a bug introduced in GOLang 1.5.1, or that a change
- // in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows TP4. In theory,
+ // in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows. In theory,
// the Windows networking stack is supposed to resolve "localhost" internally,
// without hitting DNS, or even reading the hosts file (which is why localhost
// is commented out in the hosts file on Windows).
@ -44,12 +44,12 @@ package opts
// address does not cause the delay.
//
// This does not occur with the docker client built with 1.4.3 on the same
- // Windows TP4 build, regardless of whether the daemon is built using 1.5.1
+ // Windows build, regardless of whether the daemon is built using 1.5.1
// or 1.4.3. It does not occur on Linux. We also verified we see the same thing
// on a cross-compiled Windows binary (from Linux).
//
// Final note: This is a mitigation, not a 'real' fix. It is still susceptible
- // to the delay in TP4 if a user were to do 'docker run -H=tcp://localhost:2375...'
+ // to the delay if a user were to do 'docker run -H=tcp://localhost:2375...'
// explicitly.
// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080


@ -17,13 +17,13 @@ import (
"strings"
"syscall"
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/pools"
"github.com/docker/docker/pkg/promise"
"github.com/docker/docker/pkg/system"
)
type (
@ -33,6 +33,8 @@ type (
Reader io.Reader
// Compression is the state representing whether content is compressed or not.
Compression int
// WhiteoutFormat is the format of whiteouts unpacked
WhiteoutFormat int
// TarChownOptions wraps the chown options UID and GID.
TarChownOptions struct {
UID, GID int
@ -47,6 +49,10 @@ type (
GIDMaps []idtools.IDMap
ChownOpts *TarChownOptions
IncludeSourceDir bool
// WhiteoutFormat is the expected on disk format for whiteout files.
// This format will be converted to the standard format on pack
// and from the standard format on unpack.
WhiteoutFormat WhiteoutFormat
// When unpacking, specifies whether overwriting a directory with a
// non-directory is allowed and vice versa.
NoOverwriteDirNonDir bool
@ -93,6 +99,14 @@ const (
Xz
)
const (
// AUFSWhiteoutFormat is the default format for whiteouts
AUFSWhiteoutFormat WhiteoutFormat = iota
// OverlayWhiteoutFormat formats whiteout according to the overlay
// standard.
OverlayWhiteoutFormat
)
// IsArchive checks for the magic bytes of a tar or any supported compression
// algorithm.
func IsArchive(header []byte) bool {
@ -130,7 +144,7 @@ func DetectCompression(source []byte) Compression {
Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
} {
if len(source) < len(m) {
logrus.Debugf("Len too short")
logrus.Debug("Len too short")
continue
}
if bytes.Compare(m, source[:len(m)]) == 0 {
@ -146,7 +160,7 @@ func xzDecompress(archive io.Reader) (io.ReadCloser, <-chan struct{}, error) {
return cmdStream(exec.Command(args[0], args[1:]...), archive)
}
// DecompressStream decompress the archive and returns a ReaderCloser with the decompressed archive.
// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive.
func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
p := pools.BufioReader32KPool
buf := p.Get(archive)
@ -192,8 +206,8 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
}
}
// CompressStream compresses the dest with specified compression algorithm.
func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) {
// CompressStream compresses the dest with specified compression algorithm.
func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
p := pools.BufioWriter32KPool
buf := p.Get(dest)
switch compression {
@ -228,6 +242,11 @@ func (compression *Compression) Extension() string {
return ""
}
type tarWhiteoutConverter interface {
ConvertWrite(*tar.Header, string, os.FileInfo) error
ConvertRead(*tar.Header, string) (bool, error)
}
type tarAppender struct {
TarWriter *tar.Writer
Buffer *bufio.Writer
@ -236,6 +255,12 @@ type tarAppender struct {
SeenFiles map[uint64]string
UIDMaps []idtools.IDMap
GIDMaps []idtools.IDMap
// For packing and unpacking whiteout files in the
// non standard format. The whiteout files defined
// by the AUFS standard are used as the tar whiteout
// standard.
WhiteoutConverter tarWhiteoutConverter
}
// canonicalTarName provides a platform-independent and consistent posix-style
@ -253,6 +278,7 @@ func canonicalTarName(name string, isDir bool) (string, error) {
return name, nil
}
// addTarFile adds to the tar archive a file from `path` as `name`
func (ta *tarAppender) addTarFile(path, name string) error {
fi, err := os.Lstat(path)
if err != nil {
@ -323,11 +349,17 @@ func (ta *tarAppender) addTarFile(path, name string) error {
hdr.Gid = xGID
}
if ta.WhiteoutConverter != nil {
if err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi); err != nil {
return err
}
}
if err := ta.TarWriter.WriteHeader(hdr); err != nil {
return err
}
if hdr.Typeflag == tar.TypeReg {
if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
file, err := os.Open(path)
if err != nil {
return err
@ -408,7 +440,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
}
case tar.TypeXGlobalHeader:
logrus.Debugf("PAX Global Extended Headers found and ignored")
logrus.Debug("PAX Global Extended Headers found and ignored")
return nil
default:
@ -425,10 +457,26 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
}
}
var errors []string
for key, value := range hdr.Xattrs {
if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
if err == syscall.ENOTSUP {
// We ignore errors here because not all graphdrivers support
// xattrs *cough* old versions of AUFS *cough*. However only
// ENOTSUP should be emitted in that case, otherwise we still
// bail.
errors = append(errors, err.Error())
continue
}
return err
}
}
if len(errors) > 0 {
logrus.WithFields(logrus.Fields{
"errors": errors,
}).Warn("ignored xattrs in archive: underlying filesystem doesn't support them")
}
// There is no LChmod, so ignore mode for symlink. Also, this
@ -492,23 +540,24 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
go func() {
ta := &tarAppender{
TarWriter: tar.NewWriter(compressWriter),
Buffer: pools.BufioWriter32KPool.Get(nil),
SeenFiles: make(map[uint64]string),
UIDMaps: options.UIDMaps,
GIDMaps: options.GIDMaps,
TarWriter: tar.NewWriter(compressWriter),
Buffer: pools.BufioWriter32KPool.Get(nil),
SeenFiles: make(map[uint64]string),
UIDMaps: options.UIDMaps,
GIDMaps: options.GIDMaps,
WhiteoutConverter: getWhiteoutConverter(options.WhiteoutFormat),
}
defer func() {
// Make sure to check the error on Close.
if err := ta.TarWriter.Close(); err != nil {
logrus.Debugf("Can't close tar writer: %s", err)
logrus.Errorf("Can't close tar writer: %s", err)
}
if err := compressWriter.Close(); err != nil {
logrus.Debugf("Can't close compress writer: %s", err)
logrus.Errorf("Can't close compress writer: %s", err)
}
if err := pipeWriter.Close(); err != nil {
logrus.Debugf("Can't close pipe writer: %s", err)
logrus.Errorf("Can't close pipe writer: %s", err)
}
}()
@ -551,7 +600,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
walkRoot := getWalkRoot(srcPath, include)
filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
if err != nil {
logrus.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err)
logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err)
return nil
}
@ -576,16 +625,42 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
if include != relFilePath {
skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs)
if err != nil {
logrus.Debugf("Error matching %s: %v", relFilePath, err)
logrus.Errorf("Error matching %s: %v", relFilePath, err)
return err
}
}
if skip {
if !exceptions && f.IsDir() {
// If we want to skip this file and it's a directory
// then we should first check to see if there's an
// excludes pattern (e.g. !dir/file) that starts with this
// dir. If so then we can't skip this dir.
// It's not a dir, so we can just return/skip.
if !f.IsDir() {
return nil
}
// No exceptions (!...) in patterns so just skip dir
if !exceptions {
return filepath.SkipDir
}
return nil
dirSlash := relFilePath + string(filepath.Separator)
for _, pat := range patterns {
if pat[0] != '!' {
continue
}
pat = pat[1:] + string(filepath.Separator)
if strings.HasPrefix(pat, dirSlash) {
// found a match - so can't skip this dir
return nil
}
}
// No matching exclusion dir so just skip dir
return filepath.SkipDir
}
if seen[relFilePath] {
@ -607,7 +682,11 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
}
if err := ta.addTarFile(filePath, relFilePath); err != nil {
logrus.Debugf("Can't add file %s to tar: %s", filePath, err)
logrus.Errorf("Can't add file %s to tar: %s", filePath, err)
// if pipe is broken, stop writing tar stream to it
if err == io.ErrClosedPipe {
return err
}
}
return nil
})
@ -628,6 +707,7 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err
if err != nil {
return err
}
whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat)
// Iterate through the files in the archive.
loop:
@ -660,7 +740,7 @@ loop:
parent := filepath.Dir(hdr.Name)
parentPath := filepath.Join(dest, parent)
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
err = system.MkdirAll(parentPath, 0777)
err = idtools.MkdirAllNewAs(parentPath, 0777, remappedRootUID, remappedRootGID)
if err != nil {
return err
}
@ -727,6 +807,16 @@ loop:
hdr.Gid = xGID
}
if whiteoutConverter != nil {
writeFile, err := whiteoutConverter.ConvertRead(hdr, path)
if err != nil {
return err
}
if !writeFile {
continue
}
}
if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts); err != nil {
return err
}
@ -851,9 +941,17 @@ func (archiver *Archiver) CopyWithTar(src, dst string) error {
if !srcSt.IsDir() {
return archiver.CopyFileWithTar(src, dst)
}
// if this archiver is set up with ID mapping we need to create
// the new destination directory with the remapped root UID/GID pair
// as owner
rootUID, rootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps)
if err != nil {
return err
}
// Create dst, copy src's content into it
logrus.Debugf("Creating dest directory: %s", dst)
if err := system.MkdirAll(dst, 0755); err != nil {
if err := idtools.MkdirAllNewAs(dst, 0755, rootUID, rootGID); err != nil {
return err
}
logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)

View File

@ -0,0 +1,91 @@
package archive
import (
"archive/tar"
"os"
"path/filepath"
"strings"
"syscall"
"github.com/docker/docker/pkg/system"
)
func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
if format == OverlayWhiteoutFormat {
return overlayWhiteoutConverter{}
}
return nil
}
type overlayWhiteoutConverter struct{}
func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) error {
// convert whiteouts to AUFS format
if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 {
// we just rename the file and make it normal
dir, filename := filepath.Split(hdr.Name)
hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename)
hdr.Mode = 0600
hdr.Typeflag = tar.TypeReg
hdr.Size = 0
}
if fi.Mode()&os.ModeDir != 0 {
// convert opaque dirs to AUFS format by writing an empty file with the prefix
opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque")
if err != nil {
return err
}
if opaque != nil && len(opaque) == 1 && opaque[0] == 'y' {
// create a header for the whiteout file
// it should inherit some properties from the parent, but be a regular file
*hdr = tar.Header{
Typeflag: tar.TypeReg,
Mode: hdr.Mode & int64(os.ModePerm),
Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir),
Size: 0,
Uid: hdr.Uid,
Uname: hdr.Uname,
Gid: hdr.Gid,
Gname: hdr.Gname,
AccessTime: hdr.AccessTime,
ChangeTime: hdr.ChangeTime,
}
}
}
return nil
}
func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
base := filepath.Base(path)
dir := filepath.Dir(path)
// if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay
if base == WhiteoutOpaqueDir {
if err := syscall.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0); err != nil {
return false, err
}
// don't write the file itself
return false, nil
}
// if a file was deleted and we are using overlay, we need to create a character device
if strings.HasPrefix(base, WhiteoutPrefix) {
originalBase := base[len(WhiteoutPrefix):]
originalPath := filepath.Join(dir, originalBase)
if err := syscall.Mknod(originalPath, syscall.S_IFCHR, 0); err != nil {
return false, err
}
if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
return false, err
}
// don't write the file itself
return false, nil
}
return true, nil
}
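For reference, the AUFS convention this converter normalizes to encodes deletions and opacity purely in file names. A self-contained sketch of that naming; the constants mirror pkg/archive's WhiteoutPrefix and WhiteoutOpaqueDir, which are defined in a file not shown in this diff:
package main

import (
	"fmt"
	"path/filepath"
)

// Local mirrors of pkg/archive's exported constants (defined outside this diff):
// WhiteoutPrefix marks a deleted file, WhiteoutOpaqueDir marks an opaque dir.
const (
	whiteoutPrefix    = ".wh."
	whiteoutOpaqueDir = ".wh..wh..opq"
)

// aufsWhiteout returns the name a deleted path takes in an AUFS-style layer.
func aufsWhiteout(deleted string) string {
	dir, name := filepath.Split(deleted)
	return filepath.Join(dir, whiteoutPrefix+name)
}

func main() {
	fmt.Println(aufsWhiteout("etc/passwd"))               // etc/.wh.passwd
	fmt.Println(filepath.Join("data", whiteoutOpaqueDir)) // data/.wh..wh..opq
}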

View File

@ -0,0 +1,7 @@
// +build !linux
package archive
func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
return nil
}

View File

@ -9,7 +9,7 @@ import (
"path/filepath"
"syscall"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
"github.com/docker/docker/pkg/system"
)
// fixVolumePathPrefix does platform specific processing to ensure that if

View File

@ -9,7 +9,7 @@ import (
"path/filepath"
"strings"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath"
"github.com/docker/docker/pkg/longpath"
)
// fixVolumePathPrefix does platform specific processing to ensure that if

View File

@ -13,10 +13,10 @@ import (
"syscall"
"time"
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/pools"
"github.com/docker/docker/pkg/system"
)
// ChangeType represents the change type.
@ -81,6 +81,33 @@ func sameFsTimeSpec(a, b syscall.Timespec) bool {
// Changes walks the path rw and determines changes for the files in the path,
// with respect to the parent layers
func Changes(layers []string, rw string) ([]Change, error) {
return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip)
}
func aufsMetadataSkip(path string) (skip bool, err error) {
skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path)
if err != nil {
skip = true
}
return
}
func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
f := filepath.Base(path)
// If there is a whiteout, then the file was removed
if strings.HasPrefix(f, WhiteoutPrefix) {
originalFile := f[len(WhiteoutPrefix):]
return filepath.Join(filepath.Dir(path), originalFile), nil
}
return "", nil
}
type skipChange func(string) (bool, error)
type deleteChange func(string, string, os.FileInfo) (string, error)
func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) {
var (
changes []Change
changedDirs = make(map[string]struct{})
@ -105,21 +132,24 @@ func Changes(layers []string, rw string) ([]Change, error) {
return nil
}
// Skip AUFS metadata
if matched, err := filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path); err != nil || matched {
return err
if sc != nil {
if skip, err := sc(path); skip {
return err
}
}
change := Change{
Path: path,
}
deletedFile, err := dc(rw, path, f)
if err != nil {
return err
}
// Find out what kind of modification happened
file := filepath.Base(path)
// If there is a whiteout, then the file was removed
if strings.HasPrefix(file, WhiteoutPrefix) {
originalFile := file[len(WhiteoutPrefix):]
change.Path = filepath.Join(filepath.Dir(path), originalFile)
if deletedFile != "" {
change.Path = deletedFile
change.Kind = ChangeDelete
} else {
// Otherwise, the file was added

View File

@ -9,7 +9,7 @@ import (
"syscall"
"unsafe"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
"github.com/docker/docker/pkg/system"
)
// walker is used to implement collectFileInfoForChanges on linux. Where this
@ -283,3 +283,30 @@ func clen(n []byte) int {
}
return len(n)
}
// OverlayChanges walks the path rw and determines changes for the files in the path,
// with respect to the parent layers
func OverlayChanges(layers []string, rw string) ([]Change, error) {
return changes(layers, rw, overlayDeletedFile, nil)
}
func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) {
if fi.Mode()&os.ModeCharDevice != 0 {
s := fi.Sys().(*syscall.Stat_t)
if major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0 {
return path, nil
}
}
if fi.Mode()&os.ModeDir != 0 {
opaque, err := system.Lgetxattr(filepath.Join(root, path), "trusted.overlay.opaque")
if err != nil {
return "", err
}
if opaque != nil && len(opaque) == 1 && opaque[0] == 'y' {
return path, nil
}
}
return "", nil
}

View File

@ -9,7 +9,7 @@ import (
"runtime"
"strings"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
"github.com/docker/docker/pkg/system"
)
func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) {

View File

@ -6,7 +6,7 @@ import (
"os"
"syscall"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
"github.com/docker/docker/pkg/system"
)
func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {

View File

@ -3,7 +3,7 @@ package archive
import (
"os"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
"github.com/docker/docker/pkg/system"
)
func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {

View File

@ -9,8 +9,8 @@ import (
"path/filepath"
"strings"
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/system"
)
// Errors used or returned by this file.
@ -103,7 +103,7 @@ func TarResourceRebase(sourcePath, rebaseName string) (content Archive, err erro
return
}
// Separate the source path between it's directory and
// Separate the source path between its directory and
// the entry in that directory which we are archiving.
sourceDir, sourceBase := SplitPathDirEntry(sourcePath)

View File

@ -10,10 +10,10 @@ import (
"runtime"
"strings"
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/pools"
"github.com/docker/docker/pkg/system"
)
// UnpackLayer unpacks `layer` to a `dest`. The stream `layer` can be

View File

@ -13,8 +13,8 @@ import (
"os"
"path"
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/archive"
)
var (

View File

@ -10,15 +10,15 @@ import (
"strings"
"text/scanner"
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
"github.com/Sirupsen/logrus"
)
// exclusion return true if the specified pattern is an exclusion
// exclusion returns true if the specified pattern is an exclusion
func exclusion(pattern string) bool {
return pattern[0] == '!'
}
// empty return true if the specified pattern is empty
// empty returns true if the specified pattern is empty
func empty(pattern string) bool {
return pattern == ""
}
@ -31,7 +31,7 @@ func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) {
// Loop over exclusion patterns and:
// 1. Clean them up.
// 2. Indicate whether we are dealing with any exception rules.
// 3. Error if we see a single exclusion marker on it's own (!).
// 3. Error if we see a single exclusion marker on its own (!).
cleanedPatterns := []string{}
patternDirs := [][]string{}
exceptions := false
@ -52,7 +52,7 @@ func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) {
if exclusion(pattern) {
pattern = pattern[1:]
}
patternDirs = append(patternDirs, strings.Split(pattern, "/"))
patternDirs = append(patternDirs, strings.Split(pattern, string(os.PathSeparator)))
}
return cleanedPatterns, patternDirs, exceptions, nil
@ -83,8 +83,9 @@ func Matches(file string, patterns []string) (bool, error) {
// The more generic fileutils.Matches() can't make these assumptions.
func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) {
matched := false
file = filepath.FromSlash(file)
parentPath := filepath.Dir(file)
parentPathDirs := strings.Split(parentPath, "/")
parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))
for i, pattern := range patterns {
negative := false
@ -102,8 +103,8 @@ func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool,
if !match && parentPath != "." {
// Check to see if the pattern matches one of our parent dirs.
if len(patDirs[i]) <= len(parentPathDirs) {
match, _ = regexpMatch(strings.Join(patDirs[i], "/"),
strings.Join(parentPathDirs[:len(patDirs[i])], "/"))
match, _ = regexpMatch(strings.Join(patDirs[i], string(os.PathSeparator)),
strings.Join(parentPathDirs[:len(patDirs[i])], string(os.PathSeparator)))
}
}
@ -125,6 +126,9 @@ func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool,
// of directories. This means that we should be backwards compatible
// with filepath.Match(). We'll end up supporting more stuff, due to
// the fact that we're using regexp, but that's ok - it does no harm.
//
// As per the comment in Go's filepath.Match, on Windows, escaping
// is disabled. Instead, '\\' is treated as path separator.
func regexpMatch(pattern, path string) (bool, error) {
regStr := "^"
@ -213,7 +217,7 @@ func regexpMatch(pattern, path string) (bool, error) {
}
// CopyFile copies from src to dst until either EOF is reached
// on src or an error occurs. It verifies src exists and remove
// on src or an error occurs. It verifies src exists and removes
// the dst if it exists.
func CopyFile(src, dst string) (int64, error) {
cleanSrc := filepath.Clean(src)
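The exclusion semantics above, with exceptions signalled by a leading '!', are what TarWithOptions relies on when deciding whether it may skip a whole directory. A minimal sketch of the exception behavior, assuming the vendored import path:
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/fileutils" // assumed vendored path
)

func main() {
	// "dir/*" excludes everything under dir; "!dir/keep.txt" is the kind of
	// exception that forces TarWithOptions to descend into an excluded dir.
	patterns := []string{"dir/*", "!dir/keep.txt"}
	for _, f := range []string{"dir/drop.txt", "dir/keep.txt"} {
		skip, err := fileutils.Matches(f, patterns)
		fmt.Println(f, "skip:", skip, "err:", err)
	}
	// dir/drop.txt skip: true  -> excluded by dir/*
	// dir/keep.txt skip: false -> re-included by the ! exception
}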

View File

@ -0,0 +1,27 @@
package fileutils
import (
"os"
"os/exec"
"strconv"
"strings"
)
// GetTotalUsedFds returns the number of used File Descriptors by
// executing `lsof -p PID`
func GetTotalUsedFds() int {
pid := os.Getpid()
cmd := exec.Command("lsof", "-p", strconv.Itoa(pid))
output, err := cmd.CombinedOutput()
if err != nil {
return -1
}
outputStr := strings.TrimSpace(string(output))
fds := strings.Split(outputStr, "\n")
return len(fds) - 1
}

View File

@ -0,0 +1,7 @@
package fileutils
// GetTotalUsedFds returns the number of used File Descriptors.
// On Solaris these limits are per process and not systemwide
func GetTotalUsedFds() int {
return -1
}

View File

@ -7,7 +7,7 @@ import (
"io/ioutil"
"os"
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
"github.com/Sirupsen/logrus"
)
// GetTotalUsedFds returns the number of used File Descriptors by

View File

@ -4,7 +4,7 @@ import (
"os"
"runtime"
"github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user"
"github.com/opencontainers/runc/libcontainer/user"
)
// Key returns the env var name for the user's home dir based on

View File

@ -155,6 +155,9 @@ func parseSubgid(username string) (ranges, error) {
return parseSubidFile(subgidFileName, username)
}
// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid)
// and return all found ranges for a specified username. If the special value
// "ALL" is supplied for username, then all ranges in the file will be returned
func parseSubidFile(path, username string) (ranges, error) {
var rangeList ranges
@ -171,15 +174,14 @@ func parseSubidFile(path, username string) (ranges, error) {
}
text := strings.TrimSpace(s.Text())
if text == "" {
if text == "" || strings.HasPrefix(text, "#") {
continue
}
parts := strings.Split(text, ":")
if len(parts) != 3 {
return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path)
}
if parts[0] == username {
// return the first entry for a user; ignores potential for multiple ranges per user
if parts[0] == username || username == "ALL" {
startid, err := strconv.Atoi(parts[1])
if err != nil {
return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)

View File

@ -6,7 +6,7 @@ import (
"os"
"path/filepath"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
"github.com/docker/docker/pkg/system"
)
func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {

View File

@ -5,7 +5,7 @@ package idtools
import (
"os"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
"github.com/docker/docker/pkg/system"
)
// Platforms such as Windows do not support the UID/GID concept. So make this

View File

@ -0,0 +1,188 @@
package idtools
import (
"fmt"
"os/exec"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"sync"
)
// add a user and/or group to Linux /etc/passwd, /etc/group using standard
// Linux distribution commands:
// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group <username>
// useradd -r -s /bin/false <username>
var (
once sync.Once
userCommand string
cmdTemplates = map[string]string{
"adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s",
"useradd": "-r -s /bin/false %s",
"usermod": "-%s %d-%d %s",
}
idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`)
// default length for a UID/GID subordinate range
defaultRangeLen = 65536
defaultRangeStart = 100000
userMod = "usermod"
)
func resolveBinary(binname string) (string, error) {
binaryPath, err := exec.LookPath(binname)
if err != nil {
return "", err
}
resolvedPath, err := filepath.EvalSymlinks(binaryPath)
if err != nil {
return "", err
}
// only return no error if the final resolved binary basename
// matches what was searched for
if filepath.Base(resolvedPath) == binname {
return resolvedPath, nil
}
return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath)
}
// AddNamespaceRangesUser takes a username and uses the standard system
// utility to create a system user/group pair used to hold the
// /etc/sub{uid,gid} ranges which will be used for user namespace
// mapping ranges in containers.
func AddNamespaceRangesUser(name string) (int, int, error) {
if err := addUser(name); err != nil {
return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err)
}
// Query the system for the created uid and gid pair
out, err := execCmd("id", name)
if err != nil {
return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err)
}
matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out)))
if len(matches) != 3 {
return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out))
}
uid, err := strconv.Atoi(matches[1])
if err != nil {
return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err)
}
gid, err := strconv.Atoi(matches[2])
if err != nil {
return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err)
}
// Now we need to create the subuid/subgid ranges for our new user/group (system users
// do not get auto-created ranges in subuid/subgid)
if err := createSubordinateRanges(name); err != nil {
return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err)
}
return uid, gid, nil
}
func addUser(userName string) error {
once.Do(func() {
// set up which commands are used for adding users/groups dependent on distro
if _, err := resolveBinary("adduser"); err == nil {
userCommand = "adduser"
} else if _, err := resolveBinary("useradd"); err == nil {
userCommand = "useradd"
}
})
if userCommand == "" {
return fmt.Errorf("Cannot add user; no useradd/adduser binary found")
}
args := fmt.Sprintf(cmdTemplates[userCommand], userName)
out, err := execCmd(userCommand, args)
if err != nil {
return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out))
}
return nil
}
func createSubordinateRanges(name string) error {
// first, we should verify that ranges weren't automatically created
// by the distro tooling
ranges, err := parseSubuid(name)
if err != nil {
return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err)
}
if len(ranges) == 0 {
// no UID ranges; let's create one
startID, err := findNextUIDRange()
if err != nil {
return fmt.Errorf("Can't find available subuid range: %v", err)
}
out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name))
if err != nil {
return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err)
}
}
ranges, err = parseSubgid(name)
if err != nil {
return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err)
}
if len(ranges) == 0 {
// no GID ranges; let's create one
startID, err := findNextGIDRange()
if err != nil {
return fmt.Errorf("Can't find available subgid range: %v", err)
}
out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name))
if err != nil {
return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err)
}
}
return nil
}
func findNextUIDRange() (int, error) {
ranges, err := parseSubuid("ALL")
if err != nil {
return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err)
}
sort.Sort(ranges)
return findNextRangeStart(ranges)
}
func findNextGIDRange() (int, error) {
ranges, err := parseSubgid("ALL")
if err != nil {
return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err)
}
sort.Sort(ranges)
return findNextRangeStart(ranges)
}
func findNextRangeStart(rangeList ranges) (int, error) {
startID := defaultRangeStart
for _, arange := range rangeList {
if wouldOverlap(arange, startID) {
startID = arange.Start + arange.Length
}
}
return startID, nil
}
func wouldOverlap(arange subIDRange, ID int) bool {
low := ID
high := ID + defaultRangeLen
if (low >= arange.Start && low <= arange.Start+arange.Length) ||
(high <= arange.Start+arange.Length && high >= arange.Start) {
return true
}
return false
}
func execCmd(cmd, args string) ([]byte, error) {
execCmd := exec.Command(cmd, strings.Split(args, " ")...)
return execCmd.CombinedOutput()
}
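To make the range scan concrete: /etc/subuid entries have the form name:start:length (e.g. dockremap:100000:65536), and findNextRangeStart hops the candidate start past each sorted range that wouldOverlap a 65536-wide block. A self-contained walk-through with stand-in types, since subIDRange and the defaults are private to idtools:
package main

import "fmt"

// Stand-ins for idtools' private subIDRange type and defaults.
type subIDRange struct{ Start, Length int }

const (
	defaultRangeStart = 100000
	defaultRangeLen   = 65536
)

// wouldOverlap mirrors the check above: does [id, id+defaultRangeLen]
// intersect an existing range?
func wouldOverlap(r subIDRange, id int) bool {
	low, high := id, id+defaultRangeLen
	return (low >= r.Start && low <= r.Start+r.Length) ||
		(high <= r.Start+r.Length && high >= r.Start)
}

func main() {
	// Two ranges already present (sorted), e.g. parsed from /etc/subuid
	// lines "userA:100000:65536" and "userB:165536:65536".
	existing := []subIDRange{{100000, 65536}, {165536, 65536}}
	start := defaultRangeStart
	for _, r := range existing {
		if wouldOverlap(r, start) {
			start = r.Start + r.Length
		}
	}
	fmt.Println(start) // 231072: the first start clear of both ranges
}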

51
vendor/github.com/docker/docker/pkg/ioutils/buffer.go generated vendored Normal file
View File

@ -0,0 +1,51 @@
package ioutils
import (
"errors"
"io"
)
var errBufferFull = errors.New("buffer is full")
type fixedBuffer struct {
buf []byte
pos int
lastRead int
}
func (b *fixedBuffer) Write(p []byte) (int, error) {
n := copy(b.buf[b.pos:cap(b.buf)], p)
b.pos += n
if n < len(p) {
if b.pos == cap(b.buf) {
return n, errBufferFull
}
return n, io.ErrShortWrite
}
return n, nil
}
func (b *fixedBuffer) Read(p []byte) (int, error) {
n := copy(p, b.buf[b.lastRead:b.pos])
b.lastRead += n
return n, nil
}
func (b *fixedBuffer) Len() int {
return b.pos - b.lastRead
}
func (b *fixedBuffer) Cap() int {
return cap(b.buf)
}
func (b *fixedBuffer) Reset() {
b.pos = 0
b.lastRead = 0
b.buf = b.buf[:0]
}
func (b *fixedBuffer) String() string {
return string(b.buf[b.lastRead:b.pos])
}
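fixedBuffer is unexported, so this standalone sketch mirrors its Write logic (slightly simplified: the io.ErrShortWrite branch cannot trigger when the copy extends to cap) to show the contract BytesPipe builds on in the next file: Write fills only the preallocated capacity and signals errBufferFull when it runs out, which is the caller's cue to chain a new, larger buffer.
package main

import (
	"errors"
	"fmt"
)

var errBufferFull = errors.New("buffer is full")

// demoBuffer mirrors fixedBuffer's Write: copy into spare capacity only,
// never reallocate.
type demoBuffer struct {
	buf []byte
	pos int
}

func (b *demoBuffer) Write(p []byte) (int, error) {
	n := copy(b.buf[b.pos:cap(b.buf)], p)
	b.pos += n
	if n < len(p) {
		return n, errBufferFull // capacity exhausted; caller must chain a new buffer
	}
	return n, nil
}

func main() {
	b := &demoBuffer{buf: make([]byte, 0, 4)}
	n, err := b.Write([]byte("hello"))
	fmt.Println(n, err) // 4 buffer is full
}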

View File

@ -9,12 +9,20 @@ import (
// maxCap is the highest capacity to use in byte slices that buffer data.
const maxCap = 1e6
// minCap is the lowest capacity to use in byte slices that buffer data
const minCap = 64
// blockThreshold is the minimum number of bytes in the buffer which will cause
// a write to BytesPipe to block when allocating a new slice.
const blockThreshold = 1e6
// ErrClosed is returned when Write is called on a closed BytesPipe.
var ErrClosed = errors.New("write to closed BytesPipe")
var (
// ErrClosed is returned when Write is called on a closed BytesPipe.
ErrClosed = errors.New("write to closed BytesPipe")
bufPools = make(map[int]*sync.Pool)
bufPoolsLock sync.Mutex
)
// BytesPipe is an io.ReadWriteCloser which works similarly to a pipe (queue).
// All written data may be read at most once. Also, BytesPipe allocates
@ -23,22 +31,17 @@ var ErrClosed = errors.New("write to closed BytesPipe")
type BytesPipe struct {
mu sync.Mutex
wait *sync.Cond
buf [][]byte // slice of byte-slices of buffered data
lastRead int // index in the first slice to a read point
bufLen int // length of data buffered over the slices
closeErr error // error to return from next Read. set to nil if not closed.
buf []*fixedBuffer
bufLen int
closeErr error // error to return from next Read. set to nil if not closed.
}
// NewBytesPipe creates new BytesPipe, initialized by specified slice.
// If buf is nil, then it will be initialized with slice which cap is 64.
// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf).
func NewBytesPipe(buf []byte) *BytesPipe {
if cap(buf) == 0 {
buf = make([]byte, 0, 64)
}
bp := &BytesPipe{
buf: [][]byte{buf[:0]},
}
func NewBytesPipe() *BytesPipe {
bp := &BytesPipe{}
bp.buf = append(bp.buf, getBuffer(minCap))
bp.wait = sync.NewCond(&bp.mu)
return bp
}
@ -47,22 +50,31 @@ func NewBytesPipe(buf []byte) *BytesPipe {
// It can allocate new []byte slices in a process of writing.
func (bp *BytesPipe) Write(p []byte) (int, error) {
bp.mu.Lock()
defer bp.mu.Unlock()
written := 0
loop0:
for {
if bp.closeErr != nil {
bp.mu.Unlock()
return written, ErrClosed
}
// write data to the last buffer
b := bp.buf[len(bp.buf)-1]
// copy data to the current empty allocated area
n := copy(b[len(b):cap(b)], p)
// increment buffered data length
bp.bufLen += n
// include written data in last buffer
bp.buf[len(bp.buf)-1] = b[:len(b)+n]
if len(bp.buf) == 0 {
bp.buf = append(bp.buf, getBuffer(64))
}
// get the last buffer
b := bp.buf[len(bp.buf)-1]
n, err := b.Write(p)
written += n
bp.bufLen += n
// errBufferFull is an error we expect to get if the buffer is full
if err != nil && err != errBufferFull {
bp.wait.Broadcast()
bp.mu.Unlock()
return written, err
}
// if there was enough room to write all then break
if len(p) == n {
@ -72,20 +84,23 @@ func (bp *BytesPipe) Write(p []byte) (int, error) {
// more data: write to the next slice
p = p[n:]
// block if too much data is still in the buffer
// make sure the buffer doesn't grow too big from this write
for bp.bufLen >= blockThreshold {
bp.wait.Wait()
if bp.closeErr != nil {
continue loop0
}
}
// allocate slice that has twice the size of the last unless maximum reached
nextCap := 2 * cap(bp.buf[len(bp.buf)-1])
// add new byte slice to the buffers slice and continue writing
nextCap := b.Cap() * 2
if nextCap > maxCap {
nextCap = maxCap
}
// add new byte slice to the buffers slice and continue writing
bp.buf = append(bp.buf, make([]byte, 0, nextCap))
bp.buf = append(bp.buf, getBuffer(nextCap))
}
bp.wait.Broadcast()
bp.mu.Unlock()
return written, nil
}
@ -107,46 +122,65 @@ func (bp *BytesPipe) Close() error {
return bp.CloseWithError(nil)
}
func (bp *BytesPipe) len() int {
return bp.bufLen - bp.lastRead
}
// Read reads bytes from BytesPipe.
// Data could be read only once.
func (bp *BytesPipe) Read(p []byte) (n int, err error) {
bp.mu.Lock()
defer bp.mu.Unlock()
if bp.len() == 0 {
if bp.bufLen == 0 {
if bp.closeErr != nil {
bp.mu.Unlock()
return 0, bp.closeErr
}
bp.wait.Wait()
if bp.len() == 0 && bp.closeErr != nil {
return 0, bp.closeErr
if bp.bufLen == 0 && bp.closeErr != nil {
err := bp.closeErr
bp.mu.Unlock()
return 0, err
}
}
for {
read := copy(p, bp.buf[0][bp.lastRead:])
for bp.bufLen > 0 {
b := bp.buf[0]
read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error
n += read
bp.lastRead += read
if bp.len() == 0 {
// we have read everything. reset to the beginning.
bp.lastRead = 0
bp.bufLen -= len(bp.buf[0])
bp.buf[0] = bp.buf[0][:0]
break
bp.bufLen -= read
if b.Len() == 0 {
// it's empty so return it to the pool and move to the next one
returnBuffer(b)
bp.buf[0] = nil
bp.buf = bp.buf[1:]
}
// break if everything was read
if len(p) == read {
break
}
// more buffered data and more asked. read from next slice.
p = p[read:]
bp.lastRead = 0
bp.bufLen -= len(bp.buf[0])
bp.buf[0] = nil // throw away old slice
bp.buf = bp.buf[1:] // switch to next
}
bp.wait.Broadcast()
bp.mu.Unlock()
return
}
func returnBuffer(b *fixedBuffer) {
b.Reset()
bufPoolsLock.Lock()
pool := bufPools[b.Cap()]
bufPoolsLock.Unlock()
if pool != nil {
pool.Put(b)
}
}
func getBuffer(size int) *fixedBuffer {
bufPoolsLock.Lock()
pool, ok := bufPools[size]
if !ok {
pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }}
bufPools[size] = pool
}
bufPoolsLock.Unlock()
return pool.Get().(*fixedBuffer)
}
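A minimal usage sketch under the new zero-argument constructor, assuming the vendored import path and that a clean Close makes a drained Read report io.EOF (the CloseWithError body is not shown in this diff): writes land in pooled fixed-size buffers and each byte is readable exactly once.
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/ioutils" // assumed vendored path
)

func main() {
	bp := ioutils.NewBytesPipe() // note: no initial slice argument anymore
	go func() {
		bp.Write([]byte("hello, "))
		bp.Write([]byte("pipe"))
		bp.Close()
	}()
	buf := make([]byte, 32)
	for {
		n, err := bp.Read(buf)
		fmt.Print(string(buf[:n]))
		if err != nil { // close error once the pipe is drained
			break
		}
	}
	fmt.Println()
}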

View File

@ -0,0 +1,82 @@
package ioutils
import (
"io"
"io/ioutil"
"os"
"path/filepath"
)
// NewAtomicFileWriter returns a WriteCloser so that writing to it writes to a
// temporary file and closing it atomically changes the temporary file to the
// destination path. Writing and closing concurrently is not allowed.
func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) {
f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
if err != nil {
return nil, err
}
abspath, err := filepath.Abs(filename)
if err != nil {
return nil, err
}
return &atomicFileWriter{
f: f,
fn: abspath,
perm: perm,
}, nil
}
// AtomicWriteFile atomically writes data to a file named by filename.
func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
f, err := NewAtomicFileWriter(filename, perm)
if err != nil {
return err
}
n, err := f.Write(data)
if err == nil && n < len(data) {
err = io.ErrShortWrite
f.(*atomicFileWriter).writeErr = err
}
if err1 := f.Close(); err == nil {
err = err1
}
return err
}
type atomicFileWriter struct {
f *os.File
fn string
writeErr error
perm os.FileMode
}
func (w *atomicFileWriter) Write(dt []byte) (int, error) {
n, err := w.f.Write(dt)
if err != nil {
w.writeErr = err
}
return n, err
}
func (w *atomicFileWriter) Close() (retErr error) {
defer func() {
if retErr != nil || w.writeErr != nil {
os.Remove(w.f.Name())
}
}()
if err := w.f.Sync(); err != nil {
w.f.Close()
return err
}
if err := w.f.Close(); err != nil {
return err
}
if err := os.Chmod(w.f.Name(), w.perm); err != nil {
return err
}
if w.writeErr == nil {
return os.Rename(w.f.Name(), w.fn)
}
return nil
}
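A minimal usage sketch, assuming the vendored import path: data is staged in a ".tmp-"-prefixed sibling, fsynced and chmodded, and only renamed over the target on a clean close, so a crash mid-write never leaves a partial file behind. The filename here is illustrative.
package main

import (
	"log"

	"github.com/docker/docker/pkg/ioutils" // assumed vendored path
)

func main() {
	// config.json is a hypothetical target; 0600 keeps it owner-only.
	if err := ioutils.AtomicWriteFile("config.json", []byte(`{"ok":true}`), 0600); err != nil {
		log.Fatal(err)
	}
}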

View File

@ -5,7 +5,7 @@ import (
"encoding/hex"
"io"
"github.com/fsouza/go-dockerclient/external/golang.org/x/net/context"
"golang.org/x/net/context"
)
type readCloserWrapper struct {
@ -55,7 +55,7 @@ func HashData(src io.Reader) (string, error) {
return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
}
// OnEOFReader wraps a io.ReadCloser and a function
// OnEOFReader wraps an io.ReadCloser and a function
// the function will run at the end of file or close the file.
type OnEOFReader struct {
Rc io.ReadCloser

View File

@ -5,7 +5,7 @@ package ioutils
import (
"io/ioutil"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath"
"github.com/docker/docker/pkg/longpath"
)
// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format.

View File

@ -1,9 +1,7 @@
package ioutils
import (
"errors"
"io"
"net/http"
"sync"
)
@ -11,45 +9,43 @@ import (
// is a flush. In addition, the Close method can be called to intercept
// Read/Write calls if the target's lifecycle has already ended.
type WriteFlusher struct {
mu sync.Mutex
w io.Writer
flusher http.Flusher
flushed bool
closed error
// TODO(stevvooe): Use channel for closed instead, remove mutex. Using a
// channel will allow one to properly order the operations.
w io.Writer
flusher flusher
flushed chan struct{}
flushedOnce sync.Once
closed chan struct{}
closeLock sync.Mutex
}
var errWriteFlusherClosed = errors.New("writeflusher: closed")
type flusher interface {
Flush()
}
var errWriteFlusherClosed = io.EOF
func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
wf.mu.Lock()
defer wf.mu.Unlock()
if wf.closed != nil {
return 0, wf.closed
select {
case <-wf.closed:
return 0, errWriteFlusherClosed
default:
}
n, err = wf.w.Write(b)
wf.flush() // every write is a flush.
wf.Flush() // every write is a flush.
return n, err
}
// Flush the stream immediately.
func (wf *WriteFlusher) Flush() {
wf.mu.Lock()
defer wf.mu.Unlock()
wf.flush()
}
// flush the stream immediately without taking a lock. Used internally.
func (wf *WriteFlusher) flush() {
if wf.closed != nil {
select {
case <-wf.closed:
return
default:
}
wf.flushed = true
wf.flushedOnce.Do(func() {
close(wf.flushed)
})
wf.flusher.Flush()
}
@ -59,34 +55,38 @@ func (wf *WriteFlusher) Flushed() bool {
// BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to
// be used to detect whether or not a response code has been issued.
// Another hook should be used instead.
wf.mu.Lock()
defer wf.mu.Unlock()
return wf.flushed
var flushed bool
select {
case <-wf.flushed:
flushed = true
default:
}
return flushed
}
// Close closes the write flusher, disallowing any further writes to the
// target. After the flusher is closed, all calls to write or flush will
// result in an error.
func (wf *WriteFlusher) Close() error {
wf.mu.Lock()
defer wf.mu.Unlock()
wf.closeLock.Lock()
defer wf.closeLock.Unlock()
if wf.closed != nil {
return wf.closed
select {
case <-wf.closed:
return errWriteFlusherClosed
default:
close(wf.closed)
}
wf.closed = errWriteFlusherClosed
return nil
}
// NewWriteFlusher returns a new WriteFlusher.
func NewWriteFlusher(w io.Writer) *WriteFlusher {
var flusher http.Flusher
if f, ok := w.(http.Flusher); ok {
flusher = f
var fl flusher
if f, ok := w.(flusher); ok {
fl = f
} else {
flusher = &NopFlusher{}
fl = &NopFlusher{}
}
return &WriteFlusher{w: w, flusher: flusher}
return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})}
}
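A minimal sketch of the typical use, assuming the vendored import path: wrap an http.ResponseWriter so every chunk reaches the client immediately, and Close the flusher when the stream's lifecycle ends so stray writes fail with the closed error instead of racing a finished request.
package main

import (
	"fmt"
	"log"
	"net/http"
	"time"

	"github.com/docker/docker/pkg/ioutils" // assumed vendored path
)

func stream(w http.ResponseWriter, r *http.Request) {
	wf := ioutils.NewWriteFlusher(w) // picks up w's Flush method if it has one
	defer wf.Close()
	for i := 0; i < 3; i++ {
		fmt.Fprintf(wf, "chunk %d\n", i) // each Write is also a Flush
		time.Sleep(100 * time.Millisecond)
	}
}

func main() {
	http.HandleFunc("/stream", stream)
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}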

View File

@ -14,7 +14,7 @@ import (
"io"
"sync"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/ioutils"
)
var (
@ -28,7 +28,7 @@ const buffer32K = 32 * 1024
// BufioReaderPool is a bufio reader that uses sync.Pool.
type BufioReaderPool struct {
pool sync.Pool
pool *sync.Pool
}
func init() {
@ -39,7 +39,7 @@ func init() {
// newBufioReaderPoolWithSize is unexported because new pools should be
// added here to be shared where required.
func newBufioReaderPoolWithSize(size int) *BufioReaderPool {
pool := sync.Pool{
pool := &sync.Pool{
New: func() interface{} { return bufio.NewReaderSize(nil, size) },
}
return &BufioReaderPool{pool: pool}
@ -80,13 +80,13 @@ func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Rea
// BufioWriterPool is a bufio writer that uses sync.Pool.
type BufioWriterPool struct {
pool sync.Pool
pool *sync.Pool
}
// newBufioWriterPoolWithSize is unexported because new pools should be
// added here to be shared where required.
func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
pool := sync.Pool{
pool := &sync.Pool{
New: func() interface{} { return bufio.NewWriterSize(nil, size) },
}
return &BufioWriterPool{pool: pool}

View File

@ -1,14 +1,28 @@
package stdcopy
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"sync"
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
"github.com/Sirupsen/logrus"
)
// StdType is the type of standard stream
// a writer can multiplex to.
type StdType byte
const (
// Stdin represents standard input stream type.
Stdin StdType = iota
// Stdout represents standard output stream type.
Stdout
// Stderr represents standard error stream type.
Stderr
stdWriterPrefixLen = 8
stdWriterFdIndex = 0
stdWriterSizeIndex = 4
@ -16,41 +30,40 @@ const (
startingBufLen = 32*1024 + stdWriterPrefixLen + 1
)
// StdType prefixes type and length to standard stream.
type StdType [stdWriterPrefixLen]byte
var bufPool = &sync.Pool{New: func() interface{} { return bytes.NewBuffer(nil) }}
var (
// Stdin represents standard input stream type.
Stdin = StdType{0: 0}
// Stdout represents standard output stream type.
Stdout = StdType{0: 1}
// Stderr represents standard error steam type.
Stderr = StdType{0: 2}
)
// StdWriter is wrapper of io.Writer with extra customized info.
type StdWriter struct {
// stdWriter is a wrapper of io.Writer with extra customized info.
type stdWriter struct {
io.Writer
prefix StdType
sizeBuf []byte
prefix byte
}
func (w *StdWriter) Write(buf []byte) (n int, err error) {
var n1, n2 int
// Write sends the buffer to the underlying writer.
// It inserts the prefix header before the buffer,
// so stdcopy.StdCopy knows where to multiplex the output.
// It makes stdWriter implement io.Writer.
func (w *stdWriter) Write(p []byte) (n int, err error) {
if w == nil || w.Writer == nil {
return 0, errors.New("Writer not instantiated")
}
binary.BigEndian.PutUint32(w.prefix[4:], uint32(len(buf)))
n1, err = w.Writer.Write(w.prefix[:])
if err != nil {
n = n1 - stdWriterPrefixLen
} else {
n2, err = w.Writer.Write(buf)
n = n1 + n2 - stdWriterPrefixLen
if p == nil {
return 0, nil
}
header := [stdWriterPrefixLen]byte{stdWriterFdIndex: w.prefix}
binary.BigEndian.PutUint32(header[stdWriterSizeIndex:], uint32(len(p)))
buf := bufPool.Get().(*bytes.Buffer)
buf.Write(header[:])
buf.Write(p)
n, err = w.Writer.Write(buf.Bytes())
n -= stdWriterPrefixLen
if n < 0 {
n = 0
}
buf.Reset()
bufPool.Put(buf)
return
}
@ -60,16 +73,13 @@ func (w *StdWriter) Write(buf []byte) (n int, err error) {
// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection.
// `t` indicates the id of the stream to encapsulate.
// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr.
func NewStdWriter(w io.Writer, t StdType) *StdWriter {
return &StdWriter{
Writer: w,
prefix: t,
sizeBuf: make([]byte, 4),
func NewStdWriter(w io.Writer, t StdType) io.Writer {
return &stdWriter{
Writer: w,
prefix: byte(t),
}
}
var errInvalidStdHeader = errors.New("Unrecognized input header")
// StdCopy is a modified version of io.Copy.
//
// StdCopy will demultiplex `src`, assuming that it contains two streams,
@ -110,18 +120,18 @@ func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error)
}
// Check the first byte to know where to write
switch buf[stdWriterFdIndex] {
case 0:
switch StdType(buf[stdWriterFdIndex]) {
case Stdin:
fallthrough
case 1:
case Stdout:
// Write on stdout
out = dstout
case 2:
case Stderr:
// Write on stderr
out = dsterr
default:
logrus.Debugf("Error selecting output fd: (%d)", buf[stdWriterFdIndex])
return 0, errInvalidStdHeader
return 0, fmt.Errorf("Unrecognized input header: %d", buf[stdWriterFdIndex])
}
// Retrieve the size of the frame

View File

@ -43,5 +43,10 @@ func Chtimes(name string, atime time.Time, mtime time.Time) error {
return err
}
// Take platform specific action for setting create time.
if err := setCTime(name, mtime); err != nil {
return err
}
return nil
}
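A minimal usage sketch of the cross-platform call above, assuming the vendored import path; the file name is illustrative. On Unix the create time is a no-op side effect of setting the modified time, while on Windows it is set explicitly via SetFileTime (see the next two files).
package main

import (
	"log"
	"time"

	"github.com/docker/docker/pkg/system" // assumed vendored path
)

func main() {
	now := time.Now()
	// layer.tar is a hypothetical file; atime and mtime are both set to now.
	if err := system.Chtimes("layer.tar", now, now); err != nil {
		log.Fatal(err)
	}
}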

View File

@ -0,0 +1,14 @@
// +build !windows
package system
import (
"time"
)
// setCTime will set the create time on a file. On Unix, the create
// time is updated as a side effect of setting the modified time, so
// no action is required.
func setCTime(path string, ctime time.Time) error {
return nil
}

View File

@ -0,0 +1,27 @@
// +build windows
package system
import (
"syscall"
"time"
)
// setCTime will set the create time on a file. On Windows, this requires
// calling SetFileTime and explicitly including the create time.
func setCTime(path string, ctime time.Time) error {
ctimespec := syscall.NsecToTimespec(ctime.UnixNano())
pathp, e := syscall.UTF16PtrFromString(path)
if e != nil {
return e
}
h, e := syscall.CreateFile(pathp,
syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil,
syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0)
if e != nil {
return e
}
defer syscall.Close(h)
c := syscall.NsecToFiletime(syscall.TimespecToNsec(ctimespec))
return syscall.SetFileTime(h, &c, nil, nil)
}

Some files were not shown because too many files have changed in this diff.