Adding vendored code in pkg directories

Diptanu Choudhury 2016-02-12 13:32:32 -08:00
parent 18da7eea95
commit 9d761afbc3
91 changed files with 6980 additions and 0 deletions

vendor/github.com/docker/docker/pkg/mflag/LICENSE generated vendored Normal file

@@ -0,0 +1,27 @@
Copyright (c) 2014-2016 The Docker & Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/docker/docker/pkg/mount/flags.go generated vendored Normal file

@@ -0,0 +1,92 @@
package mount
import (
"fmt"
"strings"
)
// Parse fstab type mount options into mount() flags
// and device specific data
func parseOptions(options string) (int, string) {
var (
flag int
data []string
)
flags := map[string]struct {
clear bool
flag int
}{
"defaults": {false, 0},
"ro": {false, RDONLY},
"rw": {true, RDONLY},
"suid": {true, NOSUID},
"nosuid": {false, NOSUID},
"dev": {true, NODEV},
"nodev": {false, NODEV},
"exec": {true, NOEXEC},
"noexec": {false, NOEXEC},
"sync": {false, SYNCHRONOUS},
"async": {true, SYNCHRONOUS},
"dirsync": {false, DIRSYNC},
"remount": {false, REMOUNT},
"mand": {false, MANDLOCK},
"nomand": {true, MANDLOCK},
"atime": {true, NOATIME},
"noatime": {false, NOATIME},
"diratime": {true, NODIRATIME},
"nodiratime": {false, NODIRATIME},
"bind": {false, BIND},
"rbind": {false, RBIND},
"unbindable": {false, UNBINDABLE},
"runbindable": {false, RUNBINDABLE},
"private": {false, PRIVATE},
"rprivate": {false, RPRIVATE},
"shared": {false, SHARED},
"rshared": {false, RSHARED},
"slave": {false, SLAVE},
"rslave": {false, RSLAVE},
"relatime": {false, RELATIME},
"norelatime": {true, RELATIME},
"strictatime": {false, STRICTATIME},
"nostrictatime": {true, STRICTATIME},
}
for _, o := range strings.Split(options, ",") {
// If the option does not exist in the flags table or the flag
// is not supported on the platform,
// then it is a data value for a specific fs type
if f, exists := flags[o]; exists && f.flag != 0 {
if f.clear {
flag &= ^f.flag
} else {
flag |= f.flag
}
} else {
data = append(data, o)
}
}
return flag, strings.Join(data, ",")
}
// ParseTmpfsOptions parse fstab type mount options into flags and data
func ParseTmpfsOptions(options string) (int, string, error) {
flags, data := parseOptions(options)
validFlags := map[string]bool{
"": true,
"size": true,
"mode": true,
"uid": true,
"gid": true,
"nr_inodes": true,
"nr_blocks": true,
"mpol": true,
}
for _, o := range strings.Split(data, ",") {
opt := strings.SplitN(o, "=", 2)
if !validFlags[opt[0]] {
return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt)
}
}
return flags, data, nil
}
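Illustrative only, not part of the vendored file: a minimal sketch of calling the exported ParseTmpfsOptions helper above. The import path is assumed from the vendored location, and the snippet assumes a Linux build where "noexec" maps to a non-zero flag.

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/mount" // assumed vendored import path
)

func main() {
	// "noexec" becomes a mount(2) flag; "size" and "mode" are passed
	// through unchanged as tmpfs data.
	flags, data, err := mount.ParseTmpfsOptions("noexec,size=64m,mode=0700")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("flags=%#x data=%q\n", flags, data)
}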

@@ -0,0 +1,48 @@
// +build freebsd,cgo
package mount
/*
#include <sys/mount.h>
*/
import "C"
const (
// RDONLY will mount the filesystem as read-only.
RDONLY = C.MNT_RDONLY
// NOSUID will not allow set-user-identifier or set-group-identifier bits to
// take effect.
NOSUID = C.MNT_NOSUID
// NOEXEC will not allow execution of any binaries on the mounted file system.
NOEXEC = C.MNT_NOEXEC
// SYNCHRONOUS will allow any I/O to the file system to be done synchronously.
SYNCHRONOUS = C.MNT_SYNCHRONOUS
// NOATIME will not update the file access time when reading from a file.
NOATIME = C.MNT_NOATIME
)
// These flags are unsupported.
const (
BIND = 0
DIRSYNC = 0
MANDLOCK = 0
NODEV = 0
NODIRATIME = 0
UNBINDABLE = 0
RUNBINDABLE = 0
PRIVATE = 0
RPRIVATE = 0
SHARED = 0
RSHARED = 0
SLAVE = 0
RSLAVE = 0
RBIND = 0
RELATIVE = 0
RELATIME = 0
REMOUNT = 0
STRICTATIME = 0
)

@@ -0,0 +1,85 @@
package mount
import (
"syscall"
)
const (
// RDONLY will mount the file system read-only.
RDONLY = syscall.MS_RDONLY
// NOSUID will not allow set-user-identifier or set-group-identifier bits to
// take effect.
NOSUID = syscall.MS_NOSUID
// NODEV will not interpret character or block special devices on the file
// system.
NODEV = syscall.MS_NODEV
// NOEXEC will not allow execution of any binaries on the mounted file system.
NOEXEC = syscall.MS_NOEXEC
// SYNCHRONOUS will allow I/O to the file system to be done synchronously.
SYNCHRONOUS = syscall.MS_SYNCHRONOUS
// DIRSYNC will force all directory updates within the file system to be done
// synchronously. This affects the following system calls: create, link,
// unlink, symlink, mkdir, rmdir, mknod and rename.
DIRSYNC = syscall.MS_DIRSYNC
// REMOUNT will attempt to remount an already-mounted file system. This is
// commonly used to change the mount flags for a file system, especially to
// make a readonly file system writeable. It does not change device or mount
// point.
REMOUNT = syscall.MS_REMOUNT
// MANDLOCK will force mandatory locks on a filesystem.
MANDLOCK = syscall.MS_MANDLOCK
// NOATIME will not update the file access time when reading from a file.
NOATIME = syscall.MS_NOATIME
// NODIRATIME will not update the directory access time.
NODIRATIME = syscall.MS_NODIRATIME
// BIND remounts a subtree somewhere else.
BIND = syscall.MS_BIND
// RBIND remounts a subtree and all possible submounts somewhere else.
RBIND = syscall.MS_BIND | syscall.MS_REC
// UNBINDABLE creates a mount which cannot be cloned through a bind operation.
UNBINDABLE = syscall.MS_UNBINDABLE
// RUNBINDABLE marks the entire mount tree as UNBINDABLE.
RUNBINDABLE = syscall.MS_UNBINDABLE | syscall.MS_REC
// PRIVATE creates a mount which carries no propagation abilities.
PRIVATE = syscall.MS_PRIVATE
// RPRIVATE marks the entire mount tree as PRIVATE.
RPRIVATE = syscall.MS_PRIVATE | syscall.MS_REC
// SLAVE creates a mount which receives propagation from its master, but not
// vice versa.
SLAVE = syscall.MS_SLAVE
// RSLAVE marks the entire mount tree as SLAVE.
RSLAVE = syscall.MS_SLAVE | syscall.MS_REC
// SHARED creates a mount which provides the ability to create mirrors of
// that mount such that mounts and unmounts within any of the mirrors
// propagate to the other mirrors.
SHARED = syscall.MS_SHARED
// RSHARED marks the entire mount tree as SHARED.
RSHARED = syscall.MS_SHARED | syscall.MS_REC
// RELATIME updates inode access times relative to modify or change time.
RELATIME = syscall.MS_RELATIME
// STRICTATIME allows to explicitly request full atime updates. This makes
// it possible for the kernel to default to relatime or noatime but still
// allow userspace to override it.
STRICTATIME = syscall.MS_STRICTATIME
)
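As an illustration, not part of the vendored file: these constants are plain ints that can be OR-ed together, which is what parseOptions in flags.go does for an option string such as "rbind,ro,noatime". The import path below is assumed; on Linux the constants are the MS_* values shown above.

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/mount" // assumed vendored import path
)

func main() {
	// Equivalent to what parseOptions produces for "rbind,ro,noatime".
	flags := mount.RBIND | mount.RDONLY | mount.NOATIME
	fmt.Printf("mount(2) flags: %#x\n", flags)
}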

@@ -0,0 +1,30 @@
// +build !linux,!freebsd freebsd,!cgo
package mount
// These flags are unsupported.
const (
BIND = 0
DIRSYNC = 0
MANDLOCK = 0
NOATIME = 0
NODEV = 0
NODIRATIME = 0
NOEXEC = 0
NOSUID = 0
UNBINDABLE = 0
RUNBINDABLE = 0
PRIVATE = 0
RPRIVATE = 0
SHARED = 0
RSHARED = 0
SLAVE = 0
RSLAVE = 0
RBIND = 0
RELATIME = 0
RELATIVE = 0
REMOUNT = 0
STRICTATIME = 0
SYNCHRONOUS = 0
RDONLY = 0
)

vendor/github.com/docker/docker/pkg/mount/mount.go generated vendored Normal file

@@ -0,0 +1,74 @@
package mount
import (
"time"
)
// GetMounts retrieves a list of mounts for the current running process.
func GetMounts() ([]*Info, error) {
return parseMountTable()
}
// Mounted looks at /proc/self/mountinfo to determine if the specified
// mountpoint has been mounted
func Mounted(mountpoint string) (bool, error) {
entries, err := parseMountTable()
if err != nil {
return false, err
}
// Search the table for the mountpoint
for _, e := range entries {
if e.Mountpoint == mountpoint {
return true, nil
}
}
return false, nil
}
// Mount will mount filesystem according to the specified configuration, on the
// condition that the target path is *not* already mounted. Options must be
// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
// flags.go for supported option flags.
func Mount(device, target, mType, options string) error {
flag, _ := parseOptions(options)
if flag&REMOUNT != REMOUNT {
if mounted, err := Mounted(target); err != nil || mounted {
return err
}
}
return ForceMount(device, target, mType, options)
}
// ForceMount will mount a filesystem according to the specified configuration,
// *regardless* if the target path is not already mounted. Options must be
// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
// flags.go for supported option flags.
func ForceMount(device, target, mType, options string) error {
flag, data := parseOptions(options)
if err := mount(device, target, mType, uintptr(flag), data); err != nil {
return err
}
return nil
}
// Unmount will unmount the target filesystem, so long as it is mounted.
func Unmount(target string) error {
if mounted, err := Mounted(target); err != nil || !mounted {
return err
}
return ForceUnmount(target)
}
// ForceUnmount will force an unmount of the target filesystem, regardless if
// it is mounted or not.
func ForceUnmount(target string) (err error) {
// Simple retry logic for unmount
for i := 0; i < 10; i++ {
if err = unmount(target, 0); err == nil {
return nil
}
time.Sleep(100 * time.Millisecond)
}
return
}
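A hypothetical end-to-end sketch (not part of the commit) of the exported API above: Mount, Mounted, and Unmount. The paths are made up, the import path is assumed, and the program needs root on Linux.

package main

import (
	"log"

	"github.com/docker/docker/pkg/mount" // assumed vendored import path
)

func main() {
	target := "/mnt/scratch" // hypothetical mount point

	// Bind-mount /var/data onto target unless it is already mounted.
	if err := mount.Mount("/var/data", target, "none", "bind,rw"); err != nil {
		log.Fatalf("mount failed: %v", err)
	}

	mounted, err := mount.Mounted(target)
	if err != nil {
		log.Fatalf("mountinfo lookup failed: %v", err)
	}
	log.Printf("%s mounted: %v", target, mounted)

	// Unmount returns nil if the target is no longer mounted.
	if err := mount.Unmount(target); err != nil {
		log.Fatalf("unmount failed: %v", err)
	}
}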

@@ -0,0 +1,59 @@
package mount
/*
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/_iovec.h>
#include <sys/mount.h>
#include <sys/param.h>
*/
import "C"
import (
"fmt"
"strings"
"syscall"
"unsafe"
)
func allocateIOVecs(options []string) []C.struct_iovec {
out := make([]C.struct_iovec, len(options))
for i, option := range options {
out[i].iov_base = unsafe.Pointer(C.CString(option))
out[i].iov_len = C.size_t(len(option) + 1)
}
return out
}
func mount(device, target, mType string, flag uintptr, data string) error {
isNullFS := false
xs := strings.Split(data, ",")
for _, x := range xs {
if x == "bind" {
isNullFS = true
}
}
options := []string{"fspath", target}
if isNullFS {
options = append(options, "fstype", "nullfs", "target", device)
} else {
options = append(options, "fstype", mType, "from", device)
}
rawOptions := allocateIOVecs(options)
for _, rawOption := range rawOptions {
defer C.free(rawOption.iov_base)
}
if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 {
reason := C.GoString(C.strerror(*C.__error()))
return fmt.Errorf("Failed to call nmount: %s", reason)
}
return nil
}
func unmount(target string, flag int) error {
return syscall.Unmount(target, flag)
}

@@ -0,0 +1,21 @@
package mount
import (
"syscall"
)
func mount(device, target, mType string, flag uintptr, data string) error {
if err := syscall.Mount(device, target, mType, flag, data); err != nil {
return err
}
// If we have a bind mount or remount, remount...
if flag&syscall.MS_BIND == syscall.MS_BIND && flag&syscall.MS_RDONLY == syscall.MS_RDONLY {
return syscall.Mount(device, target, mType, flag|syscall.MS_REMOUNT, data)
}
return nil
}
func unmount(target string, flag int) error {
return syscall.Unmount(target, flag)
}

@@ -0,0 +1,11 @@
// +build !linux,!freebsd freebsd,!cgo
package mount
func mount(device, target, mType string, flag uintptr, data string) error {
panic("Not implemented")
}
func unmount(target string, flag int) error {
panic("Not implemented")
}

vendor/github.com/docker/docker/pkg/mount/mountinfo.go generated vendored Normal file

@@ -0,0 +1,40 @@
package mount
// Info reveals information about a particular mounted filesystem. This
// struct is populated from the content in the /proc/<pid>/mountinfo file.
type Info struct {
// ID is a unique identifier of the mount (may be reused after umount).
ID int
// Parent indicates the ID of the mount parent (or of self for the top of the
// mount tree).
Parent int
// Major indicates one half of the device ID which identifies the device class.
Major int
// Minor indicates one half of the device ID which identifies a specific
// instance of device.
Minor int
// Root of the mount within the filesystem.
Root string
// Mountpoint indicates the mount point relative to the process's root.
Mountpoint string
// Opts represents mount-specific options.
Opts string
// Optional represents optional fields.
Optional string
// Fstype indicates the type of filesystem, such as EXT3.
Fstype string
// Source indicates filesystem specific information or "none".
Source string
// VfsOpts represents per super block options.
VfsOpts string
}
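For illustration, not part of the commit: GetMounts in mount.go returns a slice of these Info structs. A small sketch of iterating it, with the import path assumed:

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/mount" // assumed vendored import path
)

func main() {
	infos, err := mount.GetMounts()
	if err != nil {
		log.Fatal(err)
	}
	for _, i := range infos {
		fmt.Printf("%s on %s type %s (%s)\n", i.Source, i.Mountpoint, i.Fstype, i.Opts)
	}
}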

@@ -0,0 +1,41 @@
package mount
/*
#include <sys/param.h>
#include <sys/ucred.h>
#include <sys/mount.h>
*/
import "C"
import (
"fmt"
"reflect"
"unsafe"
)
// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
// bind mounts.
func parseMountTable() ([]*Info, error) {
var rawEntries *C.struct_statfs
count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT))
if count == 0 {
return nil, fmt.Errorf("Failed to call getmntinfo")
}
var entries []C.struct_statfs
header := (*reflect.SliceHeader)(unsafe.Pointer(&entries))
header.Cap = count
header.Len = count
header.Data = uintptr(unsafe.Pointer(rawEntries))
var out []*Info
for _, entry := range entries {
var mountinfo Info
mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0])
mountinfo.Source = C.GoString(&entry.f_mntfromname[0])
mountinfo.Fstype = C.GoString(&entry.f_fstypename[0])
out = append(out, &mountinfo)
}
return out, nil
}

@@ -0,0 +1,95 @@
// +build linux
package mount
import (
"bufio"
"fmt"
"io"
"os"
"strings"
)
const (
/* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
(1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11)
(1) mount ID: unique identifier of the mount (may be reused after umount)
(2) parent ID: ID of parent (or of self for the top of the mount tree)
(3) major:minor: value of st_dev for files on filesystem
(4) root: root of the mount within the filesystem
(5) mount point: mount point relative to the process's root
(6) mount options: per mount options
(7) optional fields: zero or more fields of the form "tag[:value]"
(8) separator: marks the end of the optional fields
(9) filesystem type: name of filesystem of the form "type[.subtype]"
(10) mount source: filesystem specific information or "none"
(11) super options: per super block options*/
mountinfoFormat = "%d %d %d:%d %s %s %s %s"
)
// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
// bind mounts
func parseMountTable() ([]*Info, error) {
f, err := os.Open("/proc/self/mountinfo")
if err != nil {
return nil, err
}
defer f.Close()
return parseInfoFile(f)
}
func parseInfoFile(r io.Reader) ([]*Info, error) {
var (
s = bufio.NewScanner(r)
out = []*Info{}
)
for s.Scan() {
if err := s.Err(); err != nil {
return nil, err
}
var (
p = &Info{}
text = s.Text()
optionalFields string
)
if _, err := fmt.Sscanf(text, mountinfoFormat,
&p.ID, &p.Parent, &p.Major, &p.Minor,
&p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil {
return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err)
}
// Safe as mountinfo encodes mountpoints with spaces as \040.
index := strings.Index(text, " - ")
postSeparatorFields := strings.Fields(text[index+3:])
if len(postSeparatorFields) < 3 {
return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text)
}
if optionalFields != "-" {
p.Optional = optionalFields
}
p.Fstype = postSeparatorFields[0]
p.Source = postSeparatorFields[1]
p.VfsOpts = strings.Join(postSeparatorFields[2:], " ")
out = append(out, p)
}
return out, nil
}
// PidMountInfo collects the mounts for a specific process ID. If the process
// ID is unknown, it is better to use `GetMounts` which will inspect
// "/proc/self/mountinfo" instead.
func PidMountInfo(pid int) ([]*Info, error) {
f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
if err != nil {
return nil, err
}
defer f.Close()
return parseInfoFile(f)
}
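A test-style sketch (hypothetical, not part of the commit) that feeds the sample line from the format comment above through parseInfoFile; since that function is unexported, such a test would have to live in package mount.

package mount

import (
	"strings"
	"testing"
)

func TestParseInfoFileSampleLine(t *testing.T) {
	line := "36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue\n"
	infos, err := parseInfoFile(strings.NewReader(line))
	if err != nil {
		t.Fatal(err)
	}
	if len(infos) != 1 {
		t.Fatalf("expected 1 entry, got %d", len(infos))
	}
	info := infos[0]
	if info.ID != 36 || info.Parent != 35 || info.Major != 98 || info.Minor != 0 {
		t.Fatalf("unexpected ids: %+v", info)
	}
	if info.Fstype != "ext3" || info.Source != "/dev/root" || info.VfsOpts != "rw,errors=continue" {
		t.Fatalf("unexpected post-separator fields: %+v", info)
	}
	if info.Optional != "master:1" || info.Opts != "rw,noatime" {
		t.Fatalf("unexpected options: %+v", info)
	}
}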

@@ -0,0 +1,12 @@
// +build !linux,!freebsd freebsd,!cgo
package mount
import (
"fmt"
"runtime"
)
func parseMountTable() ([]*Info, error) {
return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
}

@@ -0,0 +1,70 @@
// +build linux
package mount
// MakeShared ensures a mounted filesystem has the SHARED mount option enabled.
// See the supported options in flags.go for further reference.
func MakeShared(mountPoint string) error {
return ensureMountedAs(mountPoint, "shared")
}
// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled.
// See the supported options in flags.go for further reference.
func MakeRShared(mountPoint string) error {
return ensureMountedAs(mountPoint, "rshared")
}
// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled.
// See the supported options in flags.go for further reference.
func MakePrivate(mountPoint string) error {
return ensureMountedAs(mountPoint, "private")
}
// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option
// enabled. See the supported options in flags.go for further reference.
func MakeRPrivate(mountPoint string) error {
return ensureMountedAs(mountPoint, "rprivate")
}
// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled.
// See the supported options in flags.go for further reference.
func MakeSlave(mountPoint string) error {
return ensureMountedAs(mountPoint, "slave")
}
// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled.
// See the supported options in flags.go for further reference.
func MakeRSlave(mountPoint string) error {
return ensureMountedAs(mountPoint, "rslave")
}
// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option
// enabled. See the supported options in flags.go for further reference.
func MakeUnbindable(mountPoint string) error {
return ensureMountedAs(mountPoint, "unbindable")
}
// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount
// option enabled. See the supported options in flags.go for further reference.
func MakeRUnbindable(mountPoint string) error {
return ensureMountedAs(mountPoint, "runbindable")
}
func ensureMountedAs(mountPoint, options string) error {
mounted, err := Mounted(mountPoint)
if err != nil {
return err
}
if !mounted {
if err := Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil {
return err
}
}
mounted, err = Mounted(mountPoint)
if err != nil {
return err
}
return ForceMount("", mountPoint, "none", options)
}
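Hypothetical usage, not part of the vendored file: marking a directory's mount tree private so mount and unmount events stop propagating to peer namespaces. The path is made up, the import path is assumed, and root is required.

package main

import (
	"log"

	"github.com/docker/docker/pkg/mount" // assumed vendored import path
)

func main() {
	// ensureMountedAs bind-mounts the path onto itself first if it is
	// not already a mount point, then applies the propagation option.
	if err := mount.MakeRPrivate("/var/lib/example"); err != nil {
		log.Fatal(err)
	}
}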

@@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2014-2016 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@@ -0,0 +1,27 @@
Copyright (c) 2014-2016 The Docker & Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@@ -0,0 +1 @@
This code provides helper functions for dealing with archive files.

File diff suppressed because it is too large

@@ -0,0 +1,112 @@
// +build !windows
package archive
import (
"archive/tar"
"errors"
"os"
"path/filepath"
"syscall"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
)
// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
func fixVolumePathPrefix(srcPath string) string {
return srcPath
}
// getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a separate function as this is platform specific. On Linux, we
// can't use filepath.Join(srcPath,include) because this will clean away
// a trailing "." or "/" which may be important.
func getWalkRoot(srcPath string, include string) string {
return srcPath + string(filepath.Separator) + include
}
// CanonicalTarNameForPath returns platform-specific filepath
// to canonical posix-style path for tar archival. p is relative
// path.
func CanonicalTarNameForPath(p string) (string, error) {
return p, nil // already unix-style
}
// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
return perm // noop for unix as golang APIs provide perm bits correctly
}
func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) {
s, ok := stat.(*syscall.Stat_t)
if !ok {
err = errors.New("cannot convert stat value to syscall.Stat_t")
return
}
inode = uint64(s.Ino)
// Currently go does not fill in the major/minors
if s.Mode&syscall.S_IFBLK != 0 ||
s.Mode&syscall.S_IFCHR != 0 {
hdr.Devmajor = int64(major(uint64(s.Rdev)))
hdr.Devminor = int64(minor(uint64(s.Rdev)))
}
return
}
func getFileUIDGID(stat interface{}) (int, int, error) {
s, ok := stat.(*syscall.Stat_t)
if !ok {
return -1, -1, errors.New("cannot convert stat value to syscall.Stat_t")
}
return int(s.Uid), int(s.Gid), nil
}
func major(device uint64) uint64 {
return (device >> 8) & 0xfff
}
func minor(device uint64) uint64 {
return (device & 0xff) | ((device >> 12) & 0xfff00)
}
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
mode := uint32(hdr.Mode & 07777)
switch hdr.Typeflag {
case tar.TypeBlock:
mode |= syscall.S_IFBLK
case tar.TypeChar:
mode |= syscall.S_IFCHR
case tar.TypeFifo:
mode |= syscall.S_IFIFO
}
if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil {
return err
}
return nil
}
func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
if hdr.Typeflag == tar.TypeLink {
if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
return err
}
}
} else if hdr.Typeflag != tar.TypeSymlink {
if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
return err
}
}
return nil
}
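A standalone sketch, not part of the vendored file, of the major/minor split above applied to a made-up st_rdev value; the helpers are copied here only so the snippet compiles on its own.

package main

import "fmt"

func major(device uint64) uint64 { return (device >> 8) & 0xfff }

func minor(device uint64) uint64 { return (device & 0xff) | ((device >> 12) & 0xfff00) }

func main() {
	var rdev uint64 = 0x0801 // e.g. a block device with major 8, minor 1
	fmt.Println(major(rdev), minor(rdev)) // prints: 8 1
}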

@@ -0,0 +1,70 @@
// +build windows
package archive
import (
"archive/tar"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath"
)
// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
func fixVolumePathPrefix(srcPath string) string {
return longpath.AddPrefix(srcPath)
}
// getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a separate function as this is platform specific.
func getWalkRoot(srcPath string, include string) string {
return filepath.Join(srcPath, include)
}
// CanonicalTarNameForPath returns platform-specific filepath
// to canonical posix-style path for tar archival. p is relative
// path.
func CanonicalTarNameForPath(p string) (string, error) {
// windows: convert windows style relative path with backslashes
// into forward slashes. Since windows does not allow '/' or '\'
// in file names, it is mostly safe to replace however we must
// check just in case
if strings.Contains(p, "/") {
return "", fmt.Errorf("Windows path contains forward slash: %s", p)
}
return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
}
// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
perm &= 0755
// Add the x bit: make everything +x from windows
perm |= 0111
return perm
}
func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) {
// do nothing. no notion of Rdev, Inode, Nlink in stat on Windows
return
}
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
return nil
}
func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
return nil
}
func getFileUIDGID(stat interface{}) (int, int, error) {
// no notion of file ownership mapping yet on Windows
return 0, 0, nil
}
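Standalone illustration, not part of the vendored file, of the path canonicalization above: a backslash-separated relative path becomes a forward-slash tar name, and a path already containing '/' is rejected. The separator is hard-coded to '\' here because os.PathSeparator only equals '\' when built for Windows.

package main

import (
	"fmt"
	"strings"
)

// canonicalTarName mirrors CanonicalTarNameForPath with the Windows
// separator hard-coded.
func canonicalTarName(p string) (string, error) {
	if strings.Contains(p, "/") {
		return "", fmt.Errorf("windows path contains forward slash: %s", p)
	}
	return strings.Replace(p, `\`, "/", -1), nil
}

func main() {
	name, err := canonicalTarName(`dir\sub\file.txt`)
	fmt.Println(name, err) // dir/sub/file.txt <nil>
}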

@@ -0,0 +1,416 @@
package archive
import (
"archive/tar"
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strings"
"syscall"
"time"
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
)
// ChangeType represents the change type.
type ChangeType int
const (
// ChangeModify represents the modify operation.
ChangeModify = iota
// ChangeAdd represents the add operation.
ChangeAdd
// ChangeDelete represents the delete operation.
ChangeDelete
)
func (c ChangeType) String() string {
switch c {
case ChangeModify:
return "C"
case ChangeAdd:
return "A"
case ChangeDelete:
return "D"
}
return ""
}
// Change represents a change, it wraps the change type and path.
// It describes changes of the files in the path respect to the
// parent layers. The change could be modify, add, delete.
// This is used for layer diff.
type Change struct {
Path string
Kind ChangeType
}
func (change *Change) String() string {
return fmt.Sprintf("%s %s", change.Kind, change.Path)
}
// for sort.Sort
type changesByPath []Change
func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
func (c changesByPath) Len() int { return len(c) }
func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] }
// Gnu tar and the go tar writer don't have sub-second mtime
// precision, which is problematic when we apply changes via tar
// files, we handle this by comparing for exact times, *or* same
// second count and either a or b having exactly 0 nanoseconds
func sameFsTime(a, b time.Time) bool {
return a == b ||
(a.Unix() == b.Unix() &&
(a.Nanosecond() == 0 || b.Nanosecond() == 0))
}
func sameFsTimeSpec(a, b syscall.Timespec) bool {
return a.Sec == b.Sec &&
(a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
}
// Changes walks the path rw and determines changes for the files in the path,
// with respect to the parent layers
func Changes(layers []string, rw string) ([]Change, error) {
var (
changes []Change
changedDirs = make(map[string]struct{})
)
err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
if err != nil {
return err
}
// Rebase path
path, err = filepath.Rel(rw, path)
if err != nil {
return err
}
// As this runs on the daemon side, file paths are OS specific.
path = filepath.Join(string(os.PathSeparator), path)
// Skip root
if path == string(os.PathSeparator) {
return nil
}
// Skip AUFS metadata
if matched, err := filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path); err != nil || matched {
return err
}
change := Change{
Path: path,
}
// Find out what kind of modification happened
file := filepath.Base(path)
// If there is a whiteout, then the file was removed
if strings.HasPrefix(file, WhiteoutPrefix) {
originalFile := file[len(WhiteoutPrefix):]
change.Path = filepath.Join(filepath.Dir(path), originalFile)
change.Kind = ChangeDelete
} else {
// Otherwise, the file was added
change.Kind = ChangeAdd
// ...Unless it already existed in a top layer, in which case, it's a modification
for _, layer := range layers {
stat, err := os.Stat(filepath.Join(layer, path))
if err != nil && !os.IsNotExist(err) {
return err
}
if err == nil {
// The file existed in the top layer, so that's a modification
// However, if it's a directory, maybe it wasn't actually modified.
// If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
if stat.IsDir() && f.IsDir() {
if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
// Both directories are the same, don't record the change
return nil
}
}
change.Kind = ChangeModify
break
}
}
}
// If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
// This block is here to ensure the change is recorded even if the
// modify time, mode and size of the parent directory in the rw and ro layers are all equal.
// Check https://github.com/docker/docker/pull/13590 for details.
if f.IsDir() {
changedDirs[path] = struct{}{}
}
if change.Kind == ChangeAdd || change.Kind == ChangeDelete {
parent := filepath.Dir(path)
if _, ok := changedDirs[parent]; !ok && parent != "/" {
changes = append(changes, Change{Path: parent, Kind: ChangeModify})
changedDirs[parent] = struct{}{}
}
}
// Record change
changes = append(changes, change)
return nil
})
if err != nil && !os.IsNotExist(err) {
return nil, err
}
return changes, nil
}
// FileInfo describes the information of a file.
type FileInfo struct {
parent *FileInfo
name string
stat *system.StatT
children map[string]*FileInfo
capability []byte
added bool
}
// LookUp looks up the file information of a file.
func (info *FileInfo) LookUp(path string) *FileInfo {
// As this runs on the daemon side, file paths are OS specific.
parent := info
if path == string(os.PathSeparator) {
return info
}
pathElements := strings.Split(path, string(os.PathSeparator))
for _, elem := range pathElements {
if elem != "" {
child := parent.children[elem]
if child == nil {
return nil
}
parent = child
}
}
return parent
}
func (info *FileInfo) path() string {
if info.parent == nil {
// As this runs on the daemon side, file paths are OS specific.
return string(os.PathSeparator)
}
return filepath.Join(info.parent.path(), info.name)
}
func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
sizeAtEntry := len(*changes)
if oldInfo == nil {
// add
change := Change{
Path: info.path(),
Kind: ChangeAdd,
}
*changes = append(*changes, change)
info.added = true
}
// We make a copy so we can modify it to detect additions
// also, we only recurse on the old dir if the new info is a directory
// otherwise any previous delete/change is considered recursive
oldChildren := make(map[string]*FileInfo)
if oldInfo != nil && info.isDir() {
for k, v := range oldInfo.children {
oldChildren[k] = v
}
}
for name, newChild := range info.children {
oldChild, _ := oldChildren[name]
if oldChild != nil {
// change?
oldStat := oldChild.stat
newStat := newChild.stat
// Note: We can't compare inode or ctime or blocksize here, because these change
// when copying a file into a container. However, that is not generally a problem
// because any content change will change mtime, and any status change should
// be visible when actually comparing the stat fields. The only time this
// breaks down is if some code intentionally hides a change by setting
// back mtime
if statDifferent(oldStat, newStat) ||
bytes.Compare(oldChild.capability, newChild.capability) != 0 {
change := Change{
Path: newChild.path(),
Kind: ChangeModify,
}
*changes = append(*changes, change)
newChild.added = true
}
// Remove from copy so we can detect deletions
delete(oldChildren, name)
}
newChild.addChanges(oldChild, changes)
}
for _, oldChild := range oldChildren {
// delete
change := Change{
Path: oldChild.path(),
Kind: ChangeDelete,
}
*changes = append(*changes, change)
}
// If there were changes inside this directory, we need to add it, even if the directory
// itself wasn't changed. This is needed to properly save and restore filesystem permissions.
// As this runs on the daemon side, file paths are OS specific.
if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) {
change := Change{
Path: info.path(),
Kind: ChangeModify,
}
// Let's insert the directory entry before the recently added entries located inside this dir
*changes = append(*changes, change) // just to resize the slice, will be overwritten
copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
(*changes)[sizeAtEntry] = change
}
}
// Changes add changes to file information.
func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
var changes []Change
info.addChanges(oldInfo, &changes)
return changes
}
func newRootFileInfo() *FileInfo {
// As this runs on the daemon side, file paths are OS specific.
root := &FileInfo{
name: string(os.PathSeparator),
children: make(map[string]*FileInfo),
}
return root
}
// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
// If oldDir is "", then all files in newDir will be Add-Changes.
func ChangesDirs(newDir, oldDir string) ([]Change, error) {
var (
oldRoot, newRoot *FileInfo
)
if oldDir == "" {
emptyDir, err := ioutil.TempDir("", "empty")
if err != nil {
return nil, err
}
defer os.Remove(emptyDir)
oldDir = emptyDir
}
oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
if err != nil {
return nil, err
}
return newRoot.Changes(oldRoot), nil
}
// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
func ChangesSize(newDir string, changes []Change) int64 {
var (
size int64
sf = make(map[uint64]struct{})
)
for _, change := range changes {
if change.Kind == ChangeModify || change.Kind == ChangeAdd {
file := filepath.Join(newDir, change.Path)
fileInfo, err := os.Lstat(file)
if err != nil {
logrus.Errorf("Can not stat %q: %s", file, err)
continue
}
if fileInfo != nil && !fileInfo.IsDir() {
if hasHardlinks(fileInfo) {
inode := getIno(fileInfo)
if _, ok := sf[inode]; !ok {
size += fileInfo.Size()
sf[inode] = struct{}{}
}
} else {
size += fileInfo.Size()
}
}
}
}
return size
}
// ExportChanges produces an Archive from the provided changes, relative to dir.
func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (Archive, error) {
reader, writer := io.Pipe()
go func() {
ta := &tarAppender{
TarWriter: tar.NewWriter(writer),
Buffer: pools.BufioWriter32KPool.Get(nil),
SeenFiles: make(map[uint64]string),
UIDMaps: uidMaps,
GIDMaps: gidMaps,
}
// this buffer is needed for the duration of this piped stream
defer pools.BufioWriter32KPool.Put(ta.Buffer)
sort.Sort(changesByPath(changes))
// In general we log errors here but ignore them because
// during e.g. a diff operation the container can continue
// mutating the filesystem and we can see transient errors
// from this
for _, change := range changes {
if change.Kind == ChangeDelete {
whiteOutDir := filepath.Dir(change.Path)
whiteOutBase := filepath.Base(change.Path)
whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
timestamp := time.Now()
hdr := &tar.Header{
Name: whiteOut[1:],
Size: 0,
ModTime: timestamp,
AccessTime: timestamp,
ChangeTime: timestamp,
}
if err := ta.TarWriter.WriteHeader(hdr); err != nil {
logrus.Debugf("Can't write whiteout header: %s", err)
}
} else {
path := filepath.Join(dir, change.Path)
if err := ta.addTarFile(path, change.Path[1:]); err != nil {
logrus.Debugf("Can't add file %s to tar: %s", path, err)
}
}
}
// Make sure to check the error on Close.
if err := ta.TarWriter.Close(); err != nil {
logrus.Debugf("Can't close layer: %s", err)
}
if err := writer.Close(); err != nil {
logrus.Debugf("failed close Changes writer: %s", err)
}
}()
return reader, nil
}
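A hypothetical usage sketch, not part of the commit, combining ChangesDirs and ExportChanges to diff a writable layer against its read-only parent and stream the result as a tar. The directories are made up, nil ID maps are passed, the import path is assumed from the paths used elsewhere in these files, and the Archive return value is assumed to be readable as an io.Reader.

package main

import (
	"io"
	"io/ioutil"
	"log"

	// assumed vendored import path for this archive package
	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
)

func main() {
	// newDir is the rw layer, oldDir the ro parent (hypothetical paths).
	changes, err := archive.ChangesDirs("/var/lib/example/rw", "/var/lib/example/ro")
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range changes {
		log.Println(c.String()) // e.g. "C /etc", "A /etc/new.conf", "D /tmp/old"
	}

	rdr, err := archive.ExportChanges("/var/lib/example/rw", changes, nil, nil)
	if err != nil {
		log.Fatal(err)
	}
	n, _ := io.Copy(ioutil.Discard, rdr) // drain the piped tar stream
	log.Printf("layer tar is %d bytes", n)
}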

@@ -0,0 +1,285 @@
package archive
import (
"bytes"
"fmt"
"os"
"path/filepath"
"sort"
"syscall"
"unsafe"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
)
// walker is used to implement collectFileInfoForChanges on linux. Where this
// method in general returns the entire contents of two directory trees, we
// optimize some FS calls out on linux. In particular, we take advantage of the
// fact that getdents(2) returns the inode of each file in the directory being
// walked, which, when walking two trees in parallel to generate a list of
// changes, can be used to prune subtrees without ever having to lstat(2) them
// directly. Eliminating stat calls in this way can save up to seconds on large
// images.
type walker struct {
dir1 string
dir2 string
root1 *FileInfo
root2 *FileInfo
}
// collectFileInfoForChanges returns a complete representation of the trees
// rooted at dir1 and dir2, with one important exception: any subtree or
// leaf where the inode and device numbers are an exact match between dir1
// and dir2 will be pruned from the results. This method is *only* to be used
// to generating a list of changes between the two directories, as it does not
// reflect the full contents.
func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
w := &walker{
dir1: dir1,
dir2: dir2,
root1: newRootFileInfo(),
root2: newRootFileInfo(),
}
i1, err := os.Lstat(w.dir1)
if err != nil {
return nil, nil, err
}
i2, err := os.Lstat(w.dir2)
if err != nil {
return nil, nil, err
}
if err := w.walk("/", i1, i2); err != nil {
return nil, nil, err
}
return w.root1, w.root2, nil
}
// Given a FileInfo, its path info, and a reference to the root of the tree
// being constructed, register this file with the tree.
func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
if fi == nil {
return nil
}
parent := root.LookUp(filepath.Dir(path))
if parent == nil {
return fmt.Errorf("collectFileInfoForChanges: Unexpectedly no parent for %s", path)
}
info := &FileInfo{
name: filepath.Base(path),
children: make(map[string]*FileInfo),
parent: parent,
}
cpath := filepath.Join(dir, path)
stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t))
if err != nil {
return err
}
info.stat = stat
info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
parent.children[info.name] = info
return nil
}
// Walk a subtree rooted at the same path in both trees being iterated. For
// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d
func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
// Register these nodes with the return trees, unless we're still at the
// (already-created) roots:
if path != "/" {
if err := walkchunk(path, i1, w.dir1, w.root1); err != nil {
return err
}
if err := walkchunk(path, i2, w.dir2, w.root2); err != nil {
return err
}
}
is1Dir := i1 != nil && i1.IsDir()
is2Dir := i2 != nil && i2.IsDir()
sameDevice := false
if i1 != nil && i2 != nil {
si1 := i1.Sys().(*syscall.Stat_t)
si2 := i2.Sys().(*syscall.Stat_t)
if si1.Dev == si2.Dev {
sameDevice = true
}
}
// If these files are both non-existent, or leaves (non-dirs), we are done.
if !is1Dir && !is2Dir {
return nil
}
// Fetch the names of all the files contained in both directories being walked:
var names1, names2 []nameIno
if is1Dir {
names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access
if err != nil {
return err
}
}
if is2Dir {
names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access
if err != nil {
return err
}
}
// We have lists of the files contained in both parallel directories, sorted
// in the same order. Walk them in parallel, generating a unique merged list
// of all items present in either or both directories.
var names []string
ix1 := 0
ix2 := 0
for {
if ix1 >= len(names1) {
break
}
if ix2 >= len(names2) {
break
}
ni1 := names1[ix1]
ni2 := names2[ix2]
switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) {
case -1: // ni1 < ni2 -- advance ni1
// we will not encounter ni1 in names2
names = append(names, ni1.name)
ix1++
case 0: // ni1 == ni2
if ni1.ino != ni2.ino || !sameDevice {
names = append(names, ni1.name)
}
ix1++
ix2++
case 1: // ni1 > ni2 -- advance ni2
// we will not encounter ni2 in names1
names = append(names, ni2.name)
ix2++
}
}
for ix1 < len(names1) {
names = append(names, names1[ix1].name)
ix1++
}
for ix2 < len(names2) {
names = append(names, names2[ix2].name)
ix2++
}
// For each of the names present in either or both of the directories being
// iterated, stat the name under each root, and recurse the pair of them:
for _, name := range names {
fname := filepath.Join(path, name)
var cInfo1, cInfo2 os.FileInfo
if is1Dir {
cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access
if err != nil && !os.IsNotExist(err) {
return err
}
}
if is2Dir {
cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access
if err != nil && !os.IsNotExist(err) {
return err
}
}
if err = w.walk(fname, cInfo1, cInfo2); err != nil {
return err
}
}
return nil
}
// {name,inode} pairs used to support the early-pruning logic of the walker type
type nameIno struct {
name string
ino uint64
}
type nameInoSlice []nameIno
func (s nameInoSlice) Len() int { return len(s) }
func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name }
// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode
// numbers further up the stack when reading directory contents. Unlike
// os.Readdirnames, which returns a list of filenames, this function returns a
// list of {filename,inode} pairs.
func readdirnames(dirname string) (names []nameIno, err error) {
var (
size = 100
buf = make([]byte, 4096)
nbuf int
bufp int
nb int
)
f, err := os.Open(dirname)
if err != nil {
return nil, err
}
defer f.Close()
names = make([]nameIno, 0, size) // Empty with room to grow.
for {
// Refill the buffer if necessary
if bufp >= nbuf {
bufp = 0
nbuf, err = syscall.ReadDirent(int(f.Fd()), buf) // getdents on linux
if nbuf < 0 {
nbuf = 0
}
if err != nil {
return nil, os.NewSyscallError("readdirent", err)
}
if nbuf <= 0 {
break // EOF
}
}
// Drain the buffer
nb, names = parseDirent(buf[bufp:nbuf], names)
bufp += nb
}
sl := nameInoSlice(names)
sort.Sort(sl)
return sl, nil
}
// parseDirent is a minor modification of syscall.ParseDirent (linux version)
// which returns {name,inode} pairs instead of just names.
func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) {
origlen := len(buf)
for len(buf) > 0 {
dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0]))
buf = buf[dirent.Reclen:]
if dirent.Ino == 0 { // File absent in directory.
continue
}
bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
var name = string(bytes[0:clen(bytes[:])])
if name == "." || name == ".." { // Useless names
continue
}
names = append(names, nameIno{name, dirent.Ino})
}
return origlen - len(buf), names
}
func clen(n []byte) int {
for i := 0; i < len(n); i++ {
if n[i] == 0 {
return i
}
}
return len(n)
}

@@ -0,0 +1,97 @@
// +build !linux
package archive
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
)
func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) {
var (
oldRoot, newRoot *FileInfo
err1, err2 error
errs = make(chan error, 2)
)
go func() {
oldRoot, err1 = collectFileInfo(oldDir)
errs <- err1
}()
go func() {
newRoot, err2 = collectFileInfo(newDir)
errs <- err2
}()
// block until both routines have returned
for i := 0; i < 2; i++ {
if err := <-errs; err != nil {
return nil, nil, err
}
}
return oldRoot, newRoot, nil
}
func collectFileInfo(sourceDir string) (*FileInfo, error) {
root := newRootFileInfo()
err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
if err != nil {
return err
}
// Rebase path
relPath, err := filepath.Rel(sourceDir, path)
if err != nil {
return err
}
// As this runs on the daemon side, file paths are OS specific.
relPath = filepath.Join(string(os.PathSeparator), relPath)
// See https://github.com/golang/go/issues/9168 - bug in filepath.Join.
// Temporary workaround. If the returned path starts with two backslashes,
// trim it down to a single backslash. Only relevant on Windows.
if runtime.GOOS == "windows" {
if strings.HasPrefix(relPath, `\\`) {
relPath = relPath[1:]
}
}
if relPath == string(os.PathSeparator) {
return nil
}
parent := root.LookUp(filepath.Dir(relPath))
if parent == nil {
return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
}
info := &FileInfo{
name: filepath.Base(relPath),
children: make(map[string]*FileInfo),
parent: parent,
}
s, err := system.Lstat(path)
if err != nil {
return err
}
info.stat = s
info.capability, _ = system.Lgetxattr(path, "security.capability")
parent.children[info.name] = info
return nil
})
if err != nil {
return nil, err
}
return root, nil
}

@@ -0,0 +1,36 @@
// +build !windows
package archive
import (
"os"
"syscall"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
)
func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
// Don't look at size for dirs, its not a good measure of change
if oldStat.Mode() != newStat.Mode() ||
oldStat.UID() != newStat.UID() ||
oldStat.GID() != newStat.GID() ||
oldStat.Rdev() != newStat.Rdev() ||
// Don't look at size for dirs, its not a good measure of change
(oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR &&
(!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
return true
}
return false
}
func (info *FileInfo) isDir() bool {
return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0
}
func getIno(fi os.FileInfo) uint64 {
return uint64(fi.Sys().(*syscall.Stat_t).Ino)
}
func hasHardlinks(fi os.FileInfo) bool {
return fi.Sys().(*syscall.Stat_t).Nlink > 1
}

View File

@ -0,0 +1,30 @@
package archive
import (
"os"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
)
func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
// Don't look at size for dirs, it's not a good measure of change
if oldStat.ModTime() != newStat.ModTime() ||
oldStat.Mode() != newStat.Mode() ||
oldStat.Size() != newStat.Size() && !oldStat.IsDir() {
return true
}
return false
}
func (info *FileInfo) isDir() bool {
return info.parent == nil || info.stat.IsDir()
}
func getIno(fi os.FileInfo) (inode uint64) {
return
}
func hasHardlinks(fi os.FileInfo) bool {
return false
}

View File

@ -0,0 +1,458 @@
package archive
import (
"archive/tar"
"errors"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
)
// Errors used or returned by this file.
var (
ErrNotDirectory = errors.New("not a directory")
ErrDirNotExists = errors.New("no such directory")
ErrCannotCopyDir = errors.New("cannot copy directory")
ErrInvalidCopySource = errors.New("invalid copy source content")
)
// PreserveTrailingDotOrSeparator returns the given cleaned path (after
// processing using any utility functions from the path or filepath stdlib
// packages) and appends a trailing `/.` or `/` if its corresponding original
// path (from before being processed by utility functions from the path or
// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned
// path already ends in a `.` path segment, then another is not added. If the
// clean path already ends in a path separator, then another is not added.
func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string {
// Ensure paths are in platform semantics
cleanedPath = normalizePath(cleanedPath)
originalPath = normalizePath(originalPath)
if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) {
if !hasTrailingPathSeparator(cleanedPath) {
// Add a separator if it doesn't already end with one (a cleaned
// path would only end in a separator if it is the root).
cleanedPath += string(filepath.Separator)
}
cleanedPath += "."
}
if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) {
cleanedPath += string(filepath.Separator)
}
return cleanedPath
}
// assertsDirectory returns whether the given path is
// asserted to be a directory, i.e., the path ends with
// a trailing '/' or `/.`, assuming a path separator of `/`.
func assertsDirectory(path string) bool {
return hasTrailingPathSeparator(path) || specifiesCurrentDir(path)
}
// hasTrailingPathSeparator returns whether the given
// path ends with the system's path separator character.
func hasTrailingPathSeparator(path string) bool {
return len(path) > 0 && os.IsPathSeparator(path[len(path)-1])
}
// specifiesCurrentDir returns whether the given path specifies
// a "current directory", i.e., the last path segment is `.`.
func specifiesCurrentDir(path string) bool {
return filepath.Base(path) == "."
}
// SplitPathDirEntry splits the given path between its directory name and its
// basename by first cleaning the path, while preserving a trailing "." if the
// original path specified the current directory.
func SplitPathDirEntry(path string) (dir, base string) {
cleanedPath := filepath.Clean(normalizePath(path))
if specifiesCurrentDir(path) {
cleanedPath += string(filepath.Separator) + "."
}
return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
}
// TarResource archives the resource described by the given CopyInfo to a Tar
// archive. A non-nil error is returned if sourcePath does not exist or is
// asserted to be a directory but exists as another type of file.
//
// This function acts as a convenient wrapper around TarWithOptions, which
// requires a directory as the source path. TarResource accepts either a
// directory or a file path and correctly sets the Tar options.
func TarResource(sourceInfo CopyInfo) (content Archive, err error) {
return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
}
// TarResourceRebase is like TarResource but renames the first path element of
// items in the resulting tar archive to match the given rebaseName if not "".
func TarResourceRebase(sourcePath, rebaseName string) (content Archive, err error) {
sourcePath = normalizePath(sourcePath)
if _, err = os.Lstat(sourcePath); err != nil {
// Catches the case where the source does not exist or is not a
// directory if asserted to be a directory, as this also causes an
// error.
return
}
// Separate the source path between its directory and
// the entry in that directory which we are archiving.
sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
filter := []string{sourceBase}
logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
return TarWithOptions(sourceDir, &TarOptions{
Compression: Uncompressed,
IncludeFiles: filter,
IncludeSourceDir: true,
RebaseNames: map[string]string{
sourceBase: rebaseName,
},
})
}
// CopyInfo holds basic info about the source
// or destination path of a copy operation.
type CopyInfo struct {
Path string
Exists bool
IsDir bool
RebaseName string
}
// CopyInfoSourcePath stats the given path to create a CopyInfo
// struct representing that resource for the source of an archive copy
// operation. The given path should be an absolute local path. A source path
// has all symlinks evaluated that appear before the last path separator ("/"
// on Unix). As it is to be a copy source, the path must exist.
func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
// Normalize the file path and then evaluate the symlink;
// we will use the target file instead of the symlink if
// followLink is set.
path = normalizePath(path)
resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
if err != nil {
return CopyInfo{}, err
}
stat, err := os.Lstat(resolvedPath)
if err != nil {
return CopyInfo{}, err
}
return CopyInfo{
Path: resolvedPath,
Exists: true,
IsDir: stat.IsDir(),
RebaseName: rebaseName,
}, nil
}
// CopyInfoDestinationPath stats the given path to create a CopyInfo
// struct representing that resource for the destination of an archive copy
// operation. The given path should be an absolute local path.
func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
path = normalizePath(path)
originalPath := path
stat, err := os.Lstat(path)
if err == nil && stat.Mode()&os.ModeSymlink == 0 {
// The path exists and is not a symlink.
return CopyInfo{
Path: path,
Exists: true,
IsDir: stat.IsDir(),
}, nil
}
// While the path is a symlink.
for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
if n > maxSymlinkIter {
// Don't follow symlinks more than this arbitrary number of times.
return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
}
// The path is a symbolic link. We need to evaluate it so that the
// destination of the copy operation is the link target and not the
// link itself. This is notably different than CopyInfoSourcePath which
// only evaluates symlinks before the last appearing path separator.
// Also note that it is okay if the last path element is a broken
// symlink as the copy operation should create the target.
var linkTarget string
linkTarget, err = os.Readlink(path)
if err != nil {
return CopyInfo{}, err
}
if !system.IsAbs(linkTarget) {
// Join with the parent directory.
dstParent, _ := SplitPathDirEntry(path)
linkTarget = filepath.Join(dstParent, linkTarget)
}
path = linkTarget
stat, err = os.Lstat(path)
}
if err != nil {
// It's okay if the destination path doesn't exist. We can still
// continue the copy operation if the parent directory exists.
if !os.IsNotExist(err) {
return CopyInfo{}, err
}
// Ensure destination parent dir exists.
dstParent, _ := SplitPathDirEntry(path)
parentDirStat, err := os.Lstat(dstParent)
if err != nil {
return CopyInfo{}, err
}
if !parentDirStat.IsDir() {
return CopyInfo{}, ErrNotDirectory
}
return CopyInfo{Path: path}, nil
}
// The path exists after resolving symlinks.
return CopyInfo{
Path: path,
Exists: true,
IsDir: stat.IsDir(),
}, nil
}
// PrepareArchiveCopy prepares the given srcContent archive, which should
// contain the archived resource described by srcInfo, to the destination
// described by dstInfo. Returns the possibly modified content archive along
// with the path to the destination directory which it should be extracted to.
func PrepareArchiveCopy(srcContent Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) {
// Ensure in platform semantics
srcInfo.Path = normalizePath(srcInfo.Path)
dstInfo.Path = normalizePath(dstInfo.Path)
// Separate the destination path between its directory and base
// components in case the source archive contents need to be rebased.
dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
_, srcBase := SplitPathDirEntry(srcInfo.Path)
switch {
case dstInfo.Exists && dstInfo.IsDir:
// The destination exists as a directory. No alteration
// to srcContent is needed as its contents can be
// simply extracted to the destination directory.
return dstInfo.Path, ioutil.NopCloser(srcContent), nil
case dstInfo.Exists && srcInfo.IsDir:
// The destination exists as some type of file and the source
// content is a directory. This is an error condition since
// you cannot copy a directory to an existing file location.
return "", nil, ErrCannotCopyDir
case dstInfo.Exists:
// The destination exists as some type of file and the source content
// is also a file. The source content entry will have to be renamed to
// have a basename which matches the destination path's basename.
if len(srcInfo.RebaseName) != 0 {
srcBase = srcInfo.RebaseName
}
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
case srcInfo.IsDir:
// The destination does not exist and the source content is an archive
// of a directory. The archive should be extracted to the parent of
// the destination path instead, and when it is, the directory that is
// created as a result should take the name of the destination path.
// The source content entries will have to be renamed to have a
// basename which matches the destination path's basename.
if len(srcInfo.RebaseName) != 0 {
srcBase = srcInfo.RebaseName
}
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
case assertsDirectory(dstInfo.Path):
// The destination does not exist and is asserted to be created as a
// directory, but the source content is not a directory. This is an
// error condition since you cannot create a directory from a file
// source.
return "", nil, ErrDirNotExists
default:
// The last remaining case is when the destination does not exist, is
// not asserted to be a directory, and the source content is not an
// archive of a directory. In this case, the destination file will need
// to be created when the archive is extracted and the source content
// entry will have to be renamed to have a basename which matches the
// destination path's basename.
if len(srcInfo.RebaseName) != 0 {
srcBase = srcInfo.RebaseName
}
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
}
}
// RebaseArchiveEntries rewrites the given srcContent archive replacing
// an occurrence of oldBase with newBase at the beginning of entry names.
func RebaseArchiveEntries(srcContent Reader, oldBase, newBase string) Archive {
if oldBase == string(os.PathSeparator) {
// If oldBase specifies the root directory, use an empty string as
// oldBase instead so that newBase doesn't replace the path separator
// that all paths will start with.
oldBase = ""
}
rebased, w := io.Pipe()
go func() {
srcTar := tar.NewReader(srcContent)
rebasedTar := tar.NewWriter(w)
for {
hdr, err := srcTar.Next()
if err == io.EOF {
// Signals end of archive.
rebasedTar.Close()
w.Close()
return
}
if err != nil {
w.CloseWithError(err)
return
}
hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
if err = rebasedTar.WriteHeader(hdr); err != nil {
w.CloseWithError(err)
return
}
if _, err = io.Copy(rebasedTar, srcTar); err != nil {
w.CloseWithError(err)
return
}
}
}()
return rebased
}
// CopyResource performs an archive copy from the given source path to the
// given destination path. The source path MUST exist and the destination
// path's parent directory must exist.
func CopyResource(srcPath, dstPath string, followLink bool) error {
var (
srcInfo CopyInfo
err error
)
// Ensure in platform semantics
srcPath = normalizePath(srcPath)
dstPath = normalizePath(dstPath)
// Clean the source and destination paths.
srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath)
dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath)
if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
return err
}
content, err := TarResource(srcInfo)
if err != nil {
return err
}
defer content.Close()
return CopyTo(content, srcInfo, dstPath)
}
// CopyTo handles extracting the given content whose
// entries should be sourced from srcInfo to dstPath.
func CopyTo(content Reader, srcInfo CopyInfo, dstPath string) error {
// The destination path need not exist, but CopyInfoDestinationPath will
// ensure that at least the parent directory exists.
dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
if err != nil {
return err
}
dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
if err != nil {
return err
}
defer copyArchive.Close()
options := &TarOptions{
NoLchown: true,
NoOverwriteDirNonDir: true,
}
return Untar(copyArchive, dstDir, options)
}
// ResolveHostSourcePath decides which real path needs to be copied, based on
// whether symlinks should be followed. If followLink is true, resolvedPath is
// the link target of any symlink file; otherwise only symlinks in the parent
// directory are resolved, and a symlink in the final path element is returned
// as-is without being resolved.
func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
if followLink {
resolvedPath, err = filepath.EvalSymlinks(path)
if err != nil {
return
}
resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
} else {
dirPath, basePath := filepath.Split(path)
// if not follow symbol link, then resolve symbol link of parent dir
var resolvedDirPath string
resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
if err != nil {
return
}
// resolvedDirPath will have been cleaned (no trailing path separators) so
// we can manually join it with the base path element.
resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) {
rebaseName = filepath.Base(path)
}
}
return resolvedPath, rebaseName, nil
}
// GetRebaseName normalizes and compares path and resolvedPath, and returns
// the completed resolved path and the rebased file name.
func GetRebaseName(path, resolvedPath string) (string, string) {
// resolvedPath will have been cleaned (no trailing path separator or dot), so
// we can manually re-append them where needed.
var rebaseName string
if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) {
resolvedPath += string(filepath.Separator) + "."
}
if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) {
resolvedPath += string(filepath.Separator)
}
if filepath.Base(path) != filepath.Base(resolvedPath) {
// In the case where the path had a trailing separator and a symlink
// evaluation has changed the last path component, we will need to
// rebase the name in the archive that is being copied to match the
// originally requested name.
rebaseName = filepath.Base(path)
}
return resolvedPath, rebaseName
}
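The helpers above (PreserveTrailingDotOrSeparator, SplitPathDirEntry, the CopyInfo constructors) all feed into CopyResource, which is the usual entry point. A minimal usage sketch, not part of the vendored file; the paths are hypothetical and the import path is the vendored one used throughout this commit:

package main

import (
	"log"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
)

func main() {
	// Copy a single file into a directory. The trailing separator on the
	// destination asserts that it is (or should be) a directory; the source
	// must exist and the destination's parent directory must exist.
	if err := archive.CopyResource("/tmp/src.txt", "/tmp/dst/", false); err != nil {
		log.Fatal(err)
	}
}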

View File

@ -0,0 +1,11 @@
// +build !windows

package archive
import (
"path/filepath"
)
func normalizePath(path string) string {
return filepath.ToSlash(path)
}

View File

@ -0,0 +1,9 @@
package archive
import (
"path/filepath"
)
func normalizePath(path string) string {
return filepath.FromSlash(path)
}

View File

@ -0,0 +1,279 @@
package archive
import (
"archive/tar"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
)
// UnpackLayer unpacks `layer` into `dest`. The stream `layer` can be
// compressed or uncompressed.
// Returns the size in bytes of the contents of the layer.
func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, err error) {
tr := tar.NewReader(layer)
trBuf := pools.BufioReader32KPool.Get(tr)
defer pools.BufioReader32KPool.Put(trBuf)
var dirs []*tar.Header
unpackedPaths := make(map[string]struct{})
if options == nil {
options = &TarOptions{}
}
if options.ExcludePatterns == nil {
options.ExcludePatterns = []string{}
}
remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
if err != nil {
return 0, err
}
aufsTempdir := ""
aufsHardlinks := make(map[string]*tar.Header)
if options == nil {
options = &TarOptions{}
}
// Iterate through the files in the archive.
for {
hdr, err := tr.Next()
if err == io.EOF {
// end of tar archive
break
}
if err != nil {
return 0, err
}
size += hdr.Size
// Normalize name, for safety and for a simple is-root check
hdr.Name = filepath.Clean(hdr.Name)
// Windows does not support filenames with colons in them. Ignore
// these files. This is not a problem though (although it might
// appear that it is). Let's suppose a client is running docker pull.
// The daemon it points to is Windows. Would it make sense for the
// client to be doing a docker pull Ubuntu for example (which has files
// with colons in the name under /usr/share/man/man3)? No, absolutely
// not as it would really only make sense that they were pulling a
// Windows image. However, for development, it is necessary to be able
// to pull Linux images which are in the repository.
//
// TODO Windows. Once the registry is aware of what images are Windows-
// specific or Linux-specific, this warning should be changed to an error
// to cater for the situation where someone does manage to upload a Linux
// image but have it tagged as Windows inadvertently.
if runtime.GOOS == "windows" {
if strings.Contains(hdr.Name, ":") {
logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
continue
}
}
// Note as these operations are platform specific, so must the slash be.
if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
// Not the root directory, ensure that the parent directory exists.
// This happened in some tests where an image had a tarfile without any
// parent directories.
parent := filepath.Dir(hdr.Name)
parentPath := filepath.Join(dest, parent)
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
err = system.MkdirAll(parentPath, 0600)
if err != nil {
return 0, err
}
}
}
// Skip AUFS metadata dirs
if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
// Regular files inside /.wh..wh.plnk can be used as hardlink targets
// We don't want this directory, but we need the files in them so that
// such hardlinks can be resolved.
if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
basename := filepath.Base(hdr.Name)
aufsHardlinks[basename] = hdr
if aufsTempdir == "" {
if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
return 0, err
}
defer os.RemoveAll(aufsTempdir)
}
if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil); err != nil {
return 0, err
}
}
if hdr.Name != WhiteoutOpaqueDir {
continue
}
}
path := filepath.Join(dest, hdr.Name)
rel, err := filepath.Rel(dest, path)
if err != nil {
return 0, err
}
// Note as these operations are platform specific, so must the slash be.
if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
}
base := filepath.Base(path)
if strings.HasPrefix(base, WhiteoutPrefix) {
dir := filepath.Dir(path)
if base == WhiteoutOpaqueDir {
_, err := os.Lstat(dir)
if err != nil {
return 0, err
}
err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
if os.IsNotExist(err) {
err = nil // parent was deleted
}
return err
}
if path == dir {
return nil
}
if _, exists := unpackedPaths[path]; !exists {
err := os.RemoveAll(path)
return err
}
return nil
})
if err != nil {
return 0, err
}
} else {
originalBase := base[len(WhiteoutPrefix):]
originalPath := filepath.Join(dir, originalBase)
if err := os.RemoveAll(originalPath); err != nil {
return 0, err
}
}
} else {
// If the path exists we almost always just want to remove and replace it.
// The only exception is when it is a directory *and* the file from
// the layer is also a directory. Then we want to merge them (i.e.
// just apply the metadata from the layer).
if fi, err := os.Lstat(path); err == nil {
if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
if err := os.RemoveAll(path); err != nil {
return 0, err
}
}
}
trBuf.Reset(tr)
srcData := io.Reader(trBuf)
srcHdr := hdr
// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
// we manually retarget these into the temporary files we extracted them into
if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
linkBasename := filepath.Base(hdr.Linkname)
srcHdr = aufsHardlinks[linkBasename]
if srcHdr == nil {
return 0, fmt.Errorf("Invalid aufs hardlink")
}
tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
if err != nil {
return 0, err
}
defer tmpFile.Close()
srcData = tmpFile
}
// if the options contain uid & gid maps, convert header uid/gid
// entries using the maps such that lchown sets the proper mapped
// uid/gid after writing the file. We only perform this mapping if
// the file isn't already owned by the remapped root UID or GID, as
// that specific uid/gid has no mapping from container -> host, and
// those files already have the proper ownership for inside the
// container.
if srcHdr.Uid != remappedRootUID {
xUID, err := idtools.ToHost(srcHdr.Uid, options.UIDMaps)
if err != nil {
return 0, err
}
srcHdr.Uid = xUID
}
if srcHdr.Gid != remappedRootGID {
xGID, err := idtools.ToHost(srcHdr.Gid, options.GIDMaps)
if err != nil {
return 0, err
}
srcHdr.Gid = xGID
}
if err := createTarFile(path, dest, srcHdr, srcData, true, nil); err != nil {
return 0, err
}
// Directory mtimes must be handled at the end to avoid further
// file creation in them to modify the directory mtime
if hdr.Typeflag == tar.TypeDir {
dirs = append(dirs, hdr)
}
unpackedPaths[path] = struct{}{}
}
}
for _, hdr := range dirs {
path := filepath.Join(dest, hdr.Name)
if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
return 0, err
}
}
return size, nil
}
// ApplyLayer parses a diff in the standard layer format from `layer`,
// and applies it to the directory `dest`. The stream `layer` can be
// compressed or uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyLayer(dest string, layer Reader) (int64, error) {
return applyLayerHandler(dest, layer, &TarOptions{}, true)
}
// ApplyUncompressedLayer parses a diff in the standard layer format from
// `layer`, and applies it to the directory `dest`. The stream `layer`
// can only be uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyUncompressedLayer(dest string, layer Reader, options *TarOptions) (int64, error) {
return applyLayerHandler(dest, layer, options, false)
}
// applyLayerHandler does the bulk of the work for ApplyLayer, but allows for not calling DecompressStream
func applyLayerHandler(dest string, layer Reader, options *TarOptions, decompress bool) (int64, error) {
dest = filepath.Clean(dest)
// We need to be able to set any perms
oldmask, err := system.Umask(0)
if err != nil {
return 0, err
}
defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform
if decompress {
layer, err = DecompressStream(layer)
if err != nil {
return 0, err
}
}
return UnpackLayer(dest, layer, options)
}
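ApplyLayer/UnpackLayer are the consumer side of the layer format handled above. A minimal usage sketch, not part of the vendored file; the layer and rootfs paths are hypothetical, and the layer stream may be compressed because decompression is handled internally:

package main

import (
	"log"
	"os"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
)

func main() {
	layer, err := os.Open("/tmp/layer.tar.gz") // hypothetical layer archive
	if err != nil {
		log.Fatal(err)
	}
	defer layer.Close()

	// Apply the layer (including whiteouts) on top of an unpacked root filesystem.
	size, err := archive.ApplyLayer("/tmp/rootfs", layer)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("applied %d bytes of layer content", size)
}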

View File

@ -0,0 +1,97 @@
// +build ignore

// Simple tool to create an archive stream from an old and new directory
//
// By default it will stream the comparison of two temporary directories with junk files
package main
import (
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
)
var (
flDebug = flag.Bool("D", false, "debugging output")
flNewDir = flag.String("newdir", "", "")
flOldDir = flag.String("olddir", "", "")
log = logrus.New()
)
func main() {
flag.Usage = func() {
fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
fmt.Printf("%s [OPTIONS]\n", os.Args[0])
flag.PrintDefaults()
}
flag.Parse()
log.Out = os.Stderr
if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
logrus.SetLevel(logrus.DebugLevel)
}
var newDir, oldDir string
if len(*flNewDir) == 0 {
var err error
newDir, err = ioutil.TempDir("", "docker-test-newDir")
if err != nil {
log.Fatal(err)
}
defer os.RemoveAll(newDir)
if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
log.Fatal(err)
}
} else {
newDir = *flNewDir
}
if len(*flOldDir) == 0 {
var err error
oldDir, err = ioutil.TempDir("", "docker-test-oldDir")
if err != nil {
log.Fatal(err)
}
defer os.RemoveAll(oldDir)
} else {
oldDir = *flOldDir
}
changes, err := archive.ChangesDirs(newDir, oldDir)
if err != nil {
log.Fatal(err)
}
a, err := archive.ExportChanges(newDir, changes)
if err != nil {
log.Fatal(err)
}
defer a.Close()
i, err := io.Copy(os.Stdout, a)
if err != nil && err != io.EOF {
log.Fatal(err)
}
fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
}
func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
fileData := []byte("fooo")
for n := 0; n < numberOfFiles; n++ {
fileName := fmt.Sprintf("file-%d", n)
if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
return 0, err
}
if makeLinks {
if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
return 0, err
}
}
}
totalSize := numberOfFiles * len(fileData)
return totalSize, nil
}

View File

@ -0,0 +1,16 @@
package archive
import (
"syscall"
"time"
)
func timeToTimespec(time time.Time) (ts syscall.Timespec) {
if time.IsZero() {
// Return UTIME_OMIT special value
ts.Sec = 0
ts.Nsec = ((1 << 30) - 2)
return
}
return syscall.NsecToTimespec(time.UnixNano())
}
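The ((1 << 30) - 2) value above corresponds to Linux's UTIME_OMIT, which tells utimensat(2) to leave that timestamp untouched. A small sketch, not part of the vendored file and using a hypothetical file name, showing the same trick through the raw syscall wrapper to change only mtime while preserving atime:

package main

import (
	"log"
	"syscall"
	"time"
)

func main() {
	// UTIME_OMIT: leave this timestamp (atime here) unchanged.
	omit := syscall.Timespec{Sec: 0, Nsec: (1 << 30) - 2}
	mtime := syscall.NsecToTimespec(time.Now().Add(-time.Hour).UnixNano())

	// ts[0] is atime, ts[1] is mtime for utimensat-backed UtimesNano.
	if err := syscall.UtimesNano("/tmp/example.txt", []syscall.Timespec{omit, mtime}); err != nil {
		log.Fatal(err)
	}
}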

View File

@ -0,0 +1,16 @@
// +build !linux

package archive
import (
"syscall"
"time"
)
func timeToTimespec(time time.Time) (ts syscall.Timespec) {
nsec := int64(0)
if !time.IsZero() {
nsec = time.UnixNano()
}
return syscall.NsecToTimespec(nsec)
}

View File

@ -0,0 +1,23 @@
package archive
// Whiteouts are files with a special meaning for the layered filesystem.
// Docker uses AUFS whiteout files inside exported archives. In other
// filesystems these files are generated/handled on tar creation/extraction.
// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a
// filename this means that file has been removed from the base layer.
const WhiteoutPrefix = ".wh."
// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not
// for removing an actual file. Normally these files are excluded from exported
// archives.
const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix
// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
// layers. Normally these should not go into exported archives and all changed
// hardlinks should be copied to the top layer.
const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"
// WhiteoutOpaqueDir file means directory has been made opaque - meaning
// readdir calls to this directory do not follow to lower layers.
const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
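These prefixes are only naming conventions; consumers derive the deleted path from the entry name when walking a layer. The sketch below is not part of the vendored file (whiteoutTarget is a hypothetical helper) and shows how such entries are typically interpreted:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

const whiteoutPrefix = ".wh."
const whiteoutOpaqueDir = ".wh..wh..opq"

// whiteoutTarget reports whether the entry is a whiteout and, if so, which
// sibling path it deletes ("" for an opaque-directory marker).
func whiteoutTarget(entry string) (isWhiteout bool, deletes string) {
	base := filepath.Base(entry)
	if base == whiteoutOpaqueDir {
		return true, "" // the parent directory hides all lower-layer content
	}
	if strings.HasPrefix(base, whiteoutPrefix) {
		return true, filepath.Join(filepath.Dir(entry), base[len(whiteoutPrefix):])
	}
	return false, ""
}

func main() {
	fmt.Println(whiteoutTarget("etc/.wh.passwd"))       // true etc/passwd
	fmt.Println(whiteoutTarget("var/lib/.wh..wh..opq")) // true ""
	fmt.Println(whiteoutTarget("etc/hosts"))            // false ""
}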

View File

@ -0,0 +1,59 @@
package archive
import (
"archive/tar"
"bytes"
"io/ioutil"
)
// Generate generates a new archive from the content provided
// as input.
//
// `files` is a sequence of path/content pairs. A new file is
// added to the archive for each pair.
// If the last pair is incomplete, the file is created with an
// empty content. For example:
//
// Generate("foo.txt", "hello world", "emptyfile")
//
// The above call will return an archive with 2 files:
// * ./foo.txt with content "hello world"
// * ./emptyfile with empty content
//
// FIXME: stream content instead of buffering
// FIXME: specify permissions and other archive metadata
func Generate(input ...string) (Archive, error) {
files := parseStringPairs(input...)
buf := new(bytes.Buffer)
tw := tar.NewWriter(buf)
for _, file := range files {
name, content := file[0], file[1]
hdr := &tar.Header{
Name: name,
Size: int64(len(content)),
}
if err := tw.WriteHeader(hdr); err != nil {
return nil, err
}
if _, err := tw.Write([]byte(content)); err != nil {
return nil, err
}
}
if err := tw.Close(); err != nil {
return nil, err
}
return ioutil.NopCloser(buf), nil
}
func parseStringPairs(input ...string) (output [][2]string) {
output = make([][2]string, 0, len(input)/2+1)
for i := 0; i < len(input); i += 2 {
var pair [2]string
pair[0] = input[i]
if i+1 < len(input) {
pair[1] = input[i+1]
}
output = append(output, pair)
}
return
}
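Generate is handy in tests for building a throwaway tar stream in memory. A minimal usage sketch, not part of the vendored file, reading the result back with archive/tar and assuming the vendored import path used in this commit:

package main

import (
	"archive/tar"
	"fmt"
	"io"
	"log"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
)

func main() {
	a, err := archive.Generate("foo.txt", "hello world", "emptyfile")
	if err != nil {
		log.Fatal(err)
	}
	defer a.Close()

	// Walk the generated archive and print each entry.
	tr := tar.NewReader(a)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s (%d bytes)\n", hdr.Name, hdr.Size)
	}
}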

View File

@ -0,0 +1,279 @@
package fileutils
import (
"errors"
"fmt"
"io"
"os"
"path/filepath"
"regexp"
"strings"
"text/scanner"
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
)
// exclusion returns true if the specified pattern is an exclusion
func exclusion(pattern string) bool {
return pattern[0] == '!'
}
// empty returns true if the specified pattern is empty
func empty(pattern string) bool {
return pattern == ""
}
// CleanPatterns takes a slice of patterns and returns a new
// slice of patterns cleaned with filepath.Clean, stripped
// of any empty patterns and lets the caller know whether the
// slice contains any exception patterns (prefixed with !).
func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) {
// Loop over exclusion patterns and:
// 1. Clean them up.
// 2. Indicate whether we are dealing with any exception rules.
// 3. Error if we see a single exclusion marker on its own (!).
cleanedPatterns := []string{}
patternDirs := [][]string{}
exceptions := false
for _, pattern := range patterns {
// Eliminate leading and trailing whitespace.
pattern = strings.TrimSpace(pattern)
if empty(pattern) {
continue
}
if exclusion(pattern) {
if len(pattern) == 1 {
return nil, nil, false, errors.New("Illegal exclusion pattern: !")
}
exceptions = true
}
pattern = filepath.Clean(pattern)
cleanedPatterns = append(cleanedPatterns, pattern)
if exclusion(pattern) {
pattern = pattern[1:]
}
patternDirs = append(patternDirs, strings.Split(pattern, "/"))
}
return cleanedPatterns, patternDirs, exceptions, nil
}
// Matches returns true if file matches any of the patterns
// and isn't excluded by any of the subsequent patterns.
func Matches(file string, patterns []string) (bool, error) {
file = filepath.Clean(file)
if file == "." {
// Don't let them exclude everything, kind of silly.
return false, nil
}
patterns, patDirs, _, err := CleanPatterns(patterns)
if err != nil {
return false, err
}
return OptimizedMatches(file, patterns, patDirs)
}
// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go.
// It will assume that the inputs have been preprocessed and therefore the function
// doesn't need to do as much error checking and clean-up. This was done to avoid
// repeating these steps on each file being checked during the archive process.
// The more generic fileutils.Matches() can't make these assumptions.
func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) {
matched := false
parentPath := filepath.Dir(file)
parentPathDirs := strings.Split(parentPath, "/")
for i, pattern := range patterns {
negative := false
if exclusion(pattern) {
negative = true
pattern = pattern[1:]
}
match, err := regexpMatch(pattern, file)
if err != nil {
return false, fmt.Errorf("Error in pattern (%s): %s", pattern, err)
}
if !match && parentPath != "." {
// Check to see if the pattern matches one of our parent dirs.
if len(patDirs[i]) <= len(parentPathDirs) {
match, _ = regexpMatch(strings.Join(patDirs[i], "/"),
strings.Join(parentPathDirs[:len(patDirs[i])], "/"))
}
}
if match {
matched = !negative
}
}
if matched {
logrus.Debugf("Skipping excluded path: %s", file)
}
return matched, nil
}
// regexpMatch tries to match the logic of filepath.Match but
// does so using regexp logic. We do this so that we can expand the
// wildcard set to include other things, like "**" to mean any number
// of directories. This means that we should be backwards compatible
// with filepath.Match(). We'll end up supporting more stuff, due to
// the fact that we're using regexp, but that's ok - it does no harm.
func regexpMatch(pattern, path string) (bool, error) {
regStr := "^"
// Do some syntax checking on the pattern.
// filepath's Match() has some really weird rules that are inconsistent
// so instead of trying to dup their logic, just call Match() for its
// error state and if there is an error in the pattern return it.
// If this becomes an issue we can remove this since it's really only
// needed in the error (syntax) case - which isn't really critical.
if _, err := filepath.Match(pattern, path); err != nil {
return false, err
}
// Go through the pattern and convert it to a regexp.
// We use a scanner so we can support utf-8 chars.
var scan scanner.Scanner
scan.Init(strings.NewReader(pattern))
sl := string(os.PathSeparator)
escSL := sl
if sl == `\` {
escSL += `\`
}
for scan.Peek() != scanner.EOF {
ch := scan.Next()
if ch == '*' {
if scan.Peek() == '*' {
// is some flavor of "**"
scan.Next()
if scan.Peek() == scanner.EOF {
// is "**EOF" - to align with .gitignore just accept all
regStr += ".*"
} else {
// is "**"
regStr += "((.*" + escSL + ")|([^" + escSL + "]*))"
}
// Treat **/ as ** so eat the "/"
if string(scan.Peek()) == sl {
scan.Next()
}
} else {
// is "*" so map it to anything but "/"
regStr += "[^" + escSL + "]*"
}
} else if ch == '?' {
// "?" is any char except "/"
regStr += "[^" + escSL + "]"
} else if strings.Index(".$", string(ch)) != -1 {
// Escape some regexp special chars that have no meaning
// in golang's filepath.Match
regStr += `\` + string(ch)
} else if ch == '\\' {
// escape next char. Note that a trailing \ in the pattern
// will be left alone (but need to escape it)
if sl == `\` {
// On windows map "\" to "\\", meaning an escaped backslash,
// and then just continue because filepath.Match on
// Windows doesn't allow escaping at all
regStr += escSL
continue
}
if scan.Peek() != scanner.EOF {
regStr += `\` + string(scan.Next())
} else {
regStr += `\`
}
} else {
regStr += string(ch)
}
}
regStr += "$"
res, err := regexp.MatchString(regStr, path)
// Map regexp's error to filepath's so no one knows we're not using filepath
if err != nil {
err = filepath.ErrBadPattern
}
return res, err
}
// CopyFile copies from src to dst until either EOF is reached
// on src or an error occurs. It verifies src exists and removes
// the dst if it exists.
func CopyFile(src, dst string) (int64, error) {
cleanSrc := filepath.Clean(src)
cleanDst := filepath.Clean(dst)
if cleanSrc == cleanDst {
return 0, nil
}
sf, err := os.Open(cleanSrc)
if err != nil {
return 0, err
}
defer sf.Close()
if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) {
return 0, err
}
df, err := os.Create(cleanDst)
if err != nil {
return 0, err
}
defer df.Close()
return io.Copy(df, sf)
}
// ReadSymlinkedDirectory returns the target directory of a symlink.
// The target of the symbolic link must be a directory, not a file.
func ReadSymlinkedDirectory(path string) (string, error) {
var realPath string
var err error
if realPath, err = filepath.Abs(path); err != nil {
return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err)
}
if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err)
}
realPathInfo, err := os.Stat(realPath)
if err != nil {
return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err)
}
if !realPathInfo.Mode().IsDir() {
return "", fmt.Errorf("canonical path points to a file '%s'", realPath)
}
return realPath, nil
}
// CreateIfNotExists creates a file or a directory only if it does not already exist.
func CreateIfNotExists(path string, isDir bool) error {
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
if isDir {
return os.MkdirAll(path, 0755)
}
if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
return err
}
f, err := os.OpenFile(path, os.O_CREATE, 0755)
if err != nil {
return err
}
f.Close()
}
}
return nil
}
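Matches is the entry point most callers use for .dockerignore-style filtering, including the "**" and "!" extensions handled by regexpMatch above. A minimal usage sketch, not part of the vendored file, with made-up file names:

package main

import (
	"fmt"
	"log"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils"
)

func main() {
	// Exclude everything under docs/, except the README.
	patterns := []string{"docs/**", "!docs/README.md"}

	for _, f := range []string{"docs/api/spec.yaml", "docs/README.md", "main.go"} {
		excluded, err := fileutils.Matches(f, patterns)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%-20s excluded=%v\n", f, excluded)
	}
}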

View File

@ -0,0 +1,22 @@
// +build linux freebsd

package fileutils
import (
"fmt"
"io/ioutil"
"os"
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
)
// GetTotalUsedFds Returns the number of used File Descriptors by
// reading it via /proc filesystem.
func GetTotalUsedFds() int {
if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
} else {
return len(fds)
}
return -1
}

View File

@ -0,0 +1,7 @@
package fileutils
// GetTotalUsedFds Returns the number of used File Descriptors. Not supported
// on Windows.
func GetTotalUsedFds() int {
return -1
}

View File

@ -0,0 +1,39 @@
package homedir
import (
"os"
"runtime"
"github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user"
)
// Key returns the env var name for the user's home dir based on
// the platform being run on
func Key() string {
if runtime.GOOS == "windows" {
return "USERPROFILE"
}
return "HOME"
}
// Get returns the home directory of the current user with the help of
// environment variables depending on the target operating system.
// Returned path should be used with "path/filepath" to form new paths.
func Get() string {
home := os.Getenv(Key())
if home == "" && runtime.GOOS != "windows" {
if u, err := user.CurrentUser(); err == nil {
return u.Home
}
}
return home
}
// GetShortcutString returns the string that is the shortcut to the user's home
// directory in the native shell of the platform being run on.
func GetShortcutString() string {
if runtime.GOOS == "windows" {
return "%USERPROFILE%" // be careful while using in format functions
}
return "~"
}
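A minimal usage sketch (not part of the vendored file) of the homedir helpers; the config file name is just an example:

package main

import (
	"fmt"
	"path/filepath"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir"
)

func main() {
	// Build a path under the current user's home directory.
	cfg := filepath.Join(homedir.Get(), ".docker", "config.json")
	fmt.Println("config path:", cfg)
	fmt.Println("home env var:", homedir.Key()) // HOME, or USERPROFILE on Windows
}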

View File

@ -0,0 +1,195 @@
package idtools
import (
"bufio"
"fmt"
"os"
"sort"
"strconv"
"strings"
)
// IDMap contains a single entry for user namespace range remapping. An array
// of IDMap entries represents the structure that will be provided to the Linux
// kernel for creating a user namespace.
type IDMap struct {
ContainerID int `json:"container_id"`
HostID int `json:"host_id"`
Size int `json:"size"`
}
type subIDRange struct {
Start int
Length int
}
type ranges []subIDRange
func (e ranges) Len() int { return len(e) }
func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start }
const (
subuidFileName string = "/etc/subuid"
subgidFileName string = "/etc/subgid"
)
// MkdirAllAs creates a directory (including any along the path) and then modifies
// ownership to the requested uid/gid. If the directory already exists, this
// function will still change ownership to the requested uid/gid pair.
func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
return mkdirAs(path, mode, ownerUID, ownerGID, true, true)
}
// MkdirAllNewAs creates a directory (including any along the path) and then modifies
// ownership ONLY of newly created directories to the requested uid/gid. If the
// directories along the path exist, no change of ownership will be performed
func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
return mkdirAs(path, mode, ownerUID, ownerGID, true, false)
}
// MkdirAs creates a directory and then modifies ownership to the requested uid/gid.
// If the directory already exists, this function still changes ownership
func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
return mkdirAs(path, mode, ownerUID, ownerGID, false, true)
}
// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
// If the maps are empty, then the root uid/gid will default to "real" 0/0
func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
var uid, gid int
if uidMap != nil {
xUID, err := ToHost(0, uidMap)
if err != nil {
return -1, -1, err
}
uid = xUID
}
if gidMap != nil {
xGID, err := ToHost(0, gidMap)
if err != nil {
return -1, -1, err
}
gid = xGID
}
return uid, gid, nil
}
// ToContainer takes an id mapping, and uses it to translate a
// host ID to the remapped ID. If no map is provided, then the translation
// assumes a 1-to-1 mapping and returns the passed in id
func ToContainer(hostID int, idMap []IDMap) (int, error) {
if idMap == nil {
return hostID, nil
}
for _, m := range idMap {
if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) {
contID := m.ContainerID + (hostID - m.HostID)
return contID, nil
}
}
return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID)
}
// ToHost takes an id mapping and a remapped ID, and translates the
// ID to the mapped host ID. If no map is provided, then the translation
// assumes a 1-to-1 mapping and returns the passed in id #
func ToHost(contID int, idMap []IDMap) (int, error) {
if idMap == nil {
return contID, nil
}
for _, m := range idMap {
if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) {
hostID := m.HostID + (contID - m.ContainerID)
return hostID, nil
}
}
return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID)
}
// CreateIDMappings takes a requested user and group name and
// using the data from /etc/sub{uid,gid} ranges, creates the
// proper uid and gid remapping ranges for that user/group pair
func CreateIDMappings(username, groupname string) ([]IDMap, []IDMap, error) {
subuidRanges, err := parseSubuid(username)
if err != nil {
return nil, nil, err
}
subgidRanges, err := parseSubgid(groupname)
if err != nil {
return nil, nil, err
}
if len(subuidRanges) == 0 {
return nil, nil, fmt.Errorf("No subuid ranges found for user %q", username)
}
if len(subgidRanges) == 0 {
return nil, nil, fmt.Errorf("No subgid ranges found for group %q", groupname)
}
return createIDMap(subuidRanges), createIDMap(subgidRanges), nil
}
func createIDMap(subidRanges ranges) []IDMap {
idMap := []IDMap{}
// sort the ranges by lowest ID first
sort.Sort(subidRanges)
containerID := 0
for _, idrange := range subidRanges {
idMap = append(idMap, IDMap{
ContainerID: containerID,
HostID: idrange.Start,
Size: idrange.Length,
})
containerID = containerID + idrange.Length
}
return idMap
}
func parseSubuid(username string) (ranges, error) {
return parseSubidFile(subuidFileName, username)
}
func parseSubgid(username string) (ranges, error) {
return parseSubidFile(subgidFileName, username)
}
func parseSubidFile(path, username string) (ranges, error) {
var rangeList ranges
subidFile, err := os.Open(path)
if err != nil {
return rangeList, err
}
defer subidFile.Close()
s := bufio.NewScanner(subidFile)
for s.Scan() {
if err := s.Err(); err != nil {
return rangeList, err
}
text := strings.TrimSpace(s.Text())
if text == "" {
continue
}
parts := strings.Split(text, ":")
if len(parts) != 3 {
return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path)
}
if parts[0] == username {
// return the first entry for a user; ignores potential for multiple ranges per user
startid, err := strconv.Atoi(parts[1])
if err != nil {
return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
}
length, err := strconv.Atoi(parts[2])
if err != nil {
return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
}
rangeList = append(rangeList, subIDRange{startid, length})
}
}
return rangeList, nil
}
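The ToHost/ToContainer arithmetic is easiest to see with a single remapping range. A minimal sketch, not part of the vendored file; the 100000/65536 range is a typical but hypothetical /etc/subuid entry:

package main

import (
	"fmt"
	"log"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools"
)

func main() {
	// Map container IDs 0..65535 onto host IDs 100000..165535.
	idMap := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}

	hostUID, err := idtools.ToHost(0, idMap) // container root -> host uid 100000
	if err != nil {
		log.Fatal(err)
	}
	contUID, err := idtools.ToContainer(100042, idMap) // host uid 100042 -> container uid 42
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(hostUID, contUID) // 100000 42
}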

View File

@ -0,0 +1,60 @@
// +build !windows

package idtools
import (
"os"
"path/filepath"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
)
func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
// make an array containing the original path asked for, plus (for mkAll == true)
// all path components leading up to the complete path that don't exist before we MkdirAll
// so that we can chown all of them properly at the end. If chownExisting is false, we won't
// chown the full directory path if it exists
var paths []string
if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
paths = []string{path}
} else if err == nil && chownExisting {
if err := os.Chown(path, ownerUID, ownerGID); err != nil {
return err
}
// short-circuit--we were called with an existing directory and chown was requested
return nil
} else if err == nil {
// nothing to do; directory path fully exists already and chown was NOT requested
return nil
}
if mkAll {
// walk back to "/" looking for directories which do not exist
// and add them to the paths array for chown after creation
dirPath := path
for {
dirPath = filepath.Dir(dirPath)
if dirPath == "/" {
break
}
if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) {
paths = append(paths, dirPath)
}
}
if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) {
return err
}
} else {
if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) {
return err
}
}
// even if it existed, we will chown the requested path + any subpaths that
// didn't exist when we called MkdirAll
for _, pathComponent := range paths {
if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil {
return err
}
}
return nil
}
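mkdirAs backs the exported MkdirAllAs/MkdirAllNewAs helpers. A minimal usage sketch, not part of the vendored file; the path and ranges are hypothetical, and with the *NewAs variant only directories created by the call are chowned:

package main

import (
	"log"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools"
)

func main() {
	uidMap := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
	gidMap := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}

	// Resolve the host uid/gid that remapped root maps to.
	rootUID, rootGID, err := idtools.GetRootUIDGID(uidMap, gidMap)
	if err != nil {
		log.Fatal(err)
	}
	// Create the tree; existing directories keep their current ownership.
	if err := idtools.MkdirAllNewAs("/var/lib/demo/100000.100000/tmp", 0755, rootUID, rootGID); err != nil {
		log.Fatal(err)
	}
}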

View File

@ -0,0 +1,18 @@
// +build windows

package idtools
import (
"os"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
)
// Platforms such as Windows do not support the UID/GID concept. So make this
// just a wrapper around system.MkdirAll.
func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) {
return err
}
return nil
}

View File

@ -0,0 +1,155 @@
package idtools
import (
"fmt"
"os/exec"
"path/filepath"
"strings"
"syscall"
)
// add a user and/or group to Linux /etc/passwd, /etc/group using standard
// Linux distribution commands:
// adduser --uid <id> --shell /bin/login --no-create-home --disabled-login --ingroup <groupname> <username>
// useradd -M -u <id> -s /bin/nologin -N -g <groupname> <username>
// addgroup --gid <id> <groupname>
// groupadd -g <id> <groupname>
const baseUID int = 10000
const baseGID int = 10000
const idMAX int = 65534
var (
userCommand string
groupCommand string
cmdTemplates = map[string]string{
"adduser": "--uid %d --shell /bin/false --no-create-home --disabled-login --ingroup %s %s",
"useradd": "-M -u %d -s /bin/false -N -g %s %s",
"addgroup": "--gid %d %s",
"groupadd": "-g %d %s",
}
)
func init() {
// set up which commands are used for adding users/groups dependent on distro
if _, err := resolveBinary("adduser"); err == nil {
userCommand = "adduser"
} else if _, err := resolveBinary("useradd"); err == nil {
userCommand = "useradd"
}
if _, err := resolveBinary("addgroup"); err == nil {
groupCommand = "addgroup"
} else if _, err := resolveBinary("groupadd"); err == nil {
groupCommand = "groupadd"
}
}
func resolveBinary(binname string) (string, error) {
binaryPath, err := exec.LookPath(binname)
if err != nil {
return "", err
}
resolvedPath, err := filepath.EvalSymlinks(binaryPath)
if err != nil {
return "", err
}
//only return no error if the final resolved binary basename
//matches what was searched for
if filepath.Base(resolvedPath) == binname {
return resolvedPath, nil
}
return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath)
}
// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair
// and calls the appropriate helper function to add the group and then
// the user to the group in /etc/group and /etc/passwd respectively.
// This new user's /etc/sub{uid,gid} ranges will be used for user namespace
// mapping ranges in containers.
func AddNamespaceRangesUser(name string) (int, int, error) {
// Find unused uid, gid pair
uid, err := findUnusedUID(baseUID)
if err != nil {
return -1, -1, fmt.Errorf("Unable to find unused UID: %v", err)
}
gid, err := findUnusedGID(baseGID)
if err != nil {
return -1, -1, fmt.Errorf("Unable to find unused GID: %v", err)
}
// First add the group that we will use
if err := addGroup(name, gid); err != nil {
return -1, -1, fmt.Errorf("Error adding group %q: %v", name, err)
}
// Add the user as a member of the group
if err := addUser(name, uid, name); err != nil {
return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err)
}
return uid, gid, nil
}
func addUser(userName string, uid int, groupName string) error {
if userCommand == "" {
return fmt.Errorf("Cannot add user; no useradd/adduser binary found")
}
args := fmt.Sprintf(cmdTemplates[userCommand], uid, groupName, userName)
return execAddCmd(userCommand, args)
}
func addGroup(groupName string, gid int) error {
if groupCommand == "" {
return fmt.Errorf("Cannot add group; no groupadd/addgroup binary found")
}
args := fmt.Sprintf(cmdTemplates[groupCommand], gid, groupName)
// only error out if the error isn't that the group already exists
// if the group exists then our needs are already met
if err := execAddCmd(groupCommand, args); err != nil && !strings.Contains(err.Error(), "already exists") {
return err
}
return nil
}
func execAddCmd(cmd, args string) error {
execCmd := exec.Command(cmd, strings.Split(args, " ")...)
out, err := execCmd.CombinedOutput()
if err != nil {
return fmt.Errorf("Failed to add user/group with error: %v; output: %q", err, string(out))
}
return nil
}
func findUnusedUID(startUID int) (int, error) {
return findUnused("passwd", startUID)
}
func findUnusedGID(startGID int) (int, error) {
return findUnused("group", startGID)
}
func findUnused(file string, id int) (int, error) {
for {
cmdStr := fmt.Sprintf("cat /etc/%s | cut -d: -f3 | grep '^%d$'", file, id)
cmd := exec.Command("sh", "-c", cmdStr)
if err := cmd.Run(); err != nil {
// if a non-zero return code occurs, then we know the ID was not found
// and is usable
if exiterr, ok := err.(*exec.ExitError); ok {
// The program has exited with an exit code != 0
if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
if status.ExitStatus() == 1 {
//no match, we can use this ID
return id, nil
}
}
}
return -1, fmt.Errorf("Error looking in /etc/%s for unused ID: %v", file, err)
}
id++
if id > idMAX {
return -1, fmt.Errorf("Maximum id in %q reached with finding unused numeric ID", file)
}
}
}

View File

@ -0,0 +1,12 @@
// +build !linux

package idtools
import "fmt"
// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair
// and calls the appropriate helper function to add the group and then
// the user to the group in /etc/group and /etc/passwd respectively.
func AddNamespaceRangesUser(name string) (int, int, error) {
return -1, -1, fmt.Errorf("No support for adding users or groups on this OS")
}

View File

@ -0,0 +1,152 @@
package ioutils
import (
"errors"
"io"
"sync"
)
// maxCap is the highest capacity to use in byte slices that buffer data.
const maxCap = 1e6
// blockThreshold is the minimum number of bytes in the buffer which will cause
// a write to BytesPipe to block when allocating a new slice.
const blockThreshold = 1e6
// ErrClosed is returned when Write is called on a closed BytesPipe.
var ErrClosed = errors.New("write to closed BytesPipe")
// BytesPipe is an io.ReadWriteCloser which works similarly to a pipe (queue).
// All written data may be read at most once. Also, BytesPipe allocates
// and releases new byte slices to adjust to current needs, so the buffer
// won't be overgrown after peak loads.
type BytesPipe struct {
mu sync.Mutex
wait *sync.Cond
buf [][]byte // slice of byte-slices of buffered data
lastRead int // index in the first slice to a read point
bufLen int // length of data buffered over the slices
closeErr error // error to return from next Read. set to nil if not closed.
}
// NewBytesPipe creates a new BytesPipe, initialized with the specified slice.
// If buf is nil, it is initialized with a slice of capacity 64.
// buf is adjusted so that len(buf) == 0 while its capacity is preserved.
func NewBytesPipe(buf []byte) *BytesPipe {
if cap(buf) == 0 {
buf = make([]byte, 0, 64)
}
bp := &BytesPipe{
buf: [][]byte{buf[:0]},
}
bp.wait = sync.NewCond(&bp.mu)
return bp
}
// Write writes p to BytesPipe.
// It can allocate new []byte slices in the process of writing.
func (bp *BytesPipe) Write(p []byte) (int, error) {
bp.mu.Lock()
defer bp.mu.Unlock()
written := 0
for {
if bp.closeErr != nil {
return written, ErrClosed
}
// write data to the last buffer
b := bp.buf[len(bp.buf)-1]
// copy data to the current empty allocated area
n := copy(b[len(b):cap(b)], p)
// increment buffered data length
bp.bufLen += n
// include written data in last buffer
bp.buf[len(bp.buf)-1] = b[:len(b)+n]
written += n
// if there was enough room to write all then break
if len(p) == n {
break
}
// more data: write to the next slice
p = p[n:]
// block if too much data is still in the buffer
for bp.bufLen >= blockThreshold {
bp.wait.Wait()
}
// allocate slice that has twice the size of the last unless maximum reached
nextCap := 2 * cap(bp.buf[len(bp.buf)-1])
if nextCap > maxCap {
nextCap = maxCap
}
// add new byte slice to the buffers slice and continue writing
bp.buf = append(bp.buf, make([]byte, 0, nextCap))
}
bp.wait.Broadcast()
return written, nil
}
// CloseWithError causes further reads from a BytesPipe to return immediately.
func (bp *BytesPipe) CloseWithError(err error) error {
bp.mu.Lock()
if err != nil {
bp.closeErr = err
} else {
bp.closeErr = io.EOF
}
bp.wait.Broadcast()
bp.mu.Unlock()
return nil
}
// Close causes further reads from a BytesPipe to return immediately.
func (bp *BytesPipe) Close() error {
return bp.CloseWithError(nil)
}
func (bp *BytesPipe) len() int {
return bp.bufLen - bp.lastRead
}
// Read reads bytes from BytesPipe.
// Data can only be read once.
func (bp *BytesPipe) Read(p []byte) (n int, err error) {
bp.mu.Lock()
defer bp.mu.Unlock()
if bp.len() == 0 {
if bp.closeErr != nil {
return 0, bp.closeErr
}
bp.wait.Wait()
if bp.len() == 0 && bp.closeErr != nil {
return 0, bp.closeErr
}
}
for {
read := copy(p, bp.buf[0][bp.lastRead:])
n += read
bp.lastRead += read
if bp.len() == 0 {
// we have read everything. reset to the beginning.
bp.lastRead = 0
bp.bufLen -= len(bp.buf[0])
bp.buf[0] = bp.buf[0][:0]
break
}
// break if everything was read
if len(p) == read {
break
}
// more buffered data and more asked. read from next slice.
p = p[read:]
bp.lastRead = 0
bp.bufLen -= len(bp.buf[0])
bp.buf[0] = nil // throw away old slice
bp.buf = bp.buf[1:] // switch to next
}
bp.wait.Broadcast()
return
}
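A minimal usage sketch (not part of the vendored file): one goroutine writes into a BytesPipe while another drains it; Close makes the reader see io.EOF once the buffered data is exhausted:

package main

import (
	"fmt"
	"io"
	"log"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils"
)

func main() {
	bp := ioutils.NewBytesPipe(nil)

	go func() {
		for i := 0; i < 3; i++ {
			if _, err := bp.Write([]byte(fmt.Sprintf("chunk-%d ", i))); err != nil {
				log.Println("write:", err)
				return
			}
		}
		bp.Close() // reader gets io.EOF after draining the buffer
	}()

	buf := make([]byte, 8)
	for {
		n, err := bp.Read(buf)
		fmt.Print(string(buf[:n]))
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
	}
	fmt.Println()
}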

View File

@ -0,0 +1,22 @@
package ioutils
import (
"fmt"
"io"
)
// FprintfIfNotEmpty prints the string value if it's not empty
func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) {
if value != "" {
return fmt.Fprintf(w, format, value)
}
return 0, nil
}
// FprintfIfTrue prints the boolean value if it's true
func FprintfIfTrue(w io.Writer, format string, ok bool) (int, error) {
if ok {
return fmt.Fprintf(w, format, ok)
}
return 0, nil
}

View File

@ -0,0 +1,226 @@
package ioutils
import (
"bytes"
"fmt"
"io"
"os"
)
type pos struct {
idx int
offset int64
}
type multiReadSeeker struct {
readers []io.ReadSeeker
pos *pos
posIdx map[io.ReadSeeker]int
}
func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) {
var tmpOffset int64
switch whence {
case os.SEEK_SET:
for i, rdr := range r.readers {
// get size of the current reader
s, err := rdr.Seek(0, os.SEEK_END)
if err != nil {
return -1, err
}
if offset > tmpOffset+s {
if i == len(r.readers)-1 {
rdrOffset := s + (offset - tmpOffset)
if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil {
return -1, err
}
r.pos = &pos{i, rdrOffset}
return offset, nil
}
tmpOffset += s
continue
}
rdrOffset := offset - tmpOffset
idx := i
rdr.Seek(rdrOffset, os.SEEK_SET)
// make sure all following readers are at 0
for _, rdr := range r.readers[i+1:] {
rdr.Seek(0, os.SEEK_SET)
}
if rdrOffset == s && i != len(r.readers)-1 {
idx++
rdrOffset = 0
}
r.pos = &pos{idx, rdrOffset}
return offset, nil
}
case os.SEEK_END:
for _, rdr := range r.readers {
s, err := rdr.Seek(0, os.SEEK_END)
if err != nil {
return -1, err
}
tmpOffset += s
}
r.Seek(tmpOffset+offset, os.SEEK_SET)
return tmpOffset + offset, nil
case os.SEEK_CUR:
if r.pos == nil {
return r.Seek(offset, os.SEEK_SET)
}
// Just return the current offset
if offset == 0 {
return r.getCurOffset()
}
curOffset, err := r.getCurOffset()
if err != nil {
return -1, err
}
rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset)
if err != nil {
return -1, err
}
r.pos = &pos{r.posIdx[rdr], rdrOffset}
return curOffset + offset, nil
default:
return -1, fmt.Errorf("Invalid whence: %d", whence)
}
return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset)
}
func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) {
var rdr io.ReadSeeker
var rdrOffset int64
for i, rdr := range r.readers {
offsetTo, err := r.getOffsetToReader(rdr)
if err != nil {
return nil, -1, err
}
if offsetTo > offset {
rdr = r.readers[i-1]
rdrOffset = offsetTo - offset
break
}
if rdr == r.readers[len(r.readers)-1] {
rdrOffset = offsetTo + offset
break
}
}
return rdr, rdrOffset, nil
}
func (r *multiReadSeeker) getCurOffset() (int64, error) {
var totalSize int64
for _, rdr := range r.readers[:r.pos.idx+1] {
if r.posIdx[rdr] == r.pos.idx {
totalSize += r.pos.offset
break
}
size, err := getReadSeekerSize(rdr)
if err != nil {
return -1, fmt.Errorf("error getting seeker size: %v", err)
}
totalSize += size
}
return totalSize, nil
}
func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) {
var offset int64
for _, r := range r.readers {
if r == rdr {
break
}
size, err := getReadSeekerSize(rdr)
if err != nil {
return -1, err
}
offset += size
}
return offset, nil
}
func (r *multiReadSeeker) Read(b []byte) (int, error) {
if r.pos == nil {
r.pos = &pos{0, 0}
}
bCap := int64(cap(b))
buf := bytes.NewBuffer(nil)
var rdr io.ReadSeeker
for _, rdr = range r.readers[r.pos.idx:] {
readBytes, err := io.CopyN(buf, rdr, bCap)
if err != nil && err != io.EOF {
return -1, err
}
bCap -= readBytes
if bCap == 0 {
break
}
}
rdrPos, err := rdr.Seek(0, os.SEEK_CUR)
if err != nil {
return -1, err
}
r.pos = &pos{r.posIdx[rdr], rdrPos}
return buf.Read(b)
}
func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) {
// save the current position
pos, err := rdr.Seek(0, os.SEEK_CUR)
if err != nil {
return -1, err
}
// get the size
size, err := rdr.Seek(0, os.SEEK_END)
if err != nil {
return -1, err
}
// reset the position
if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil {
return -1, err
}
return size, nil
}
// MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided
// input readseekers. After calling this method the initial position is set to the
// beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances
// to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker.
// Seek can be used over the sum of lengths of all readseekers.
//
// When a MultiReadSeeker is used, no Read and Seek operations should be made on
// its ReadSeeker components. Also, users should make no assumption on the state
// of individual readseekers while the MultiReadSeeker is used.
func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker {
if len(readers) == 1 {
return readers[0]
}
idx := make(map[io.ReadSeeker]int)
for i, rdr := range readers {
idx[rdr] = i
}
return &multiReadSeeker{
readers: readers,
posIdx: idx,
}
}
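A hypothetical sketch of how MultiReadSeeker concatenates readers (assumes the same vendored ioutils import path as above):

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"strings"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils"
)

func main() {
	rs := ioutils.MultiReadSeeker(
		strings.NewReader("hello "),
		strings.NewReader("world"),
	)
	// Read the logical concatenation of both readers.
	all, _ := ioutil.ReadAll(rs)
	fmt.Println(string(all)) // hello world

	// Seek to an absolute offset that falls in the second reader.
	rs.Seek(6, os.SEEK_SET)
	rest, _ := ioutil.ReadAll(rs)
	fmt.Println(string(rest)) // world
}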

View File

@ -0,0 +1,154 @@
package ioutils
import (
"crypto/sha256"
"encoding/hex"
"io"
"github.com/fsouza/go-dockerclient/external/golang.org/x/net/context"
)
type readCloserWrapper struct {
io.Reader
closer func() error
}
func (r *readCloserWrapper) Close() error {
return r.closer()
}
// NewReadCloserWrapper returns a new io.ReadCloser.
func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
return &readCloserWrapper{
Reader: r,
closer: closer,
}
}
type readerErrWrapper struct {
reader io.Reader
closer func()
}
func (r *readerErrWrapper) Read(p []byte) (int, error) {
n, err := r.reader.Read(p)
if err != nil {
r.closer()
}
return n, err
}
// NewReaderErrWrapper returns a new io.Reader.
func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader {
return &readerErrWrapper{
reader: r,
closer: closer,
}
}
// HashData returns the sha256 sum of src.
func HashData(src io.Reader) (string, error) {
h := sha256.New()
if _, err := io.Copy(h, src); err != nil {
return "", err
}
return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
}
// OnEOFReader wraps an io.ReadCloser with a function
// that runs once, either at end of file or when the reader is closed.
type OnEOFReader struct {
Rc io.ReadCloser
Fn func()
}
func (r *OnEOFReader) Read(p []byte) (n int, err error) {
n, err = r.Rc.Read(p)
if err == io.EOF {
r.runFunc()
}
return
}
// Close closes the file and runs the function.
func (r *OnEOFReader) Close() error {
err := r.Rc.Close()
r.runFunc()
return err
}
func (r *OnEOFReader) runFunc() {
if fn := r.Fn; fn != nil {
fn()
r.Fn = nil
}
}
// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read
// operations.
type cancelReadCloser struct {
cancel func()
pR *io.PipeReader // Stream to read from
pW *io.PipeWriter
}
// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the
// context is cancelled. The returned io.ReadCloser must be closed when it is
// no longer needed.
func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser {
pR, pW := io.Pipe()
// Create a context used to signal when the pipe is closed
doneCtx, cancel := context.WithCancel(context.Background())
p := &cancelReadCloser{
cancel: cancel,
pR: pR,
pW: pW,
}
go func() {
_, err := io.Copy(pW, in)
select {
case <-ctx.Done():
// If the context was closed, p.closeWithError
// was already called. Calling it again would
// change the error that Read returns.
default:
p.closeWithError(err)
}
in.Close()
}()
go func() {
for {
select {
case <-ctx.Done():
p.closeWithError(ctx.Err())
case <-doneCtx.Done():
return
}
}
}()
return p
}
// Read wraps the Read method of the pipe that provides data from the wrapped
// ReadCloser.
func (p *cancelReadCloser) Read(buf []byte) (n int, err error) {
return p.pR.Read(buf)
}
// closeWithError closes the wrapper and its underlying reader. It will
// cause future calls to Read to return err.
func (p *cancelReadCloser) closeWithError(err error) {
p.pW.CloseWithError(err)
p.cancel()
}
// Close closes the wrapper and its underlying reader. It will cause
// future calls to Read to return io.EOF.
func (p *cancelReadCloser) Close() error {
p.closeWithError(io.EOF)
return nil
}
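A short sketch of HashData, the simplest entry point in this file (hypothetical standalone program; same assumed vendored import path):

package main

import (
	"fmt"
	"strings"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils"
)

func main() {
	// HashData drains the reader and returns the digest in "sha256:<hex>" form.
	sum, err := ioutils.HashData(strings.NewReader("hello"))
	if err != nil {
		panic(err)
	}
	fmt.Println(sum)
}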

View File

@ -0,0 +1,6 @@
// +build !gccgo
package ioutils
func callSchedulerIfNecessary() {
}

View File

@ -0,0 +1,13 @@
// +build gccgo
package ioutils
import (
"runtime"
)
func callSchedulerIfNecessary() {
// Allow or force the Go scheduler to switch context; without explicitly
// forcing this, execution can hang when using the gccgo implementation.
runtime.Gosched()
}

View File

@ -0,0 +1,10 @@
// +build !windows
package ioutils
import "io/ioutil"
// TempDir on Unix systems is equivalent to ioutil.TempDir.
func TempDir(dir, prefix string) (string, error) {
return ioutil.TempDir(dir, prefix)
}

View File

@ -0,0 +1,18 @@
// +build windows
package ioutils
import (
"io/ioutil"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath"
)
// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format.
func TempDir(dir, prefix string) (string, error) {
tempDir, err := ioutil.TempDir(dir, prefix)
if err != nil {
return "", err
}
return longpath.AddPrefix(tempDir), nil
}

View File

@ -0,0 +1,92 @@
package ioutils
import (
"errors"
"io"
"net/http"
"sync"
)
// WriteFlusher wraps the Write and Flush operations, ensuring that every write
// is a flush. In addition, the Close method can be called to intercept
// Read/Write calls if the target's lifecycle has already ended.
type WriteFlusher struct {
mu sync.Mutex
w io.Writer
flusher http.Flusher
flushed bool
closed error
// TODO(stevvooe): Use channel for closed instead, remove mutex. Using a
// channel will allow one to properly order the operations.
}
var errWriteFlusherClosed = errors.New("writeflusher: closed")
func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
wf.mu.Lock()
defer wf.mu.Unlock()
if wf.closed != nil {
return 0, wf.closed
}
n, err = wf.w.Write(b)
wf.flush() // every write is a flush.
return n, err
}
// Flush the stream immediately.
func (wf *WriteFlusher) Flush() {
wf.mu.Lock()
defer wf.mu.Unlock()
wf.flush()
}
// flush the stream immediately without taking a lock. Used internally.
func (wf *WriteFlusher) flush() {
if wf.closed != nil {
return
}
wf.flushed = true
wf.flusher.Flush()
}
// Flushed returns the state of flushed.
// It returns true if flushed and false otherwise.
func (wf *WriteFlusher) Flushed() bool {
// BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to
// be used to detect whether or not a response code has been issued.
// Another hook should be used instead.
wf.mu.Lock()
defer wf.mu.Unlock()
return wf.flushed
}
// Close closes the write flusher, disallowing any further writes to the
// target. After the flusher is closed, all calls to write or flush will
// result in an error.
func (wf *WriteFlusher) Close() error {
wf.mu.Lock()
defer wf.mu.Unlock()
if wf.closed != nil {
return wf.closed
}
wf.closed = errWriteFlusherClosed
return nil
}
// NewWriteFlusher returns a new WriteFlusher.
func NewWriteFlusher(w io.Writer) *WriteFlusher {
var flusher http.Flusher
if f, ok := w.(http.Flusher); ok {
flusher = f
} else {
flusher = &NopFlusher{}
}
return &WriteFlusher{w: w, flusher: flusher}
}
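A sketch of WriteFlusher in an HTTP handler, where flushing after every write keeps a streamed response moving (hypothetical example; the vendored ioutils import path is assumed):

package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils"
)

func stream(w http.ResponseWriter, r *http.Request) {
	// Every Write below is flushed to the client immediately.
	wf := ioutils.NewWriteFlusher(w)
	defer wf.Close()
	for i := 0; i < 3; i++ {
		fmt.Fprintf(wf, "tick %d\n", i)
		time.Sleep(time.Second)
	}
}

func main() {
	http.HandleFunc("/stream", stream)
	http.ListenAndServe(":8080", nil)
}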

View File

@ -0,0 +1,66 @@
package ioutils
import "io"
// NopWriter represents a type whose Write operation is a no-op.
type NopWriter struct{}
func (*NopWriter) Write(buf []byte) (int, error) {
return len(buf), nil
}
type nopWriteCloser struct {
io.Writer
}
func (w *nopWriteCloser) Close() error { return nil }
// NopWriteCloser returns a nopWriteCloser.
func NopWriteCloser(w io.Writer) io.WriteCloser {
return &nopWriteCloser{w}
}
// NopFlusher represents a type whose Flush operation is a no-op.
type NopFlusher struct{}
// Flush is a nop operation.
func (f *NopFlusher) Flush() {}
type writeCloserWrapper struct {
io.Writer
closer func() error
}
func (r *writeCloserWrapper) Close() error {
return r.closer()
}
// NewWriteCloserWrapper returns a new io.WriteCloser.
func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
return &writeCloserWrapper{
Writer: r,
closer: closer,
}
}
// WriteCounter wraps a concrete io.Writer and holds a count of the number
// of bytes written to the writer during a "session".
// This can be convenient when the write return value is masked
// (e.g., json.Encoder.Encode()).
type WriteCounter struct {
Count int64
Writer io.Writer
}
// NewWriteCounter returns a new WriteCounter.
func NewWriteCounter(w io.Writer) *WriteCounter {
return &WriteCounter{
Writer: w,
}
}
func (wc *WriteCounter) Write(p []byte) (count int, err error) {
count, err = wc.Writer.Write(p)
wc.Count += int64(count)
return
}
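A sketch of WriteCounter wrapping a writer whose byte count is otherwise hidden, as in the json.Encoder case mentioned above (hypothetical example; the vendored ioutils import path is assumed):

package main

import (
	"encoding/json"
	"fmt"
	"os"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils"
)

func main() {
	wc := ioutils.NewWriteCounter(os.Stdout)
	// json.Encoder.Encode reports only an error, not a byte count.
	if err := json.NewEncoder(wc).Encode(map[string]string{"status": "ok"}); err != nil {
		panic(err)
	}
	fmt.Fprintf(os.Stderr, "wrote %d bytes\n", wc.Count)
}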

View File

@ -0,0 +1,26 @@
// Package longpath introduces some constants and helper functions for handling long paths
// in Windows, which are expected to be prepended with `\\?\` and followed by either
// a drive letter, a UNC server\share, or a volume identifier.
package longpath
import (
"strings"
)
// Prefix is the longpath prefix for Windows file paths.
const Prefix = `\\?\`
// AddPrefix will add the Windows long path prefix to the path provided if
// it does not already have it.
func AddPrefix(path string) string {
if !strings.HasPrefix(path, Prefix) {
if strings.HasPrefix(path, `\\`) {
// This is a UNC path, so we need to add 'UNC' to the path as well.
path = Prefix + `UNC` + path[1:]
} else {
path = Prefix + path
}
}
return path
}
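A sketch of AddPrefix on a drive-letter path and a UNC path (hypothetical example; the vendored longpath import path appears earlier in this commit):

package main

import (
	"fmt"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath"
)

func main() {
	// Drive-letter path: the prefix is simply prepended.
	fmt.Println(longpath.AddPrefix(`C:\some\deep\path`)) // \\?\C:\some\deep\path
	// UNC path: one leading backslash is replaced by `\\?\UNC`.
	fmt.Println(longpath.AddPrefix(`\\server\share\dir`)) // \\?\UNC\server\share\dir
}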

View File

@ -0,0 +1,119 @@
// Package pools provides a collection of pools which provide various
// data types with buffers. These can be used to lower the number of
// memory allocations and reuse buffers.
//
// New pools should be added to this package to allow them to be
// shared across packages.
//
// Utility functions which operate on pools should be added to this
// package to allow them to be reused.
package pools
import (
"bufio"
"io"
"sync"
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils"
)
var (
// BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer.
BufioReader32KPool *BufioReaderPool
// BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer.
BufioWriter32KPool *BufioWriterPool
)
const buffer32K = 32 * 1024
// BufioReaderPool is a pool of bufio.Readers backed by sync.Pool.
type BufioReaderPool struct {
pool sync.Pool
}
func init() {
BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K)
BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K)
}
// newBufioReaderPoolWithSize is unexported because new pools should be
// added here to be shared where required.
func newBufioReaderPoolWithSize(size int) *BufioReaderPool {
pool := sync.Pool{
New: func() interface{} { return bufio.NewReaderSize(nil, size) },
}
return &BufioReaderPool{pool: pool}
}
// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool.
func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader {
buf := bufPool.pool.Get().(*bufio.Reader)
buf.Reset(r)
return buf
}
// Put puts the bufio.Reader back into the pool.
func (bufPool *BufioReaderPool) Put(b *bufio.Reader) {
b.Reset(nil)
bufPool.pool.Put(b)
}
// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy.
func Copy(dst io.Writer, src io.Reader) (written int64, err error) {
buf := BufioReader32KPool.Get(src)
written, err = io.Copy(dst, buf)
BufioReader32KPool.Put(buf)
return
}
// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back
// into the pool and closes the reader if it's an io.ReadCloser.
func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser {
return ioutils.NewReadCloserWrapper(r, func() error {
if readCloser, ok := r.(io.ReadCloser); ok {
readCloser.Close()
}
bufPool.Put(buf)
return nil
})
}
// BufioWriterPool is a pool of bufio.Writers backed by sync.Pool.
type BufioWriterPool struct {
pool sync.Pool
}
// newBufioWriterPoolWithSize is unexported because new pools should be
// added here to be shared where required.
func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
pool := sync.Pool{
New: func() interface{} { return bufio.NewWriterSize(nil, size) },
}
return &BufioWriterPool{pool: pool}
}
// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool.
func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer {
buf := bufPool.pool.Get().(*bufio.Writer)
buf.Reset(w)
return buf
}
// Put puts the bufio.Writer back into the pool.
func (bufPool *BufioWriterPool) Put(b *bufio.Writer) {
b.Reset(nil)
bufPool.pool.Put(b)
}
// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back
// into the pool and closes the writer if it's an io.WriteCloser.
func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser {
return ioutils.NewWriteCloserWrapper(w, func() error {
buf.Flush()
if writeCloser, ok := w.(io.WriteCloser); ok {
writeCloser.Close()
}
bufPool.Put(buf)
return nil
})
}
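A sketch of the two ways this package is typically consumed, pooled copies and explicitly borrowed writers (hypothetical example; the vendored pools import path is an assumption based on the paths above):

package main

import (
	"os"
	"strings"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools"
)

func main() {
	// Copy reuses a pooled 32K bufio.Reader instead of allocating one per call.
	if _, err := pools.Copy(os.Stdout, strings.NewReader("pooled copy\n")); err != nil {
		panic(err)
	}

	// Writers can also be borrowed and returned explicitly.
	w := pools.BufioWriter32KPool.Get(os.Stdout)
	w.WriteString("pooled write\n")
	w.Flush()
	pools.BufioWriter32KPool.Put(w)
}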

View File

@ -0,0 +1,11 @@
package promise
// Go is a basic promise implementation: it wraps a call to a function in a goroutine
// and returns a channel which will later deliver the function's return value.
func Go(f func() error) chan error {
ch := make(chan error, 1)
go func() {
ch <- f()
}()
return ch
}
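A sketch of waiting on a backgrounded function through the returned channel (hypothetical example; the vendored promise import path is an assumption):

package main

import (
	"errors"
	"fmt"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise"
)

func main() {
	errCh := promise.Go(func() error {
		// Long-running work happens here.
		return errors.New("boom")
	})
	// Do other work, then collect the result.
	if err := <-errCh; err != nil {
		fmt.Println("background task failed:", err)
	}
}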

View File

@ -0,0 +1,175 @@
package stdcopy
import (
"encoding/binary"
"errors"
"io"
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
)
const (
stdWriterPrefixLen = 8
stdWriterFdIndex = 0
stdWriterSizeIndex = 4
startingBufLen = 32*1024 + stdWriterPrefixLen + 1
)
// StdType prefixes type and length to standard stream.
type StdType [stdWriterPrefixLen]byte
var (
// Stdin represents standard input stream type.
Stdin = StdType{0: 0}
// Stdout represents standard output stream type.
Stdout = StdType{0: 1}
// Stderr represents standard error stream type.
Stderr = StdType{0: 2}
)
// StdWriter is a wrapper of io.Writer with extra customized info.
type StdWriter struct {
io.Writer
prefix StdType
sizeBuf []byte
}
func (w *StdWriter) Write(buf []byte) (n int, err error) {
var n1, n2 int
if w == nil || w.Writer == nil {
return 0, errors.New("Writer not instantiated")
}
binary.BigEndian.PutUint32(w.prefix[4:], uint32(len(buf)))
n1, err = w.Writer.Write(w.prefix[:])
if err != nil {
n = n1 - stdWriterPrefixLen
} else {
n2, err = w.Writer.Write(buf)
n = n1 + n2 - stdWriterPrefixLen
}
if n < 0 {
n = 0
}
return
}
// NewStdWriter instantiates a new Writer.
// Everything written to it will be encapsulated using a custom format,
// and written to the underlying `w` stream.
// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection.
// `t` indicates the id of the stream to encapsulate.
// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr.
func NewStdWriter(w io.Writer, t StdType) *StdWriter {
return &StdWriter{
Writer: w,
prefix: t,
sizeBuf: make([]byte, 4),
}
}
var errInvalidStdHeader = errors.New("Unrecognized input header")
// StdCopy is a modified version of io.Copy.
//
// StdCopy will demultiplex `src`, assuming that it contains two streams,
// previously multiplexed together using a StdWriter instance.
// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`.
//
// StdCopy will read until it hits EOF on `src`. It will then return a nil error.
// In other words: if `err` is non nil, it indicates a real underlying error.
//
// `written` will hold the total number of bytes written to `dstout` and `dsterr`.
func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) {
var (
buf = make([]byte, startingBufLen)
bufLen = len(buf)
nr, nw int
er, ew error
out io.Writer
frameSize int
)
for {
// Make sure we have at least a full header
for nr < stdWriterPrefixLen {
var nr2 int
nr2, er = src.Read(buf[nr:])
nr += nr2
if er == io.EOF {
if nr < stdWriterPrefixLen {
logrus.Debugf("Corrupted prefix: %v", buf[:nr])
return written, nil
}
break
}
if er != nil {
logrus.Debugf("Error reading header: %s", er)
return 0, er
}
}
// Check the first byte to know where to write
switch buf[stdWriterFdIndex] {
case 0:
fallthrough
case 1:
// Write on stdout
out = dstout
case 2:
// Write on stderr
out = dsterr
default:
logrus.Debugf("Error selecting output fd: (%d)", buf[stdWriterFdIndex])
return 0, errInvalidStdHeader
}
// Retrieve the size of the frame
frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4]))
logrus.Debugf("framesize: %d", frameSize)
// Check if the buffer is big enough to read the frame.
// Extend it if necessary.
if frameSize+stdWriterPrefixLen > bufLen {
logrus.Debugf("Extending buffer cap by %d (was %d)", frameSize+stdWriterPrefixLen-bufLen+1, len(buf))
buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...)
bufLen = len(buf)
}
// While the amount of bytes read is less than the size of the frame + header, we keep reading
for nr < frameSize+stdWriterPrefixLen {
var nr2 int
nr2, er = src.Read(buf[nr:])
nr += nr2
if er == io.EOF {
if nr < frameSize+stdWriterPrefixLen {
logrus.Debugf("Corrupted frame: %v", buf[stdWriterPrefixLen:nr])
return written, nil
}
break
}
if er != nil {
logrus.Debugf("Error reading frame: %s", er)
return 0, er
}
}
// Write the retrieved frame (without header)
nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen])
if ew != nil {
logrus.Debugf("Error writing frame: %s", ew)
return 0, ew
}
// If the frame has not been fully written: error
if nw != frameSize {
logrus.Debugf("Error Short Write: (%d on %d)", nw, frameSize)
return 0, io.ErrShortWrite
}
written += int64(nw)
// Move the rest of the buffer to the beginning
copy(buf, buf[frameSize+stdWriterPrefixLen:])
// Move the index
nr -= frameSize + stdWriterPrefixLen
}
}
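A round-trip sketch: multiplex stdout and stderr frames with StdWriter, then demultiplex them with StdCopy (hypothetical example; the vendored stdcopy import path is an assumption):

package main

import (
	"bytes"
	"os"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy"
)

func main() {
	// Mux two streams into one buffer, each write prefixed with a frame header.
	var muxed bytes.Buffer
	outW := stdcopy.NewStdWriter(&muxed, stdcopy.Stdout)
	errW := stdcopy.NewStdWriter(&muxed, stdcopy.Stderr)
	outW.Write([]byte("to stdout\n"))
	errW.Write([]byte("to stderr\n"))

	// Demux the frames back onto the real streams.
	if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, &muxed); err != nil {
		panic(err)
	}
}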

View File

@ -0,0 +1,47 @@
package system
import (
"os"
"syscall"
"time"
"unsafe"
)
var (
maxTime time.Time
)
func init() {
if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 {
// This is a 64 bit timespec
// os.Chtimes limits time to the following
maxTime = time.Unix(0, 1<<63-1)
} else {
// This is a 32 bit timespec
maxTime = time.Unix(1<<31-1, 0)
}
}
// Chtimes changes the access time and modified time of a file at the given path
func Chtimes(name string, atime time.Time, mtime time.Time) error {
unixMinTime := time.Unix(0, 0)
unixMaxTime := maxTime
// If the modified time is prior to the Unix Epoch or after the
// end of Unix Time, os.Chtimes has undefined behavior;
// default to the Unix Epoch in this case, just in case.
if atime.Before(unixMinTime) || atime.After(unixMaxTime) {
atime = unixMinTime
}
if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) {
mtime = unixMinTime
}
if err := os.Chtimes(name, atime, mtime); err != nil {
return err
}
return nil
}
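A sketch of Chtimes clamping an out-of-range timestamp instead of passing it to os.Chtimes (hypothetical example; the vendored system import path is an assumption):

package main

import (
	"io/ioutil"
	"time"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
)

func main() {
	f, err := ioutil.TempFile("", "chtimes")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	// A pre-epoch time would be undefined for os.Chtimes; Chtimes clamps it to the epoch.
	before := time.Unix(0, 0).Add(-time.Hour)
	now := time.Now()
	if err := system.Chtimes(f.Name(), before, now); err != nil {
		panic(err)
	}
}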

View File

@ -0,0 +1,10 @@
package system
import (
"errors"
)
var (
// ErrNotSupportedPlatform means the platform is not supported.
ErrNotSupportedPlatform = errors.New("platform and architecture is not supported")
)

View File

@ -0,0 +1,83 @@
package system
// This file implements syscalls for Win32 events which are not implemented
// in golang.
import (
"syscall"
"unsafe"
)
var (
procCreateEvent = modkernel32.NewProc("CreateEventW")
procOpenEvent = modkernel32.NewProc("OpenEventW")
procSetEvent = modkernel32.NewProc("SetEvent")
procResetEvent = modkernel32.NewProc("ResetEvent")
procPulseEvent = modkernel32.NewProc("PulseEvent")
)
// CreateEvent implements win32 CreateEventW func in golang. It will create an event object.
func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, initialState bool, name string) (handle syscall.Handle, err error) {
namep, _ := syscall.UTF16PtrFromString(name)
var _p1 uint32
if manualReset {
_p1 = 1
}
var _p2 uint32
if initialState {
_p2 = 1
}
r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep)))
use(unsafe.Pointer(namep))
handle = syscall.Handle(r0)
if handle == syscall.InvalidHandle {
err = e1
}
return
}
// OpenEvent implements win32 OpenEventW func in golang. It opens an event object.
func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle syscall.Handle, err error) {
namep, _ := syscall.UTF16PtrFromString(name)
var _p1 uint32
if inheritHandle {
_p1 = 1
}
r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep)))
use(unsafe.Pointer(namep))
handle = syscall.Handle(r0)
if handle == syscall.InvalidHandle {
err = e1
}
return
}
// SetEvent implements win32 SetEvent func in golang.
func SetEvent(handle syscall.Handle) (err error) {
return setResetPulse(handle, procSetEvent)
}
// ResetEvent implements win32 ResetEvent func in golang.
func ResetEvent(handle syscall.Handle) (err error) {
return setResetPulse(handle, procResetEvent)
}
// PulseEvent implements win32 PulseEvent func in golang.
func PulseEvent(handle syscall.Handle) (err error) {
return setResetPulse(handle, procPulseEvent)
}
func setResetPulse(handle syscall.Handle, proc *syscall.LazyProc) (err error) {
r0, _, _ := proc.Call(uintptr(handle))
if r0 != 0 {
err = syscall.Errno(r0)
}
return
}
var temp unsafe.Pointer
// use ensures a variable is kept alive and not freed by the GC while still needed
func use(p unsafe.Pointer) {
temp = p
}

View File

@ -0,0 +1,19 @@
// +build !windows
package system
import (
"os"
"path/filepath"
)
// MkdirAll creates a directory named path along with any necessary parents,
// with permission specified by attribute perm for all dir created.
func MkdirAll(path string, perm os.FileMode) error {
return os.MkdirAll(path, perm)
}
// IsAbs is a platform-specific wrapper for filepath.IsAbs.
func IsAbs(path string) bool {
return filepath.IsAbs(path)
}

View File

@ -0,0 +1,82 @@
// +build windows
package system
import (
"os"
"path/filepath"
"regexp"
"strings"
"syscall"
)
// MkdirAll implementation that is volume path aware for Windows.
func MkdirAll(path string, perm os.FileMode) error {
if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) {
return nil
}
// The rest of this method is copied from os.MkdirAll and should be kept
// as-is to ensure compatibility.
// Fast path: if we can tell whether path is a directory or file, stop with success or error.
dir, err := os.Stat(path)
if err == nil {
if dir.IsDir() {
return nil
}
return &os.PathError{
Op: "mkdir",
Path: path,
Err: syscall.ENOTDIR,
}
}
// Slow path: make sure parent exists and then call Mkdir for path.
i := len(path)
for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
i--
}
j := i
for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
j--
}
if j > 1 {
// Create parent
err = MkdirAll(path[0:j-1], perm)
if err != nil {
return err
}
}
// Parent now exists; invoke Mkdir and use its result.
err = os.Mkdir(path, perm)
if err != nil {
// Handle arguments like "foo/." by
// double-checking that directory doesn't exist.
dir, err1 := os.Lstat(path)
if err1 == nil && dir.IsDir() {
return nil
}
return err
}
return nil
}
// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows,
// golang filepath.IsAbs does not consider a path \windows\system32 as absolute
// as it doesn't start with a drive-letter/colon combination. However, in
// docker we need to verify things such as WORKDIR /windows/system32 in
// a Dockerfile (which gets translated to \windows\system32 when being processed
// by the daemon). This SHOULD be treated as absolute from a docker processing
// perspective.
func IsAbs(path string) bool {
if !filepath.IsAbs(path) {
if !strings.HasPrefix(path, string(os.PathSeparator)) {
return false
}
}
return true
}

View File

@ -0,0 +1,19 @@
// +build !windows
package system
import (
"syscall"
)
// Lstat takes a path to a file and returns
// a system.StatT type pertaining to that file.
//
// Throws an error if the file does not exist
func Lstat(path string) (*StatT, error) {
s := &syscall.Stat_t{}
if err := syscall.Lstat(path, s); err != nil {
return nil, err
}
return fromStatT(s)
}

View File

@ -0,0 +1,25 @@
// +build windows
package system
import (
"os"
)
// Lstat calls os.Lstat to get a fileinfo interface back.
// This is then copied into our own locally defined structure.
// Note the Linux version uses fromStatT to do the copy back,
// but that is not strictly necessary when already in an OS-specific module.
func Lstat(path string) (*StatT, error) {
fi, err := os.Lstat(path)
if err != nil {
return nil, err
}
return &StatT{
name: fi.Name(),
size: fi.Size(),
mode: fi.Mode(),
modTime: fi.ModTime(),
isDir: fi.IsDir()}, nil
}

View File

@ -0,0 +1,17 @@
package system
// MemInfo contains memory statistics of the host system.
type MemInfo struct {
// Total usable RAM (i.e. physical RAM minus a few reserved bits and the
// kernel binary code).
MemTotal int64
// Amount of free memory.
MemFree int64
// Total amount of swap space available.
SwapTotal int64
// Amount of swap space that is currently unused.
SwapFree int64
}

View File

@ -0,0 +1,66 @@
package system
import (
"bufio"
"io"
"os"
"strconv"
"strings"
"github.com/fsouza/go-dockerclient/external/github.com/docker/go-units"
)
// ReadMemInfo retrieves memory statistics of the host system and returns a
// MemInfo type.
func ReadMemInfo() (*MemInfo, error) {
file, err := os.Open("/proc/meminfo")
if err != nil {
return nil, err
}
defer file.Close()
return parseMemInfo(file)
}
// parseMemInfo parses the /proc/meminfo file into
// a MemInfo object given an io.Reader to the file.
//
// Throws error if there are problems reading from the file
func parseMemInfo(reader io.Reader) (*MemInfo, error) {
meminfo := &MemInfo{}
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
// Expected format: ["MemTotal:", "1234", "kB"]
parts := strings.Fields(scanner.Text())
// Sanity checks: Skip malformed entries.
if len(parts) < 3 || parts[2] != "kB" {
continue
}
// Convert to bytes.
size, err := strconv.Atoi(parts[1])
if err != nil {
continue
}
bytes := int64(size) * units.KiB
switch parts[0] {
case "MemTotal:":
meminfo.MemTotal = bytes
case "MemFree:":
meminfo.MemFree = bytes
case "SwapTotal:":
meminfo.SwapTotal = bytes
case "SwapFree:":
meminfo.SwapFree = bytes
}
}
// Handle errors that may have occurred during the reading of the file.
if err := scanner.Err(); err != nil {
return nil, err
}
return meminfo, nil
}
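A sketch of reading the parsed values on a Linux host (hypothetical example; the vendored system import path is an assumption):

package main

import (
	"fmt"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
)

func main() {
	mi, err := system.ReadMemInfo()
	if err != nil {
		panic(err)
	}
	// Values are in bytes; shift down to MiB for display.
	fmt.Printf("total: %d MiB, free: %d MiB\n", mi.MemTotal>>20, mi.MemFree>>20)
}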

View File

@ -0,0 +1,8 @@
// +build !linux,!windows
package system
// ReadMemInfo is not supported on platforms other than linux and windows.
func ReadMemInfo() (*MemInfo, error) {
return nil, ErrNotSupportedPlatform
}

View File

@ -0,0 +1,44 @@
package system
import (
"syscall"
"unsafe"
)
var (
modkernel32 = syscall.NewLazyDLL("kernel32.dll")
procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx")
)
// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx
// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx
type memorystatusex struct {
dwLength uint32
dwMemoryLoad uint32
ullTotalPhys uint64
ullAvailPhys uint64
ullTotalPageFile uint64
ullAvailPageFile uint64
ullTotalVirtual uint64
ullAvailVirtual uint64
ullAvailExtendedVirtual uint64
}
// ReadMemInfo retrieves memory statistics of the host system and returns a
// MemInfo type.
func ReadMemInfo() (*MemInfo, error) {
msi := &memorystatusex{
dwLength: 64,
}
r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi)))
if r1 == 0 {
return &MemInfo{}, nil
}
return &MemInfo{
MemTotal: int64(msi.ullTotalPhys),
MemFree: int64(msi.ullAvailPhys),
SwapTotal: int64(msi.ullTotalPageFile),
SwapFree: int64(msi.ullAvailPageFile),
}, nil
}

View File

@ -0,0 +1,22 @@
// +build !windows
package system
import (
"syscall"
)
// Mknod creates a filesystem node (file, device special file or named pipe) named path
// with attributes specified by mode and dev.
func Mknod(path string, mode uint32, dev int) error {
return syscall.Mknod(path, mode, dev)
}
// Mkdev is used to build the value of linux devices (in /dev/) which specifies major
// and minor number of the newly created device special file.
// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes.
// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major,
// then the top 12 bits of the minor.
func Mkdev(major int64, minor int64) uint32 {
return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
}
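A sketch of the Mkdev bit layout using /dev/null's well-known 1:3 device number; the Mknod call itself is left commented out because it requires CAP_MKNOD (hypothetical example; the vendored system import path is an assumption):

package main

import (
	"fmt"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
)

func main() {
	// Major 1, minor 3 packs to 0x103 with the layout described above.
	dev := system.Mkdev(1, 3)
	fmt.Printf("packed device number: %#x\n", dev)
	// With sufficient privileges this would create a character device node:
	// system.Mknod("/tmp/mynull", syscall.S_IFCHR|0666, int(dev))
}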

View File

@ -0,0 +1,13 @@
// +build windows
package system
// Mknod is not implemented on Windows.
func Mknod(path string, mode uint32, dev int) error {
return ErrNotSupportedPlatform
}
// Mkdev is not implemented on Windows.
func Mkdev(major int64, minor int64) uint32 {
panic("Mkdev not implemented on Windows.")
}

View File

@ -0,0 +1,8 @@
// +build !windows
package system
// DefaultPathEnv is unix style list of directories to search for
// executables. Each directory is separated from the next by a colon
// ':' character.
const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"

View File

@ -0,0 +1,7 @@
// +build windows
package system
// DefaultPathEnv is deliberately empty on Windows as the default path will be set by
// the container. Docker has no context of what the default path should be.
const DefaultPathEnv = ""

View File

@ -0,0 +1,53 @@
// +build !windows
package system
import (
"syscall"
)
// StatT type contains status of a file. It contains metadata
// like permission, owner, group, size, etc about a file.
type StatT struct {
mode uint32
uid uint32
gid uint32
rdev uint64
size int64
mtim syscall.Timespec
}
// Mode returns file's permission mode.
func (s StatT) Mode() uint32 {
return s.mode
}
// UID returns file's user id of owner.
func (s StatT) UID() uint32 {
return s.uid
}
// GID returns file's group id of owner.
func (s StatT) GID() uint32 {
return s.gid
}
// Rdev returns file's device ID (if it's special file).
func (s StatT) Rdev() uint64 {
return s.rdev
}
// Size returns file's size.
func (s StatT) Size() int64 {
return s.size
}
// Mtim returns file's last modification time.
func (s StatT) Mtim() syscall.Timespec {
return s.mtim
}
// GetLastModification returns file's last modification time.
func (s StatT) GetLastModification() syscall.Timespec {
return s.Mtim()
}

View File

@ -0,0 +1,27 @@
package system
import (
"syscall"
)
// fromStatT converts a syscall.Stat_t type to a system.StatT type
func fromStatT(s *syscall.Stat_t) (*StatT, error) {
return &StatT{size: s.Size,
mode: uint32(s.Mode),
uid: s.Uid,
gid: s.Gid,
rdev: uint64(s.Rdev),
mtim: s.Mtimespec}, nil
}
// Stat takes a path to a file and returns
// a system.StatT type pertaining to that file.
//
// Throws an error if the file does not exist
func Stat(path string) (*StatT, error) {
s := &syscall.Stat_t{}
if err := syscall.Stat(path, s); err != nil {
return nil, err
}
return fromStatT(s)
}

View File

@ -0,0 +1,33 @@
package system
import (
"syscall"
)
// fromStatT converts a syscall.Stat_t type to a system.StatT type
func fromStatT(s *syscall.Stat_t) (*StatT, error) {
return &StatT{size: s.Size,
mode: s.Mode,
uid: s.Uid,
gid: s.Gid,
rdev: s.Rdev,
mtim: s.Mtim}, nil
}
// FromStatT exists only on linux, and loads a system.StatT from a
// syscall.Stat_t.
func FromStatT(s *syscall.Stat_t) (*StatT, error) {
return fromStatT(s)
}
// Stat takes a path to a file and returns
// a system.StatT type pertaining to that file.
//
// Throws an error if the file does not exist
func Stat(path string) (*StatT, error) {
s := &syscall.Stat_t{}
if err := syscall.Stat(path, s); err != nil {
return nil, err
}
return fromStatT(s)
}

View File

@ -0,0 +1,17 @@
// +build solaris
package system
import (
"syscall"
)
// fromStatT creates a system.StatT type from a syscall.Stat_t type
func fromStatT(s *syscall.Stat_t) (*StatT, error) {
return &StatT{size: s.Size,
mode: uint32(s.Mode),
uid: s.Uid,
gid: s.Gid,
rdev: uint64(s.Rdev),
mtim: s.Mtim}, nil
}

View File

@ -0,0 +1,17 @@
// +build !linux,!windows,!freebsd,!solaris
package system
import (
"syscall"
)
// fromStatT creates a system.StatT type from a syscall.Stat_t type
func fromStatT(s *syscall.Stat_t) (*StatT, error) {
return &StatT{size: s.Size,
mode: uint32(s.Mode),
uid: s.Uid,
gid: s.Gid,
rdev: uint64(s.Rdev),
mtim: s.Mtimespec}, nil
}

View File

@ -0,0 +1,43 @@
// +build windows
package system
import (
"os"
"time"
)
// StatT type contains status of a file. It contains metadata
// like name, permission, size, etc about a file.
type StatT struct {
name string
size int64
mode os.FileMode
modTime time.Time
isDir bool
}
// Name returns file's name.
func (s StatT) Name() string {
return s.name
}
// Size returns file's size.
func (s StatT) Size() int64 {
return s.size
}
// Mode returns file's permission mode.
func (s StatT) Mode() os.FileMode {
return s.mode
}
// ModTime returns file's last modification time.
func (s StatT) ModTime() time.Time {
return s.modTime
}
// IsDir returns whether file is actually a directory.
func (s StatT) IsDir() bool {
return s.isDir
}

View File

@ -0,0 +1,11 @@
// +build linux freebsd
package system
import "syscall"
// Unmount is a platform-specific helper function to call
// the unmount syscall.
func Unmount(dest string) error {
return syscall.Unmount(dest, 0)
}

View File

@ -0,0 +1,36 @@
package system
import (
"fmt"
"syscall"
)
// OSVersion is a wrapper for Windows version information
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx
type OSVersion struct {
Version uint32
MajorVersion uint8
MinorVersion uint8
Build uint16
}
// GetOSVersion gets the operating system version on Windows. Note that
// docker.exe must be manifested to get the correct version information.
func GetOSVersion() (OSVersion, error) {
var err error
osv := OSVersion{}
osv.Version, err = syscall.GetVersion()
if err != nil {
return osv, fmt.Errorf("Failed to call GetVersion()")
}
osv.MajorVersion = uint8(osv.Version & 0xFF)
osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF)
osv.Build = uint16(osv.Version >> 16)
return osv, nil
}
// Unmount is a platform-specific helper function to call
// the unmount syscall. Not supported on Windows
func Unmount(dest string) error {
return nil
}

View File

@ -0,0 +1,13 @@
// +build !windows
package system
import (
"syscall"
)
// Umask sets current process's file mode creation mask to newmask
// and returns oldmask.
func Umask(newmask int) (oldmask int, err error) {
return syscall.Umask(newmask), nil
}

View File

@ -0,0 +1,9 @@
// +build windows
package system
// Umask is not supported on the windows platform.
func Umask(newmask int) (oldmask int, err error) {
// should not be called on cli code path
return 0, ErrNotSupportedPlatform
}

View File

@ -0,0 +1,8 @@
package system
import "syscall"
// LUtimesNano is not supported on the darwin platform.
func LUtimesNano(path string, ts []syscall.Timespec) error {
return ErrNotSupportedPlatform
}

View File

@ -0,0 +1,22 @@
package system
import (
"syscall"
"unsafe"
)
// LUtimesNano is used to change access and modification time of the specified path.
// It's used for symbolic link files because syscall.UtimesNano doesn't support a NOFOLLOW flag atm.
func LUtimesNano(path string, ts []syscall.Timespec) error {
var _path *byte
_path, err := syscall.BytePtrFromString(path)
if err != nil {
return err
}
if _, _, err := syscall.Syscall(syscall.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != syscall.ENOSYS {
return err
}
return nil
}

View File

@ -0,0 +1,26 @@
package system
import (
"syscall"
"unsafe"
)
// LUtimesNano is used to change access and modification time of the specified path.
// It's used for symbolic link files because syscall.UtimesNano doesn't support a NOFOLLOW flag atm.
func LUtimesNano(path string, ts []syscall.Timespec) error {
// These are not currently available in syscall
atFdCwd := -100
atSymLinkNoFollow := 0x100
var _path *byte
_path, err := syscall.BytePtrFromString(path)
if err != nil {
return err
}
if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(atSymLinkNoFollow), 0, 0); err != 0 && err != syscall.ENOSYS {
return err
}
return nil
}

View File

@ -0,0 +1,10 @@
// +build !linux,!freebsd,!darwin
package system
import "syscall"
// LUtimesNano is not supported on platforms other than linux, freebsd and darwin.
func LUtimesNano(path string, ts []syscall.Timespec) error {
return ErrNotSupportedPlatform
}

View File

@ -0,0 +1,63 @@
package system
import (
"syscall"
"unsafe"
)
// Lgetxattr retrieves the value of the extended attribute identified by attr
// and associated with the given path in the file system.
// It returns a nil slice and nil error if the xattr is not set.
func Lgetxattr(path string, attr string) ([]byte, error) {
pathBytes, err := syscall.BytePtrFromString(path)
if err != nil {
return nil, err
}
attrBytes, err := syscall.BytePtrFromString(attr)
if err != nil {
return nil, err
}
dest := make([]byte, 128)
destBytes := unsafe.Pointer(&dest[0])
sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
if errno == syscall.ENODATA {
return nil, nil
}
if errno == syscall.ERANGE {
dest = make([]byte, sz)
destBytes := unsafe.Pointer(&dest[0])
sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
}
if errno != 0 {
return nil, errno
}
return dest[:sz], nil
}
var _zero uintptr
// Lsetxattr sets the value of the extended attribute identified by attr
// and associated with the given path in the file system.
func Lsetxattr(path string, attr string, data []byte, flags int) error {
pathBytes, err := syscall.BytePtrFromString(path)
if err != nil {
return err
}
attrBytes, err := syscall.BytePtrFromString(attr)
if err != nil {
return err
}
var dataBytes unsafe.Pointer
if len(data) > 0 {
dataBytes = unsafe.Pointer(&data[0])
} else {
dataBytes = unsafe.Pointer(&_zero)
}
_, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0)
if errno != 0 {
return errno
}
return nil
}

View File

@ -0,0 +1,13 @@
// +build !linux
package system
// Lgetxattr is not supported on platforms other than linux.
func Lgetxattr(path string, attr string) ([]byte, error) {
return nil, ErrNotSupportedPlatform
}
// Lsetxattr is not supported on platforms other than linux.
func Lsetxattr(path string, attr string, data []byte, flags int) error {
return ErrNotSupportedPlatform
}

View File

@ -0,0 +1,27 @@
Copyright (c) 2014-2015 The Docker & Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2014-2015 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,27 @@
Copyright (c) 2014-2015 The Docker & Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.