vendor: update everything

* If possible, update each dependency to the latest available version.

* Use releases over commit IDs and avoid vendoring branches.
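
  As a hedged illustration only (the import path and version are hypothetical,
  and this assumes a vndr-style `vendor.conf`), a pinned entry would look like:

      # preferred: pin to a release tag
      github.com/example/project v1.2.3 https://github.com/example/project

      # avoid: pinning to a bare commit ID or to a branch
      # github.com/example/project 0123456789abcdef0123456789abcdef01234567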

Signed-off-by: Valentin Rothberg <rothberg@redhat.com>
Author: Valentin Rothberg
Date:   2019-01-08 14:52:57 +01:00
Parent: 545f244212
Commit: bd40dcfc2b
920 changed files with 284288 additions and 43641 deletions

vendor/github.com/docker/docker/contrib/README.md (generated, vendored)

@@ -0,0 +1,4 @@
The `contrib` directory contains scripts, images, and other helpful things
which are not part of the core docker distribution. Please note that they
could be out of date, since they do not receive the same attention as the
rest of the repository.


@@ -0,0 +1,10 @@
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
int main(int argc, char *argv[])
{
printf("EUID=%d\n", geteuid());
return 0;
}


@@ -0,0 +1,16 @@
#define _GNU_SOURCE
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
int main(int argc, char **argv)
{
int err = acct("/tmp/t");
if (err == -1) {
fprintf(stderr, "acct failed: %s\n", strerror(errno));
exit(EXIT_FAILURE);
}
exit(EXIT_SUCCESS);
}


@@ -0,0 +1,7 @@
.globl _start
.text
_start:
xorl %eax, %eax
incl %eax
movb $0, %bl
int $0x80


@@ -0,0 +1,63 @@
#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>
#define STACK_SIZE (1024 * 1024) /* Stack size for cloned child */
struct clone_args {
char **argv;
};
// child_exec is the func that will be executed as the result of clone
static int child_exec(void *stuff)
{
struct clone_args *args = (struct clone_args *)stuff;
if (execvp(args->argv[0], args->argv) != 0) {
fprintf(stderr, "failed to execvp arguments %s\n",
strerror(errno));
exit(-1);
}
// we should never reach here!
exit(EXIT_FAILURE);
}
int main(int argc, char **argv)
{
struct clone_args args;
args.argv = &argv[1];
int clone_flags = CLONE_NEWNS | CLONE_NEWPID | SIGCHLD;
// allocate stack for child
char *stack; /* Start of stack buffer */
char *child_stack; /* End of stack buffer */
stack =
mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANON | MAP_STACK, -1, 0);
if (stack == MAP_FAILED) {
fprintf(stderr, "mmap failed: %s\n", strerror(errno));
exit(EXIT_FAILURE);
}
child_stack = stack + STACK_SIZE; /* Assume stack grows downward */
// the result of this call is that our child_exec will be run in another
// process returning its pid
pid_t pid = clone(child_exec, child_stack, clone_flags, &args);
if (pid < 0) {
fprintf(stderr, "clone failed: %s\n", strerror(errno));
exit(EXIT_FAILURE);
}
// lets wait on our child process here before we, the parent, exits
if (waitpid(pid, NULL, 0) == -1) {
fprintf(stderr, "failed to wait pid %d\n", pid);
exit(EXIT_FAILURE);
}
exit(EXIT_SUCCESS);
}


@@ -0,0 +1,14 @@
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/ip.h>
#include <netinet/udp.h>
int main() {
if (socket(PF_INET, SOCK_RAW, IPPROTO_UDP) == -1) {
perror("socket");
return 1;
}
return 0;
}


@@ -0,0 +1,11 @@
#include <sys/types.h>
#include <unistd.h>
#include <stdio.h>
int main() {
if (setgid(1) == -1) {
perror("setgid");
return 1;
}
return 0;
}


@@ -0,0 +1,11 @@
#include <sys/types.h>
#include <unistd.h>
#include <stdio.h>
int main() {
if (setuid(1) == -1) {
perror("setuid");
return 1;
}
return 0;
}


@@ -0,0 +1,30 @@
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
int main() {
int s;
struct sockaddr_in sin;
s = socket(AF_INET, SOCK_STREAM, 0);
if (s == -1) {
perror("socket");
return 1;
}
sin.sin_family = AF_INET;
sin.sin_addr.s_addr = INADDR_ANY;
sin.sin_port = htons(80);
if (bind(s, (struct sockaddr *)&sin, sizeof(sin)) == -1) {
perror("bind");
return 1;
}
close(s);
return 0;
}


@@ -0,0 +1,63 @@
#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>
#define STACK_SIZE (1024 * 1024) /* Stack size for cloned child */
struct clone_args {
char **argv;
};
// child_exec is the func that will be executed as the result of clone
static int child_exec(void *stuff)
{
struct clone_args *args = (struct clone_args *)stuff;
if (execvp(args->argv[0], args->argv) != 0) {
fprintf(stderr, "failed to execvp arguments %s\n",
strerror(errno));
exit(-1);
}
// we should never reach here!
exit(EXIT_FAILURE);
}
int main(int argc, char **argv)
{
struct clone_args args;
args.argv = &argv[1];
int clone_flags = CLONE_NEWUSER | SIGCHLD;
// allocate stack for child
char *stack; /* Start of stack buffer */
char *child_stack; /* End of stack buffer */
stack =
mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANON | MAP_STACK, -1, 0);
if (stack == MAP_FAILED) {
fprintf(stderr, "mmap failed: %s\n", strerror(errno));
exit(EXIT_FAILURE);
}
child_stack = stack + STACK_SIZE; /* Assume stack grows downward */
// the result of this call is that our child_exec will be run in another
// process returning its pid
pid_t pid = clone(child_exec, child_stack, clone_flags, &args);
if (pid < 0) {
fprintf(stderr, "clone failed: %s\n", strerror(errno));
exit(EXIT_FAILURE);
}
// lets wait on our child process here before we, the parent, exits
if (waitpid(pid, NULL, 0) == -1) {
fprintf(stderr, "failed to wait pid %d\n", pid);
exit(EXIT_FAILURE);
}
exit(EXIT_SUCCESS);
}


@@ -1,60 +0,0 @@
## About
This directory contains a collection of scripts used to build and manage this
repository. If there are any issues regarding the intention of a particular
script (or even part of a certain script), please reach out to us.
It may help us either refine our current scripts, or add on new ones
that are appropriate for a given use case.
## DinD (dind.sh)
DinD is a wrapper script which allows Docker to be run inside a Docker
container. DinD requires the container to
be run with privileged mode enabled.
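For illustration only (the image name below is a placeholder, not something
defined by the script), a privileged invocation might look like:

    $ docker run --privileged -it my-docker-dev-image hack/dind bash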
## Generate Authors (generate-authors.sh)
Generates AUTHORS, a file with all the names and corresponding emails of
individual contributors. AUTHORS can be found in the home directory of
this repository.
## Make
There are two make files, each with a different extension. Neither is supposed
to be called directly; only invoke `make`. Both scripts run inside a Docker
container.
### make.ps1
- The native Windows build script, written with PowerShell semantics. It is
more limited than `hack\make.sh`, as it does not support the full set of
operations provided by its Linux counterpart, `make.sh`. However, `make.ps1`
does support local Windows development and Windows-to-Windows CI.
More information is provided by the author, @jhowardmsft, within `make.ps1` itself.
### make.sh
- Referenced via `make test` when running tests on a local machine,
or directly referenced when running tests inside a Docker development container.
- When running on a local machine, use `make test` to run all tests found in
`test`, `test-unit`, `test-integration`, and `test-docker-py`.
The default timeout is set in `make.sh` to 60 minutes
(`${TIMEOUT:=60m}`), since it currently takes up to an hour to run
all of the tests.
- When running inside a Docker development container, `hack/make.sh` does
not have a single target that runs all the tests. You need to provide a
single command line with multiple targets that performs the same thing.
An example referenced from [Run targets inside a development container](https://docs.docker.com/opensource/project/test-and-docs/#run-targets-inside-a-development-container): `root@5f8630b873fe:/go/src/github.com/moby/moby# hack/make.sh dynbinary binary cross test-unit test-integration test-docker-py`
- For more information related to testing outside the scope of this README,
refer to
[Run tests and test documentation](https://docs.docker.com/opensource/project/test-and-docs/)
## Release (release.sh)
Releases any bundles built by `make` on a public AWS S3 bucket.
For information regarding configuration, please view `release.sh`.
## Vendor (vendor.sh)
A shell script that is a wrapper around vndr. For information on how to use
this, please refer to [vndr's README](https://github.com/LK4D4/vndr/blob/master/README.md)
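A minimal sketch of how it is typically run (assuming `vendor.conf` has already
been edited and `vndr` is available in the build environment):

    $ ./hack/vendor.sh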


@@ -1,69 +0,0 @@
# Integration Testing on Swarm
IT on Swarm allows you to execute integration tests in parallel across a Docker Swarm cluster
## Architecture
### Master service
- Works as a funker caller
- Calls a worker funker (`-worker-service`) with a chunk of `-check.f` filter strings (passed as a file via `-input` flag, typically `/mnt/input`)
### Worker service
- Works as a funker callee
- Executes an equivalent of `TESTFLAGS=-check.f TestFoo|TestBar|TestBaz ... make test-integration-cli` using the bind-mounted API socket (`docker.sock`)
### Client
- Controls master and workers via `docker stack`
- No need to have a local daemon
Typically, the master and workers are expected to run in a cloud environment,
while the client runs on a laptop, e.g. Docker for Mac/Windows.
## Requirements
- Docker daemon 1.13 or later
- Private registry for distributed execution with multiple nodes
## Usage
### Step 1: Prepare images
$ make build-integration-cli-on-swarm
The following environment variables are known to work in this step:
- `BUILDFLAGS`
- `DOCKER_INCREMENTAL_BINARY`
Note: during the transition into Moby Project, you might need to create a symbolic link `$GOPATH/src/github.com/docker/docker` to `$GOPATH/src/github.com/moby/moby`.
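For illustration only (the value shown is an assumption, not a recommendation),
the build step could be run with one of these variables set:

    $ DOCKER_INCREMENTAL_BINARY=1 make build-integration-cli-on-swarm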
### Step 2: Execute tests
$ ./hack/integration-cli-on-swarm/integration-cli-on-swarm -replicas 40 -push-worker-image YOUR_REGISTRY.EXAMPLE.COM/integration-cli-worker:latest
The following environment variables are known to work in this step:
- `DOCKER_GRAPHDRIVER`
- `DOCKER_EXPERIMENTAL`
#### Flags
Basic flags:
- `-replicas N`: the number of worker service replicas, i.e. the degree of parallelism.
- `-chunks N`: the number of chunks. By default, `chunks` == `replicas`.
- `-push-worker-image REGISTRY/IMAGE:TAG`: push the worker image to the registry. Note that if you have only a single node, and hence do not need a private registry, you do not need to specify `-push-worker-image`.
Experimental flags for mitigating makespan nonuniformity:
- `-shuffle`: Shuffle the test filter strings
Flags for debugging IT on Swarm itself:
- `-rand-seed N`: the random seed. This flag is useful for deterministic replaying. By default (0), the timestamp is used.
- `-filters-file FILE`: the file that contains the `-check.f` strings. By default, the file is automatically generated.
- `-dry-run`: skip the actual workload
- `-keep-executor`: do not auto-remove executor containers; this is useful for running privileged programs on Swarm
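For illustration, a combination of the flags documented above (registry name
and values are placeholders) might look like:

    $ ./hack/integration-cli-on-swarm/integration-cli-on-swarm -replicas 40 -chunks 80 -shuffle -dry-run -push-worker-image YOUR_REGISTRY.EXAMPLE.COM/integration-cli-worker:latest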


@@ -1,2 +0,0 @@
# dependencies specific to worker (i.e. github.com/docker/docker/...) are not vendored here
github.com/bfirsh/funker-go eaa0a2e06f30e72c9a0b7f858951e581e26ef773


@@ -1 +0,0 @@
This code provides helper functions for dealing with archive files.


File diff suppressed because it is too large


@@ -1,92 +0,0 @@
package archive
import (
"archive/tar"
"os"
"path/filepath"
"strings"
"github.com/docker/docker/pkg/system"
"golang.org/x/sys/unix"
)
func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
if format == OverlayWhiteoutFormat {
return overlayWhiteoutConverter{}
}
return nil
}
type overlayWhiteoutConverter struct{}
func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) {
// convert whiteouts to AUFS format
if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 {
// we just rename the file and make it normal
dir, filename := filepath.Split(hdr.Name)
hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename)
hdr.Mode = 0600
hdr.Typeflag = tar.TypeReg
hdr.Size = 0
}
if fi.Mode()&os.ModeDir != 0 {
// convert opaque dirs to AUFS format by writing an empty file with the prefix
opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque")
if err != nil {
return nil, err
}
if len(opaque) == 1 && opaque[0] == 'y' {
if hdr.Xattrs != nil {
delete(hdr.Xattrs, "trusted.overlay.opaque")
}
// create a header for the whiteout file
// it should inherit some properties from the parent, but be a regular file
wo = &tar.Header{
Typeflag: tar.TypeReg,
Mode: hdr.Mode & int64(os.ModePerm),
Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir),
Size: 0,
Uid: hdr.Uid,
Uname: hdr.Uname,
Gid: hdr.Gid,
Gname: hdr.Gname,
AccessTime: hdr.AccessTime,
ChangeTime: hdr.ChangeTime,
}
}
}
return
}
func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
base := filepath.Base(path)
dir := filepath.Dir(path)
// if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay
if base == WhiteoutOpaqueDir {
err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0)
// don't write the file itself
return false, err
}
// if a file was deleted and we are using overlay, we need to create a character device
if strings.HasPrefix(base, WhiteoutPrefix) {
originalBase := base[len(WhiteoutPrefix):]
originalPath := filepath.Join(dir, originalBase)
if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {
return false, err
}
if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
return false, err
}
// don't write the file itself
return false, nil
}
return true, nil
}


@@ -1,7 +0,0 @@
// +build !linux
package archive
func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
return nil
}


@@ -1,114 +0,0 @@
// +build !windows
package archive
import (
"archive/tar"
"errors"
"os"
"path/filepath"
"syscall"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/system"
rsystem "github.com/opencontainers/runc/libcontainer/system"
"golang.org/x/sys/unix"
)
// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
func fixVolumePathPrefix(srcPath string) string {
return srcPath
}
// getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a separate function as this is platform specific. On Linux, we
// can't use filepath.Join(srcPath,include) because this will clean away
// a trailing "." or "/" which may be important.
func getWalkRoot(srcPath string, include string) string {
return srcPath + string(filepath.Separator) + include
}
// CanonicalTarNameForPath returns platform-specific filepath
// to canonical posix-style path for tar archival. p is relative
// path.
func CanonicalTarNameForPath(p string) (string, error) {
return p, nil // already unix-style
}
// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
return perm // noop for unix as golang APIs provide perm bits correctly
}
func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
s, ok := stat.(*syscall.Stat_t)
if ok {
// Currently go does not fill in the major/minors
if s.Mode&unix.S_IFBLK != 0 ||
s.Mode&unix.S_IFCHR != 0 {
hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) // nolint: unconvert
hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) // nolint: unconvert
}
}
return
}
func getInodeFromStat(stat interface{}) (inode uint64, err error) {
s, ok := stat.(*syscall.Stat_t)
if ok {
inode = s.Ino
}
return
}
func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
s, ok := stat.(*syscall.Stat_t)
if !ok {
return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t")
}
return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil
}
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
if rsystem.RunningInUserNS() {
// cannot create a device if running in user namespace
return nil
}
mode := uint32(hdr.Mode & 07777)
switch hdr.Typeflag {
case tar.TypeBlock:
mode |= unix.S_IFBLK
case tar.TypeChar:
mode |= unix.S_IFCHR
case tar.TypeFifo:
mode |= unix.S_IFIFO
}
return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor)))
}
func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
if hdr.Typeflag == tar.TypeLink {
if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
return err
}
}
} else if hdr.Typeflag != tar.TypeSymlink {
if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
return err
}
}
return nil
}


@@ -1,79 +0,0 @@
// +build windows
package archive
import (
"archive/tar"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/longpath"
)
// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
func fixVolumePathPrefix(srcPath string) string {
return longpath.AddPrefix(srcPath)
}
// getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a separate function as this is platform specific.
func getWalkRoot(srcPath string, include string) string {
return filepath.Join(srcPath, include)
}
// CanonicalTarNameForPath returns platform-specific filepath
// to canonical posix-style path for tar archival. p is relative
// path.
func CanonicalTarNameForPath(p string) (string, error) {
// windows: convert windows style relative path with backslashes
// into forward slashes. Since windows does not allow '/' or '\'
// in file names, it is mostly safe to replace however we must
// check just in case
if strings.Contains(p, "/") {
return "", fmt.Errorf("Windows path contains forward slash: %s", p)
}
return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
}
// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
//perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
permPart := perm & os.ModePerm
noPermPart := perm &^ os.ModePerm
// Add the x bit: make everything +x from windows
permPart |= 0111
permPart &= 0755
return noPermPart | permPart
}
func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
// do nothing. no notion of Rdev, Nlink in stat on Windows
return
}
func getInodeFromStat(stat interface{}) (inode uint64, err error) {
// do nothing. no notion of Inode in stat on Windows
return
}
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
return nil
}
func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
return nil
}
func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
// no notion of file ownership mapping yet on Windows
return idtools.IDPair{0, 0}, nil
}


@@ -1,441 +0,0 @@
package archive
import (
"archive/tar"
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strings"
"syscall"
"time"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/pools"
"github.com/docker/docker/pkg/system"
"github.com/sirupsen/logrus"
)
// ChangeType represents the change type.
type ChangeType int
const (
// ChangeModify represents the modify operation.
ChangeModify = iota
// ChangeAdd represents the add operation.
ChangeAdd
// ChangeDelete represents the delete operation.
ChangeDelete
)
func (c ChangeType) String() string {
switch c {
case ChangeModify:
return "C"
case ChangeAdd:
return "A"
case ChangeDelete:
return "D"
}
return ""
}
// Change represents a change, it wraps the change type and path.
// It describes changes of the files in the path respect to the
// parent layers. The change could be modify, add, delete.
// This is used for layer diff.
type Change struct {
Path string
Kind ChangeType
}
func (change *Change) String() string {
return fmt.Sprintf("%s %s", change.Kind, change.Path)
}
// for sort.Sort
type changesByPath []Change
func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
func (c changesByPath) Len() int { return len(c) }
func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] }
// Gnu tar and the go tar writer don't have sub-second mtime
// precision, which is problematic when we apply changes via tar
// files, we handle this by comparing for exact times, *or* same
// second count and either a or b having exactly 0 nanoseconds
func sameFsTime(a, b time.Time) bool {
return a == b ||
(a.Unix() == b.Unix() &&
(a.Nanosecond() == 0 || b.Nanosecond() == 0))
}
func sameFsTimeSpec(a, b syscall.Timespec) bool {
return a.Sec == b.Sec &&
(a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
}
// Changes walks the path rw and determines changes for the files in the path,
// with respect to the parent layers
func Changes(layers []string, rw string) ([]Change, error) {
return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip)
}
func aufsMetadataSkip(path string) (skip bool, err error) {
skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path)
if err != nil {
skip = true
}
return
}
func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
f := filepath.Base(path)
// If there is a whiteout, then the file was removed
if strings.HasPrefix(f, WhiteoutPrefix) {
originalFile := f[len(WhiteoutPrefix):]
return filepath.Join(filepath.Dir(path), originalFile), nil
}
return "", nil
}
type skipChange func(string) (bool, error)
type deleteChange func(string, string, os.FileInfo) (string, error)
func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) {
var (
changes []Change
changedDirs = make(map[string]struct{})
)
err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
if err != nil {
return err
}
// Rebase path
path, err = filepath.Rel(rw, path)
if err != nil {
return err
}
// As this runs on the daemon side, file paths are OS specific.
path = filepath.Join(string(os.PathSeparator), path)
// Skip root
if path == string(os.PathSeparator) {
return nil
}
if sc != nil {
if skip, err := sc(path); skip {
return err
}
}
change := Change{
Path: path,
}
deletedFile, err := dc(rw, path, f)
if err != nil {
return err
}
// Find out what kind of modification happened
if deletedFile != "" {
change.Path = deletedFile
change.Kind = ChangeDelete
} else {
// Otherwise, the file was added
change.Kind = ChangeAdd
// ...Unless it already existed in a top layer, in which case, it's a modification
for _, layer := range layers {
stat, err := os.Stat(filepath.Join(layer, path))
if err != nil && !os.IsNotExist(err) {
return err
}
if err == nil {
// The file existed in the top layer, so that's a modification
// However, if it's a directory, maybe it wasn't actually modified.
// If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
if stat.IsDir() && f.IsDir() {
if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
// Both directories are the same, don't record the change
return nil
}
}
change.Kind = ChangeModify
break
}
}
}
// If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
// This block is here to ensure the change is recorded even if the
// modify time, mode and size of the parent directory in the rw and ro layers are all equal.
// Check https://github.com/docker/docker/pull/13590 for details.
if f.IsDir() {
changedDirs[path] = struct{}{}
}
if change.Kind == ChangeAdd || change.Kind == ChangeDelete {
parent := filepath.Dir(path)
if _, ok := changedDirs[parent]; !ok && parent != "/" {
changes = append(changes, Change{Path: parent, Kind: ChangeModify})
changedDirs[parent] = struct{}{}
}
}
// Record change
changes = append(changes, change)
return nil
})
if err != nil && !os.IsNotExist(err) {
return nil, err
}
return changes, nil
}
// FileInfo describes the information of a file.
type FileInfo struct {
parent *FileInfo
name string
stat *system.StatT
children map[string]*FileInfo
capability []byte
added bool
}
// LookUp looks up the file information of a file.
func (info *FileInfo) LookUp(path string) *FileInfo {
// As this runs on the daemon side, file paths are OS specific.
parent := info
if path == string(os.PathSeparator) {
return info
}
pathElements := strings.Split(path, string(os.PathSeparator))
for _, elem := range pathElements {
if elem != "" {
child := parent.children[elem]
if child == nil {
return nil
}
parent = child
}
}
return parent
}
func (info *FileInfo) path() string {
if info.parent == nil {
// As this runs on the daemon side, file paths are OS specific.
return string(os.PathSeparator)
}
return filepath.Join(info.parent.path(), info.name)
}
func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
sizeAtEntry := len(*changes)
if oldInfo == nil {
// add
change := Change{
Path: info.path(),
Kind: ChangeAdd,
}
*changes = append(*changes, change)
info.added = true
}
// We make a copy so we can modify it to detect additions
// also, we only recurse on the old dir if the new info is a directory
// otherwise any previous delete/change is considered recursive
oldChildren := make(map[string]*FileInfo)
if oldInfo != nil && info.isDir() {
for k, v := range oldInfo.children {
oldChildren[k] = v
}
}
for name, newChild := range info.children {
oldChild := oldChildren[name]
if oldChild != nil {
// change?
oldStat := oldChild.stat
newStat := newChild.stat
// Note: We can't compare inode or ctime or blocksize here, because these change
// when copying a file into a container. However, that is not generally a problem
// because any content change will change mtime, and any status change should
// be visible when actually comparing the stat fields. The only time this
// breaks down is if some code intentionally hides a change by setting
// back mtime
if statDifferent(oldStat, newStat) ||
!bytes.Equal(oldChild.capability, newChild.capability) {
change := Change{
Path: newChild.path(),
Kind: ChangeModify,
}
*changes = append(*changes, change)
newChild.added = true
}
// Remove from copy so we can detect deletions
delete(oldChildren, name)
}
newChild.addChanges(oldChild, changes)
}
for _, oldChild := range oldChildren {
// delete
change := Change{
Path: oldChild.path(),
Kind: ChangeDelete,
}
*changes = append(*changes, change)
}
// If there were changes inside this directory, we need to add it, even if the directory
// itself wasn't changed. This is needed to properly save and restore filesystem permissions.
// As this runs on the daemon side, file paths are OS specific.
if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) {
change := Change{
Path: info.path(),
Kind: ChangeModify,
}
// Let's insert the directory entry before the recently added entries located inside this dir
*changes = append(*changes, change) // just to resize the slice, will be overwritten
copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
(*changes)[sizeAtEntry] = change
}
}
// Changes add changes to file information.
func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
var changes []Change
info.addChanges(oldInfo, &changes)
return changes
}
func newRootFileInfo() *FileInfo {
// As this runs on the daemon side, file paths are OS specific.
root := &FileInfo{
name: string(os.PathSeparator),
children: make(map[string]*FileInfo),
}
return root
}
// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
// If oldDir is "", then all files in newDir will be Add-Changes.
func ChangesDirs(newDir, oldDir string) ([]Change, error) {
var (
oldRoot, newRoot *FileInfo
)
if oldDir == "" {
emptyDir, err := ioutil.TempDir("", "empty")
if err != nil {
return nil, err
}
defer os.Remove(emptyDir)
oldDir = emptyDir
}
oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
if err != nil {
return nil, err
}
return newRoot.Changes(oldRoot), nil
}
// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
func ChangesSize(newDir string, changes []Change) int64 {
var (
size int64
sf = make(map[uint64]struct{})
)
for _, change := range changes {
if change.Kind == ChangeModify || change.Kind == ChangeAdd {
file := filepath.Join(newDir, change.Path)
fileInfo, err := os.Lstat(file)
if err != nil {
logrus.Errorf("Can not stat %q: %s", file, err)
continue
}
if fileInfo != nil && !fileInfo.IsDir() {
if hasHardlinks(fileInfo) {
inode := getIno(fileInfo)
if _, ok := sf[inode]; !ok {
size += fileInfo.Size()
sf[inode] = struct{}{}
}
} else {
size += fileInfo.Size()
}
}
}
}
return size
}
// ExportChanges produces an Archive from the provided changes, relative to dir.
func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) {
reader, writer := io.Pipe()
go func() {
ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil)
// this buffer is needed for the duration of this piped stream
defer pools.BufioWriter32KPool.Put(ta.Buffer)
sort.Sort(changesByPath(changes))
// In general we log errors here but ignore them because
// during e.g. a diff operation the container can continue
// mutating the filesystem and we can see transient errors
// from this
for _, change := range changes {
if change.Kind == ChangeDelete {
whiteOutDir := filepath.Dir(change.Path)
whiteOutBase := filepath.Base(change.Path)
whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
timestamp := time.Now()
hdr := &tar.Header{
Name: whiteOut[1:],
Size: 0,
ModTime: timestamp,
AccessTime: timestamp,
ChangeTime: timestamp,
}
if err := ta.TarWriter.WriteHeader(hdr); err != nil {
logrus.Debugf("Can't write whiteout header: %s", err)
}
} else {
path := filepath.Join(dir, change.Path)
if err := ta.addTarFile(path, change.Path[1:]); err != nil {
logrus.Debugf("Can't add file %s to tar: %s", path, err)
}
}
}
// Make sure to check the error on Close.
if err := ta.TarWriter.Close(); err != nil {
logrus.Debugf("Can't close layer: %s", err)
}
if err := writer.Close(); err != nil {
logrus.Debugf("failed close Changes writer: %s", err)
}
}()
return reader, nil
}


@@ -1,313 +0,0 @@
package archive
import (
"bytes"
"fmt"
"os"
"path/filepath"
"sort"
"syscall"
"unsafe"
"github.com/docker/docker/pkg/system"
"golang.org/x/sys/unix"
)
// walker is used to implement collectFileInfoForChanges on linux. Where this
// method in general returns the entire contents of two directory trees, we
// optimize some FS calls out on linux. In particular, we take advantage of the
// fact that getdents(2) returns the inode of each file in the directory being
// walked, which, when walking two trees in parallel to generate a list of
// changes, can be used to prune subtrees without ever having to lstat(2) them
// directly. Eliminating stat calls in this way can save up to seconds on large
// images.
type walker struct {
dir1 string
dir2 string
root1 *FileInfo
root2 *FileInfo
}
// collectFileInfoForChanges returns a complete representation of the trees
// rooted at dir1 and dir2, with one important exception: any subtree or
// leaf where the inode and device numbers are an exact match between dir1
// and dir2 will be pruned from the results. This method is *only* to be used
// to generating a list of changes between the two directories, as it does not
// reflect the full contents.
func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
w := &walker{
dir1: dir1,
dir2: dir2,
root1: newRootFileInfo(),
root2: newRootFileInfo(),
}
i1, err := os.Lstat(w.dir1)
if err != nil {
return nil, nil, err
}
i2, err := os.Lstat(w.dir2)
if err != nil {
return nil, nil, err
}
if err := w.walk("/", i1, i2); err != nil {
return nil, nil, err
}
return w.root1, w.root2, nil
}
// Given a FileInfo, its path info, and a reference to the root of the tree
// being constructed, register this file with the tree.
func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
if fi == nil {
return nil
}
parent := root.LookUp(filepath.Dir(path))
if parent == nil {
return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path)
}
info := &FileInfo{
name: filepath.Base(path),
children: make(map[string]*FileInfo),
parent: parent,
}
cpath := filepath.Join(dir, path)
stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t))
if err != nil {
return err
}
info.stat = stat
info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
parent.children[info.name] = info
return nil
}
// Walk a subtree rooted at the same path in both trees being iterated. For
// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d
func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
// Register these nodes with the return trees, unless we're still at the
// (already-created) roots:
if path != "/" {
if err := walkchunk(path, i1, w.dir1, w.root1); err != nil {
return err
}
if err := walkchunk(path, i2, w.dir2, w.root2); err != nil {
return err
}
}
is1Dir := i1 != nil && i1.IsDir()
is2Dir := i2 != nil && i2.IsDir()
sameDevice := false
if i1 != nil && i2 != nil {
si1 := i1.Sys().(*syscall.Stat_t)
si2 := i2.Sys().(*syscall.Stat_t)
if si1.Dev == si2.Dev {
sameDevice = true
}
}
// If these files are both non-existent, or leaves (non-dirs), we are done.
if !is1Dir && !is2Dir {
return nil
}
// Fetch the names of all the files contained in both directories being walked:
var names1, names2 []nameIno
if is1Dir {
names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access
if err != nil {
return err
}
}
if is2Dir {
names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access
if err != nil {
return err
}
}
// We have lists of the files contained in both parallel directories, sorted
// in the same order. Walk them in parallel, generating a unique merged list
// of all items present in either or both directories.
var names []string
ix1 := 0
ix2 := 0
for {
if ix1 >= len(names1) {
break
}
if ix2 >= len(names2) {
break
}
ni1 := names1[ix1]
ni2 := names2[ix2]
switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) {
case -1: // ni1 < ni2 -- advance ni1
// we will not encounter ni1 in names2
names = append(names, ni1.name)
ix1++
case 0: // ni1 == ni2
if ni1.ino != ni2.ino || !sameDevice {
names = append(names, ni1.name)
}
ix1++
ix2++
case 1: // ni1 > ni2 -- advance ni2
// we will not encounter ni2 in names1
names = append(names, ni2.name)
ix2++
}
}
for ix1 < len(names1) {
names = append(names, names1[ix1].name)
ix1++
}
for ix2 < len(names2) {
names = append(names, names2[ix2].name)
ix2++
}
// For each of the names present in either or both of the directories being
// iterated, stat the name under each root, and recurse the pair of them:
for _, name := range names {
fname := filepath.Join(path, name)
var cInfo1, cInfo2 os.FileInfo
if is1Dir {
cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access
if err != nil && !os.IsNotExist(err) {
return err
}
}
if is2Dir {
cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access
if err != nil && !os.IsNotExist(err) {
return err
}
}
if err = w.walk(fname, cInfo1, cInfo2); err != nil {
return err
}
}
return nil
}
// {name,inode} pairs used to support the early-pruning logic of the walker type
type nameIno struct {
name string
ino uint64
}
type nameInoSlice []nameIno
func (s nameInoSlice) Len() int { return len(s) }
func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name }
// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode
// numbers further up the stack when reading directory contents. Unlike
// os.Readdirnames, which returns a list of filenames, this function returns a
// list of {filename,inode} pairs.
func readdirnames(dirname string) (names []nameIno, err error) {
var (
size = 100
buf = make([]byte, 4096)
nbuf int
bufp int
nb int
)
f, err := os.Open(dirname)
if err != nil {
return nil, err
}
defer f.Close()
names = make([]nameIno, 0, size) // Empty with room to grow.
for {
// Refill the buffer if necessary
if bufp >= nbuf {
bufp = 0
nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux
if nbuf < 0 {
nbuf = 0
}
if err != nil {
return nil, os.NewSyscallError("readdirent", err)
}
if nbuf <= 0 {
break // EOF
}
}
// Drain the buffer
nb, names = parseDirent(buf[bufp:nbuf], names)
bufp += nb
}
sl := nameInoSlice(names)
sort.Sort(sl)
return sl, nil
}
// parseDirent is a minor modification of unix.ParseDirent (linux version)
// which returns {name,inode} pairs instead of just names.
func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) {
origlen := len(buf)
for len(buf) > 0 {
dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0]))
buf = buf[dirent.Reclen:]
if dirent.Ino == 0 { // File absent in directory.
continue
}
bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
var name = string(bytes[0:clen(bytes[:])])
if name == "." || name == ".." { // Useless names
continue
}
names = append(names, nameIno{name, dirent.Ino})
}
return origlen - len(buf), names
}
func clen(n []byte) int {
for i := 0; i < len(n); i++ {
if n[i] == 0 {
return i
}
}
return len(n)
}
// OverlayChanges walks the path rw and determines changes for the files in the path,
// with respect to the parent layers
func OverlayChanges(layers []string, rw string) ([]Change, error) {
return changes(layers, rw, overlayDeletedFile, nil)
}
func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) {
if fi.Mode()&os.ModeCharDevice != 0 {
s := fi.Sys().(*syscall.Stat_t)
if unix.Major(uint64(s.Rdev)) == 0 && unix.Minor(uint64(s.Rdev)) == 0 { // nolint: unconvert
return path, nil
}
}
if fi.Mode()&os.ModeDir != 0 {
opaque, err := system.Lgetxattr(filepath.Join(root, path), "trusted.overlay.opaque")
if err != nil {
return "", err
}
if len(opaque) == 1 && opaque[0] == 'y' {
return path, nil
}
}
return "", nil
}


@@ -1,97 +0,0 @@
// +build !linux
package archive
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/docker/docker/pkg/system"
)
func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) {
var (
oldRoot, newRoot *FileInfo
err1, err2 error
errs = make(chan error, 2)
)
go func() {
oldRoot, err1 = collectFileInfo(oldDir)
errs <- err1
}()
go func() {
newRoot, err2 = collectFileInfo(newDir)
errs <- err2
}()
// block until both routines have returned
for i := 0; i < 2; i++ {
if err := <-errs; err != nil {
return nil, nil, err
}
}
return oldRoot, newRoot, nil
}
func collectFileInfo(sourceDir string) (*FileInfo, error) {
root := newRootFileInfo()
err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
if err != nil {
return err
}
// Rebase path
relPath, err := filepath.Rel(sourceDir, path)
if err != nil {
return err
}
// As this runs on the daemon side, file paths are OS specific.
relPath = filepath.Join(string(os.PathSeparator), relPath)
// See https://github.com/golang/go/issues/9168 - bug in filepath.Join.
// Temporary workaround. If the returned path starts with two backslashes,
// trim it down to a single backslash. Only relevant on Windows.
if runtime.GOOS == "windows" {
if strings.HasPrefix(relPath, `\\`) {
relPath = relPath[1:]
}
}
if relPath == string(os.PathSeparator) {
return nil
}
parent := root.LookUp(filepath.Dir(relPath))
if parent == nil {
return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
}
info := &FileInfo{
name: filepath.Base(relPath),
children: make(map[string]*FileInfo),
parent: parent,
}
s, err := system.Lstat(path)
if err != nil {
return err
}
info.stat = s
info.capability, _ = system.Lgetxattr(path, "security.capability")
parent.children[info.name] = info
return nil
})
if err != nil {
return nil, err
}
return root, nil
}


@@ -1,37 +0,0 @@
// +build !windows
package archive
import (
"os"
"syscall"
"github.com/docker/docker/pkg/system"
"golang.org/x/sys/unix"
)
func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
// Don't look at size for dirs, its not a good measure of change
if oldStat.Mode() != newStat.Mode() ||
oldStat.UID() != newStat.UID() ||
oldStat.GID() != newStat.GID() ||
oldStat.Rdev() != newStat.Rdev() ||
// Don't look at size for dirs, its not a good measure of change
(oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR &&
(!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
return true
}
return false
}
func (info *FileInfo) isDir() bool {
return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0
}
func getIno(fi os.FileInfo) uint64 {
return fi.Sys().(*syscall.Stat_t).Ino
}
func hasHardlinks(fi os.FileInfo) bool {
return fi.Sys().(*syscall.Stat_t).Nlink > 1
}


@@ -1,30 +0,0 @@
package archive
import (
"os"
"github.com/docker/docker/pkg/system"
)
func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
// Don't look at size for dirs, its not a good measure of change
if oldStat.Mtim() != newStat.Mtim() ||
oldStat.Mode() != newStat.Mode() ||
oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() {
return true
}
return false
}
func (info *FileInfo) isDir() bool {
return info.parent == nil || info.stat.Mode().IsDir()
}
func getIno(fi os.FileInfo) (inode uint64) {
return
}
func hasHardlinks(fi os.FileInfo) bool {
return false
}


@@ -1,472 +0,0 @@
package archive
import (
"archive/tar"
"errors"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/docker/docker/pkg/system"
"github.com/sirupsen/logrus"
)
// Errors used or returned by this file.
var (
ErrNotDirectory = errors.New("not a directory")
ErrDirNotExists = errors.New("no such directory")
ErrCannotCopyDir = errors.New("cannot copy directory")
ErrInvalidCopySource = errors.New("invalid copy source content")
)
// PreserveTrailingDotOrSeparator returns the given cleaned path (after
// processing using any utility functions from the path or filepath stdlib
// packages) and appends a trailing `/.` or `/` if its corresponding original
// path (from before being processed by utility functions from the path or
// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned
// path already ends in a `.` path segment, then another is not added. If the
// clean path already ends in the separator, then another is not added.
func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string, sep byte) string {
// Ensure paths are in platform semantics
cleanedPath = strings.Replace(cleanedPath, "/", string(sep), -1)
originalPath = strings.Replace(originalPath, "/", string(sep), -1)
if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) {
if !hasTrailingPathSeparator(cleanedPath, sep) {
// Add a separator if it doesn't already end with one (a cleaned
// path would only end in a separator if it is the root).
cleanedPath += string(sep)
}
cleanedPath += "."
}
if !hasTrailingPathSeparator(cleanedPath, sep) && hasTrailingPathSeparator(originalPath, sep) {
cleanedPath += string(sep)
}
return cleanedPath
}
// assertsDirectory returns whether the given path is
// asserted to be a directory, i.e., the path ends with
// a trailing '/' or `/.`, assuming a path separator of `/`.
func assertsDirectory(path string, sep byte) bool {
return hasTrailingPathSeparator(path, sep) || specifiesCurrentDir(path)
}
// hasTrailingPathSeparator returns whether the given
// path ends with the system's path separator character.
func hasTrailingPathSeparator(path string, sep byte) bool {
return len(path) > 0 && path[len(path)-1] == sep
}
// specifiesCurrentDir returns whether the given path specifies
// a "current directory", i.e., the last path segment is `.`.
func specifiesCurrentDir(path string) bool {
return filepath.Base(path) == "."
}
// SplitPathDirEntry splits the given path between its directory name and its
// basename by first cleaning the path but preserves a trailing "." if the
// original path specified the current directory.
func SplitPathDirEntry(path string) (dir, base string) {
cleanedPath := filepath.Clean(filepath.FromSlash(path))
if specifiesCurrentDir(path) {
cleanedPath += string(os.PathSeparator) + "."
}
return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
}
// TarResource archives the resource described by the given CopyInfo to a Tar
// archive. A non-nil error is returned if sourcePath does not exist or is
// asserted to be a directory but exists as another type of file.
//
// This function acts as a convenient wrapper around TarWithOptions, which
// requires a directory as the source path. TarResource accepts either a
// directory or a file path and correctly sets the Tar options.
func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) {
return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
}
// TarResourceRebase is like TarResource but renames the first path element of
// items in the resulting tar archive to match the given rebaseName if not "".
func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) {
sourcePath = normalizePath(sourcePath)
if _, err = os.Lstat(sourcePath); err != nil {
// Catches the case where the source does not exist or is not a
// directory if asserted to be a directory, as this also causes an
// error.
return
}
// Separate the source path between its directory and
// the entry in that directory which we are archiving.
sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
opts := TarResourceRebaseOpts(sourceBase, rebaseName)
logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
return TarWithOptions(sourceDir, opts)
}
// TarResourceRebaseOpts does not perform the Tar, but instead just creates the rebase
// parameters to be sent to TarWithOptions (the TarOptions struct)
func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions {
filter := []string{sourceBase}
return &TarOptions{
Compression: Uncompressed,
IncludeFiles: filter,
IncludeSourceDir: true,
RebaseNames: map[string]string{
sourceBase: rebaseName,
},
}
}
// CopyInfo holds basic info about the source
// or destination path of a copy operation.
type CopyInfo struct {
Path string
Exists bool
IsDir bool
RebaseName string
}
// CopyInfoSourcePath stats the given path to create a CopyInfo
// struct representing that resource for the source of an archive copy
// operation. The given path should be an absolute local path. A source path
// has all symlinks evaluated that appear before the last path separator ("/"
// on Unix). As it is to be a copy source, the path must exist.
func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
// normalize the file path and then evaluate the symbol link
// we will use the target file instead of the symbol link if
// followLink is set
path = normalizePath(path)
resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
if err != nil {
return CopyInfo{}, err
}
stat, err := os.Lstat(resolvedPath)
if err != nil {
return CopyInfo{}, err
}
return CopyInfo{
Path: resolvedPath,
Exists: true,
IsDir: stat.IsDir(),
RebaseName: rebaseName,
}, nil
}
// CopyInfoDestinationPath stats the given path to create a CopyInfo
// struct representing that resource for the destination of an archive copy
// operation. The given path should be an absolute local path.
func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
path = normalizePath(path)
originalPath := path
stat, err := os.Lstat(path)
if err == nil && stat.Mode()&os.ModeSymlink == 0 {
// The path exists and is not a symlink.
return CopyInfo{
Path: path,
Exists: true,
IsDir: stat.IsDir(),
}, nil
}
// While the path is a symlink.
for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
if n > maxSymlinkIter {
// Don't follow symlinks more than this arbitrary number of times.
return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
}
// The path is a symbolic link. We need to evaluate it so that the
// destination of the copy operation is the link target and not the
// link itself. This is notably different than CopyInfoSourcePath which
// only evaluates symlinks before the last appearing path separator.
// Also note that it is okay if the last path element is a broken
// symlink as the copy operation should create the target.
var linkTarget string
linkTarget, err = os.Readlink(path)
if err != nil {
return CopyInfo{}, err
}
if !system.IsAbs(linkTarget) {
// Join with the parent directory.
dstParent, _ := SplitPathDirEntry(path)
linkTarget = filepath.Join(dstParent, linkTarget)
}
path = linkTarget
stat, err = os.Lstat(path)
}
if err != nil {
// It's okay if the destination path doesn't exist. We can still
// continue the copy operation if the parent directory exists.
if !os.IsNotExist(err) {
return CopyInfo{}, err
}
// Ensure destination parent dir exists.
dstParent, _ := SplitPathDirEntry(path)
parentDirStat, err := os.Stat(dstParent)
if err != nil {
return CopyInfo{}, err
}
if !parentDirStat.IsDir() {
return CopyInfo{}, ErrNotDirectory
}
return CopyInfo{Path: path}, nil
}
// The path exists after resolving symlinks.
return CopyInfo{
Path: path,
Exists: true,
IsDir: stat.IsDir(),
}, nil
}
// PrepareArchiveCopy prepares the given srcContent archive, which should
// contain the archived resource described by srcInfo, to the destination
// described by dstInfo. Returns the possibly modified content archive along
// with the path to the destination directory which it should be extracted to.
func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) {
// Ensure in platform semantics
srcInfo.Path = normalizePath(srcInfo.Path)
dstInfo.Path = normalizePath(dstInfo.Path)
// Separate the destination path between its directory and base
// components in case the source archive contents need to be rebased.
dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
_, srcBase := SplitPathDirEntry(srcInfo.Path)
switch {
case dstInfo.Exists && dstInfo.IsDir:
// The destination exists as a directory. No alteration
// to srcContent is needed as its contents can be
// simply extracted to the destination directory.
return dstInfo.Path, ioutil.NopCloser(srcContent), nil
case dstInfo.Exists && srcInfo.IsDir:
// The destination exists as some type of file and the source
// content is a directory. This is an error condition since
// you cannot copy a directory to an existing file location.
return "", nil, ErrCannotCopyDir
case dstInfo.Exists:
// The destination exists as some type of file and the source content
// is also a file. The source content entry will have to be renamed to
// have a basename which matches the destination path's basename.
if len(srcInfo.RebaseName) != 0 {
srcBase = srcInfo.RebaseName
}
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
case srcInfo.IsDir:
// The destination does not exist and the source content is an archive
// of a directory. The archive should be extracted to the parent of
// the destination path instead, and when it is, the directory that is
// created as a result should take the name of the destination path.
// The source content entries will have to be renamed to have a
// basename which matches the destination path's basename.
if len(srcInfo.RebaseName) != 0 {
srcBase = srcInfo.RebaseName
}
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
case assertsDirectory(dstInfo.Path, os.PathSeparator):
// The destination does not exist and is asserted to be created as a
// directory, but the source content is not a directory. This is an
// error condition since you cannot create a directory from a file
// source.
return "", nil, ErrDirNotExists
default:
// The last remaining case is when the destination does not exist, is
// not asserted to be a directory, and the source content is not an
// archive of a directory. It this case, the destination file will need
// to be created when the archive is extracted and the source content
// entry will have to be renamed to have a basename which matches the
// destination path's basename.
if len(srcInfo.RebaseName) != 0 {
srcBase = srcInfo.RebaseName
}
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
}
}
// RebaseArchiveEntries rewrites the given srcContent archive replacing
// an occurrence of oldBase with newBase at the beginning of entry names.
func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser {
if oldBase == string(os.PathSeparator) {
// If oldBase specifies the root directory, use an empty string as
// oldBase instead so that newBase doesn't replace the path separator
// that all paths will start with.
oldBase = ""
}
rebased, w := io.Pipe()
go func() {
srcTar := tar.NewReader(srcContent)
rebasedTar := tar.NewWriter(w)
for {
hdr, err := srcTar.Next()
if err == io.EOF {
// Signals end of archive.
rebasedTar.Close()
w.Close()
return
}
if err != nil {
w.CloseWithError(err)
return
}
hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
if hdr.Typeflag == tar.TypeLink {
hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1)
}
if err = rebasedTar.WriteHeader(hdr); err != nil {
w.CloseWithError(err)
return
}
if _, err = io.Copy(rebasedTar, srcTar); err != nil {
w.CloseWithError(err)
return
}
}
}()
return rebased
}
// TODO @gupta-ak. These might have to be changed in the future to be
// continuity driver aware as well to support LCOW.
// CopyResource performs an archive copy from the given source path to the
// given destination path. The source path MUST exist and the destination
// path's parent directory must exist.
func CopyResource(srcPath, dstPath string, followLink bool) error {
var (
srcInfo CopyInfo
err error
)
// Ensure in platform semantics
srcPath = normalizePath(srcPath)
dstPath = normalizePath(dstPath)
// Clean the source and destination paths.
srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath, os.PathSeparator)
dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath, os.PathSeparator)
if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
return err
}
content, err := TarResource(srcInfo)
if err != nil {
return err
}
defer content.Close()
return CopyTo(content, srcInfo, dstPath)
}
// CopyTo handles extracting the given content whose
// entries should be sourced from srcInfo to dstPath.
func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error {
// The destination path need not exist, but CopyInfoDestinationPath will
// ensure that at least the parent directory exists.
dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
if err != nil {
return err
}
dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
if err != nil {
return err
}
defer copyArchive.Close()
options := &TarOptions{
NoLchown: true,
NoOverwriteDirNonDir: true,
}
return Untar(copyArchive, dstDir, options)
}
// ResolveHostSourcePath decides real path need to be copied with parameters such as
// whether to follow symbol link or not, if followLink is true, resolvedPath will return
// link target of any symbol link file, else it will only resolve symlink of directory
// but return symbol link file itself without resolving.
func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
if followLink {
resolvedPath, err = filepath.EvalSymlinks(path)
if err != nil {
return
}
resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
} else {
dirPath, basePath := filepath.Split(path)
// if not follow symbol link, then resolve symbol link of parent dir
var resolvedDirPath string
resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
if err != nil {
return
}
// resolvedDirPath will have been cleaned (no trailing path separators) so
// we can manually join it with the base path element.
resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
if hasTrailingPathSeparator(path, os.PathSeparator) &&
filepath.Base(path) != filepath.Base(resolvedPath) {
rebaseName = filepath.Base(path)
}
}
return resolvedPath, rebaseName, nil
}
// GetRebaseName normalizes and compares path and resolvedPath,
// return completed resolved path and rebased file name
func GetRebaseName(path, resolvedPath string) (string, string) {
// resolvedPath will have been cleaned (no trailing path separators or dot), so
// we may need to re-append them manually below.
var rebaseName string
if specifiesCurrentDir(path) &&
!specifiesCurrentDir(resolvedPath) {
resolvedPath += string(filepath.Separator) + "."
}
if hasTrailingPathSeparator(path, os.PathSeparator) &&
!hasTrailingPathSeparator(resolvedPath, os.PathSeparator) {
resolvedPath += string(filepath.Separator)
}
if filepath.Base(path) != filepath.Base(resolvedPath) {
// In the case where the path had a trailing separator and a symlink
// evaluation has changed the last path component, we will need to
// rebase the name in the archive that is being copied to match the
// originally requested name.
rebaseName = filepath.Base(path)
}
return resolvedPath, rebaseName
}
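Usage note (not part of the vendored source): a minimal sketch of driving CopyResource, assuming a host path /tmp/src exists and /tmp is writable; the paths and error handling are illustrative only.
package main
import (
	"log"

	"github.com/docker/docker/pkg/archive"
)
func main() {
	// Copy /tmp/src into /tmp/dst, following symlinks. CopyResource tars the
	// source via TarResource and hands the stream to CopyTo, which prepares
	// and untars it at the destination.
	if err := archive.CopyResource("/tmp/src", "/tmp/dst", true); err != nil {
		log.Fatal(err)
	}
}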

View File

@@ -1,11 +0,0 @@
// +build !windows
package archive
import (
"path/filepath"
)
func normalizePath(path string) string {
return filepath.ToSlash(path)
}

View File

@@ -1,9 +0,0 @@
package archive
import (
"path/filepath"
)
func normalizePath(path string) string {
return filepath.FromSlash(path)
}

View File

@@ -1,256 +0,0 @@
package archive
import (
"archive/tar"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/pools"
"github.com/docker/docker/pkg/system"
"github.com/sirupsen/logrus"
)
// UnpackLayer unpacks `layer` into `dest`. The stream `layer` can be
// compressed or uncompressed.
// Returns the size in bytes of the contents of the layer.
func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) {
tr := tar.NewReader(layer)
trBuf := pools.BufioReader32KPool.Get(tr)
defer pools.BufioReader32KPool.Put(trBuf)
var dirs []*tar.Header
unpackedPaths := make(map[string]struct{})
if options == nil {
options = &TarOptions{}
}
if options.ExcludePatterns == nil {
options.ExcludePatterns = []string{}
}
idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
aufsTempdir := ""
aufsHardlinks := make(map[string]*tar.Header)
// Iterate through the files in the archive.
for {
hdr, err := tr.Next()
if err == io.EOF {
// end of tar archive
break
}
if err != nil {
return 0, err
}
size += hdr.Size
// Normalize name, for safety and for a simple is-root check
hdr.Name = filepath.Clean(hdr.Name)
// Windows does not support filenames with colons in them. Ignore
// these files. This is not a problem though (although it might
// appear that it is). Let's suppose a client is running docker pull.
// The daemon it points to is Windows. Would it make sense for the
// client to be doing a docker pull Ubuntu for example (which has files
// with colons in the name under /usr/share/man/man3)? No, absolutely
// not as it would really only make sense that they were pulling a
// Windows image. However, for development, it is necessary to be able
// to pull Linux images which are in the repository.
//
// TODO Windows. Once the registry is aware of what images are Windows-
// specific or Linux-specific, this warning should be changed to an error
// to cater for the situation where someone does manage to upload a Linux
// image but have it tagged as Windows inadvertently.
if runtime.GOOS == "windows" {
if strings.Contains(hdr.Name, ":") {
logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
continue
}
}
// Note: because these operations are platform specific, the slash must be too.
if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
// Not the root directory, ensure that the parent directory exists.
// This happened in some tests where an image had a tarfile without any
// parent directories.
parent := filepath.Dir(hdr.Name)
parentPath := filepath.Join(dest, parent)
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
err = system.MkdirAll(parentPath, 0600, "")
if err != nil {
return 0, err
}
}
}
// Skip AUFS metadata dirs
if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
// Regular files inside /.wh..wh.plnk can be used as hardlink targets.
// We don't want this directory itself, but we do need the files in it so
// that such hardlinks can be resolved.
if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
basename := filepath.Base(hdr.Name)
aufsHardlinks[basename] = hdr
if aufsTempdir == "" {
if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
return 0, err
}
defer os.RemoveAll(aufsTempdir)
}
if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil {
return 0, err
}
}
if hdr.Name != WhiteoutOpaqueDir {
continue
}
}
path := filepath.Join(dest, hdr.Name)
rel, err := filepath.Rel(dest, path)
if err != nil {
return 0, err
}
// Note: because these operations are platform specific, the slash must be too.
if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
}
base := filepath.Base(path)
if strings.HasPrefix(base, WhiteoutPrefix) {
dir := filepath.Dir(path)
if base == WhiteoutOpaqueDir {
_, err := os.Lstat(dir)
if err != nil {
return 0, err
}
err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
if os.IsNotExist(err) {
err = nil // parent was deleted
}
return err
}
if path == dir {
return nil
}
if _, exists := unpackedPaths[path]; !exists {
err := os.RemoveAll(path)
return err
}
return nil
})
if err != nil {
return 0, err
}
} else {
originalBase := base[len(WhiteoutPrefix):]
originalPath := filepath.Join(dir, originalBase)
if err := os.RemoveAll(originalPath); err != nil {
return 0, err
}
}
} else {
// If path exists we almost always just want to remove and replace it.
// The only exception is when it is a directory *and* the file from
// the layer is also a directory. Then we want to merge them (i.e.
// just apply the metadata from the layer).
if fi, err := os.Lstat(path); err == nil {
if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
if err := os.RemoveAll(path); err != nil {
return 0, err
}
}
}
trBuf.Reset(tr)
srcData := io.Reader(trBuf)
srcHdr := hdr
// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
// we manually retarget these into the temporary files we extracted them into
if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
linkBasename := filepath.Base(hdr.Linkname)
srcHdr = aufsHardlinks[linkBasename]
if srcHdr == nil {
return 0, fmt.Errorf("Invalid aufs hardlink")
}
tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
if err != nil {
return 0, err
}
defer tmpFile.Close()
srcData = tmpFile
}
if err := remapIDs(idMappings, srcHdr); err != nil {
return 0, err
}
if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS); err != nil {
return 0, err
}
// Directory mtimes must be handled at the end, so that file creation inside
// them does not modify the directory mtime after it has been set
if hdr.Typeflag == tar.TypeDir {
dirs = append(dirs, hdr)
}
unpackedPaths[path] = struct{}{}
}
}
for _, hdr := range dirs {
path := filepath.Join(dest, hdr.Name)
if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
return 0, err
}
}
return size, nil
}
// ApplyLayer parses a diff in the standard layer format from `layer`,
// and applies it to the directory `dest`. The stream `layer` can be
// compressed or uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyLayer(dest string, layer io.Reader) (int64, error) {
return applyLayerHandler(dest, layer, &TarOptions{}, true)
}
// ApplyUncompressedLayer parses a diff in the standard layer format from
// `layer`, and applies it to the directory `dest`. The stream `layer`
// can only be uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) {
return applyLayerHandler(dest, layer, options, false)
}
// applyLayerHandler does the bulk of the work for ApplyLayer, but allows the caller to skip DecompressStream
func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) {
dest = filepath.Clean(dest)
// We need to be able to set any perms
oldmask, err := system.Umask(0)
if err != nil {
return 0, err
}
defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform
if decompress {
layer, err = DecompressStream(layer)
if err != nil {
return 0, err
}
}
return UnpackLayer(dest, layer, options)
}
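Usage note (not part of the vendored source): a minimal sketch of applying a layer diff with ApplyLayer; the tarball path and destination directory are assumptions made for illustration.
package main
import (
	"log"
	"os"

	"github.com/docker/docker/pkg/archive"
)
func main() {
	// Open a (possibly compressed) layer tarball; the path is hypothetical.
	layer, err := os.Open("/tmp/layer.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer layer.Close()
	// ApplyLayer decompresses the stream and unpacks it into /tmp/rootfs,
	// honouring whiteout entries along the way.
	size, err := archive.ApplyLayer("/tmp/rootfs", layer)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("applied %d bytes of layer content", size)
}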

View File

@@ -1,97 +0,0 @@
// +build ignore
// Simple tool to create an archive stream from an old and new directory
//
// By default it will stream the comparison of two temporary directories with junk files
package main
import (
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"github.com/docker/docker/pkg/archive"
"github.com/sirupsen/logrus"
)
var (
flDebug = flag.Bool("D", false, "debugging output")
flNewDir = flag.String("newdir", "", "")
flOldDir = flag.String("olddir", "", "")
log = logrus.New()
)
func main() {
flag.Usage = func() {
fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
fmt.Printf("%s [OPTIONS]\n", os.Args[0])
flag.PrintDefaults()
}
flag.Parse()
log.Out = os.Stderr
if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
logrus.SetLevel(logrus.DebugLevel)
}
var newDir, oldDir string
if len(*flNewDir) == 0 {
var err error
newDir, err = ioutil.TempDir("", "docker-test-newDir")
if err != nil {
log.Fatal(err)
}
defer os.RemoveAll(newDir)
if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
log.Fatal(err)
}
} else {
newDir = *flNewDir
}
if len(*flOldDir) == 0 {
var err error
oldDir, err = ioutil.TempDir("", "docker-test-oldDir")
if err != nil {
log.Fatal(err)
}
defer os.RemoveAll(oldDir)
} else {
oldDir = *flOldDir
}
changes, err := archive.ChangesDirs(newDir, oldDir)
if err != nil {
log.Fatal(err)
}
a, err := archive.ExportChanges(newDir, changes)
if err != nil {
log.Fatal(err)
}
defer a.Close()
i, err := io.Copy(os.Stdout, a)
if err != nil && err != io.EOF {
log.Fatal(err)
}
fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
}
func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
fileData := []byte("fooo")
for n := 0; n < numberOfFiles; n++ {
fileName := fmt.Sprintf("file-%d", n)
if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
return 0, err
}
if makeLinks {
if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
return 0, err
}
}
}
totalSize := numberOfFiles * len(fileData)
return totalSize, nil
}

View File

@@ -1,16 +0,0 @@
package archive
import (
"syscall"
"time"
)
func timeToTimespec(time time.Time) (ts syscall.Timespec) {
if time.IsZero() {
// Return UTIME_OMIT special value
ts.Sec = 0
ts.Nsec = ((1 << 30) - 2)
return
}
return syscall.NsecToTimespec(time.UnixNano())
}
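Aside (not part of the vendored source): the magic ((1 << 30) - 2) above is Linux's UTIME_OMIT, which tells utimensat(2) to leave that timestamp unchanged. A tiny sketch checking the value, assuming golang.org/x/sys/unix is available:
package main
import (
	"fmt"

	"golang.org/x/sys/unix"
)
func main() {
	// Linux defines UTIME_OMIT as ((1 << 30) - 2); passing it in the
	// nanosecond field of a timespec leaves that timestamp untouched.
	fmt.Println((1<<30)-2 == unix.UTIME_OMIT) // true
}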

View File

@@ -1,16 +0,0 @@
// +build !linux
package archive
import (
"syscall"
"time"
)
func timeToTimespec(time time.Time) (ts syscall.Timespec) {
nsec := int64(0)
if !time.IsZero() {
nsec = time.UnixNano()
}
return syscall.NsecToTimespec(nsec)
}

View File

@@ -1,23 +0,0 @@
package archive
// Whiteouts are files with a special meaning for the layered filesystem.
// Docker uses AUFS whiteout files inside exported archives. In other
// filesystems these files are generated/handled on tar creation/extraction.
// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a
// filename this means that file has been removed from the base layer.
const WhiteoutPrefix = ".wh."
// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not
// for removing an actual file. Normally these files are excluded from exported
// archives.
const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix
// WhiteoutLinkDir is a directory AUFS uses for storing hardlinks to files in other
// layers. Normally these should not go into exported archives and all changed
// hardlinks should be copied to the top layer.
const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"
// WhiteoutOpaqueDir is a file whose presence means its directory has been made
// opaque: readdir calls on this directory do not fall through to lower layers.
const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
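Usage note (not part of the vendored source): a small sketch of how a consumer of these constants might name a whiteout for a deleted file and recognise an opaque-directory marker; the paths are illustrative only.
package main
import (
	"fmt"
	"path/filepath"
	"strings"

	"github.com/docker/docker/pkg/archive"
)
func main() {
	// Deleting etc/passwd in an upper layer is recorded as a whiteout entry
	// named ".wh.passwd" in the same directory.
	deleted := "etc/passwd"
	whiteout := filepath.Join(filepath.Dir(deleted), archive.WhiteoutPrefix+filepath.Base(deleted))
	fmt.Println(whiteout) // etc/.wh.passwd
	// An entry whose base name is ".wh..wh..opq" marks its directory opaque.
	entry := "etc/.wh..wh..opq"
	fmt.Println(filepath.Base(entry) == archive.WhiteoutOpaqueDir)                    // true
	fmt.Println(strings.HasPrefix(filepath.Base(entry), archive.WhiteoutMetaPrefix)) // true
}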

View File

@@ -1,59 +0,0 @@
package archive
import (
"archive/tar"
"bytes"
"io"
)
// Generate generates a new archive from the content provided
// as input.
//
// `files` is a sequence of path/content pairs. A new file is
// added to the archive for each pair.
// If the last pair is incomplete, the file is created with an
// empty content. For example:
//
// Generate("foo.txt", "hello world", "emptyfile")
//
// The above call will return an archive with 2 files:
// * ./foo.txt with content "hello world"
// * ./emptyfile with empty content
//
// FIXME: stream content instead of buffering
// FIXME: specify permissions and other archive metadata
func Generate(input ...string) (io.Reader, error) {
files := parseStringPairs(input...)
buf := new(bytes.Buffer)
tw := tar.NewWriter(buf)
for _, file := range files {
name, content := file[0], file[1]
hdr := &tar.Header{
Name: name,
Size: int64(len(content)),
}
if err := tw.WriteHeader(hdr); err != nil {
return nil, err
}
if _, err := tw.Write([]byte(content)); err != nil {
return nil, err
}
}
if err := tw.Close(); err != nil {
return nil, err
}
return buf, nil
}
func parseStringPairs(input ...string) (output [][2]string) {
output = make([][2]string, 0, len(input)/2+1)
for i := 0; i < len(input); i += 2 {
var pair [2]string
pair[0] = input[i]
if i+1 < len(input) {
pair[1] = input[i+1]
}
output = append(output, pair)
}
return
}
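Usage note (not part of the vendored source): a minimal sketch of Generate producing the two-entry archive described in the comment above; writing the raw tar to stdout is just for illustration.
package main
import (
	"io"
	"log"
	"os"

	"github.com/docker/docker/pkg/archive"
)
func main() {
	// Build a two-file archive: foo.txt with content, emptyfile without.
	rdr, err := archive.Generate("foo.txt", "hello world", "emptyfile")
	if err != nil {
		log.Fatal(err)
	}
	// Stream the raw tar to stdout (pipe into `tar tv` to inspect it).
	if _, err := io.Copy(os.Stdout, rdr); err != nil {
		log.Fatal(err)
	}
}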

View File

@@ -1,317 +0,0 @@
package jsonmessage
import (
"encoding/json"
"fmt"
"io"
"os"
"strings"
"time"
gotty "github.com/Nvveen/Gotty"
"github.com/docker/docker/pkg/term"
units "github.com/docker/go-units"
)
// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to
// ensure the formatted time is always the same number of characters.
const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
// JSONError wraps a concrete Code and Message. `Code` is an integer error
// code and `Message` is the error message.
type JSONError struct {
Code int `json:"code,omitempty"`
Message string `json:"message,omitempty"`
}
func (e *JSONError) Error() string {
return e.Message
}
// JSONProgress describes a Progress. terminalFd is the fd of the current terminal,
// Start is the initial value for the operation. Current is the current status and
// value of the progress made towards Total. Total is the end value describing when
// we made 100% progress for an operation.
type JSONProgress struct {
terminalFd uintptr
Current int64 `json:"current,omitempty"`
Total int64 `json:"total,omitempty"`
Start int64 `json:"start,omitempty"`
// If true, don't show xB/yB
HideCounts bool `json:"hidecounts,omitempty"`
Units string `json:"units,omitempty"`
}
func (p *JSONProgress) String() string {
var (
width = 200
pbBox string
numbersBox string
timeLeftBox string
)
ws, err := term.GetWinsize(p.terminalFd)
if err == nil {
width = int(ws.Width)
}
if p.Current <= 0 && p.Total <= 0 {
return ""
}
if p.Total <= 0 {
switch p.Units {
case "":
current := units.HumanSize(float64(p.Current))
return fmt.Sprintf("%8v", current)
default:
return fmt.Sprintf("%d %s", p.Current, p.Units)
}
}
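// The bar is drawn 50 characters wide, so the percentage is halved to give
// the number of '=' characters; the remainder is padded with spaces.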
percentage := int(float64(p.Current)/float64(p.Total)*100) / 2
if percentage > 50 {
percentage = 50
}
if width > 110 {
// this number can't be negative gh#7136
numSpaces := 0
if 50-percentage > 0 {
numSpaces = 50 - percentage
}
pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces))
}
switch {
case p.HideCounts:
case p.Units == "": // no units, use bytes
current := units.HumanSize(float64(p.Current))
total := units.HumanSize(float64(p.Total))
numbersBox = fmt.Sprintf("%8v/%v", current, total)
if p.Current > p.Total {
// remove total display if the reported current is wonky.
numbersBox = fmt.Sprintf("%8v", current)
}
default:
numbersBox = fmt.Sprintf("%d/%d %s", p.Current, p.Total, p.Units)
if p.Current > p.Total {
// remove total display if the reported current is wonky.
numbersBox = fmt.Sprintf("%d %s", p.Current, p.Units)
}
}
if p.Current > 0 && p.Start > 0 && percentage < 50 {
fromStart := time.Now().UTC().Sub(time.Unix(p.Start, 0))
perEntry := fromStart / time.Duration(p.Current)
left := time.Duration(p.Total-p.Current) * perEntry
left = (left / time.Second) * time.Second
if width > 50 {
timeLeftBox = " " + left.String()
}
}
return pbBox + numbersBox + timeLeftBox
}
// JSONMessage defines a message struct. It describes
// the created time, where it comes from, the status, and the ID of the
// message. It's used for docker events.
type JSONMessage struct {
Stream string `json:"stream,omitempty"`
Status string `json:"status,omitempty"`
Progress *JSONProgress `json:"progressDetail,omitempty"`
ProgressMessage string `json:"progress,omitempty"` //deprecated
ID string `json:"id,omitempty"`
From string `json:"from,omitempty"`
Time int64 `json:"time,omitempty"`
TimeNano int64 `json:"timeNano,omitempty"`
Error *JSONError `json:"errorDetail,omitempty"`
ErrorMessage string `json:"error,omitempty"` //deprecated
// Aux contains out-of-band data, such as digests for push signing and image id after building.
Aux *json.RawMessage `json:"aux,omitempty"`
}
/* Satisfied by gotty.TermInfo as well as noTermInfo from below */
type termInfo interface {
Parse(attr string, params ...interface{}) (string, error)
}
type noTermInfo struct{} // canary used when no terminfo.
func (ti *noTermInfo) Parse(attr string, params ...interface{}) (string, error) {
return "", fmt.Errorf("noTermInfo")
}
func clearLine(out io.Writer, ti termInfo) {
// el2 (clear whole line) is not exposed by terminfo.
// First clear line from beginning to cursor
if attr, err := ti.Parse("el1"); err == nil {
fmt.Fprintf(out, "%s", attr)
} else {
fmt.Fprintf(out, "\x1b[1K")
}
// Then clear line from cursor to end
if attr, err := ti.Parse("el"); err == nil {
fmt.Fprintf(out, "%s", attr)
} else {
fmt.Fprintf(out, "\x1b[K")
}
}
func cursorUp(out io.Writer, ti termInfo, l int) {
if l == 0 { // Should never be the case, but be tolerant
return
}
if attr, err := ti.Parse("cuu", l); err == nil {
fmt.Fprintf(out, "%s", attr)
} else {
fmt.Fprintf(out, "\x1b[%dA", l)
}
}
func cursorDown(out io.Writer, ti termInfo, l int) {
if l == 0 { // Should never be the case, but be tolerant
return
}
if attr, err := ti.Parse("cud", l); err == nil {
fmt.Fprintf(out, "%s", attr)
} else {
fmt.Fprintf(out, "\x1b[%dB", l)
}
}
// Display displays the JSONMessage to `out`. `termInfo` is non-nil if `out`
// is a terminal. If this is the case, it will erase the entire current line
// when displaying the progressbar.
func (jm *JSONMessage) Display(out io.Writer, termInfo termInfo) error {
if jm.Error != nil {
if jm.Error.Code == 401 {
return fmt.Errorf("authentication is required")
}
return jm.Error
}
var endl string
if termInfo != nil && jm.Stream == "" && jm.Progress != nil {
clearLine(out, termInfo)
endl = "\r"
fmt.Fprintf(out, endl)
} else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal
return nil
}
if jm.TimeNano != 0 {
fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(RFC3339NanoFixed))
} else if jm.Time != 0 {
fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(RFC3339NanoFixed))
}
if jm.ID != "" {
fmt.Fprintf(out, "%s: ", jm.ID)
}
if jm.From != "" {
fmt.Fprintf(out, "(from %s) ", jm.From)
}
if jm.Progress != nil && termInfo != nil {
fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl)
} else if jm.ProgressMessage != "" { //deprecated
fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl)
} else if jm.Stream != "" {
fmt.Fprintf(out, "%s%s", jm.Stream, endl)
} else {
fmt.Fprintf(out, "%s%s\n", jm.Status, endl)
}
return nil
}
// DisplayJSONMessagesStream displays a json message stream from `in` to `out`, `isTerminal`
// describes if `out` is a terminal. If this is the case, it will print `\n` at the end of
// each line and move the cursor while displaying.
func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(*json.RawMessage)) error {
var (
dec = json.NewDecoder(in)
ids = make(map[string]int)
)
var termInfo termInfo
if isTerminal {
term := os.Getenv("TERM")
if term == "" {
term = "vt102"
}
var err error
if termInfo, err = gotty.OpenTermInfo(term); err != nil {
termInfo = &noTermInfo{}
}
}
for {
diff := 0
var jm JSONMessage
if err := dec.Decode(&jm); err != nil {
if err == io.EOF {
break
}
return err
}
if jm.Aux != nil {
if auxCallback != nil {
auxCallback(jm.Aux)
}
continue
}
if jm.Progress != nil {
jm.Progress.terminalFd = terminalFd
}
if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") {
line, ok := ids[jm.ID]
if !ok {
// NOTE: This approach of using len(ids) to
// figure out the number of lines of history
// only works as long as we clear the history
// when we output something that's not
// accounted for in the map, such as a line
// with no ID.
line = len(ids)
ids[jm.ID] = line
if termInfo != nil {
fmt.Fprintf(out, "\n")
}
}
diff = len(ids) - line
if termInfo != nil {
cursorUp(out, termInfo, diff)
}
} else {
// When outputting something that isn't progress
// output, clear the history of previous lines. We
// don't want progress entries from some previous
// operation to be updated (for example, pull -a
// with multiple tags).
ids = make(map[string]int)
}
err := jm.Display(out, termInfo)
if jm.ID != "" && termInfo != nil {
cursorDown(out, termInfo, diff)
}
if err != nil {
return err
}
}
return nil
}
type stream interface {
io.Writer
FD() uintptr
IsTerminal() bool
}
// DisplayJSONMessagesToStream prints json messages to the output stream
func DisplayJSONMessagesToStream(in io.Reader, stream stream, auxCallback func(*json.RawMessage)) error {
return DisplayJSONMessagesStream(in, stream, stream.FD(), stream.IsTerminal(), auxCallback)
}
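Usage note (not part of the vendored source): a minimal sketch of rendering a message stream with DisplayJSONMessagesStream; the canned JSON stands in for a real daemon response, and pkg/term is assumed to be available for the fd lookup.
package main
import (
	"log"
	"os"
	"strings"

	"github.com/docker/docker/pkg/jsonmessage"
	"github.com/docker/docker/pkg/term"
)
func main() {
	// Canned stream; real input would usually come from an HTTP response body.
	in := strings.NewReader(`{"status":"Pulling","id":"abc123"}
{"status":"Downloading","id":"abc123","progressDetail":{"current":10,"total":100}}
{"status":"Done","id":"abc123"}
`)
	// Detect whether stdout is a terminal so progress lines are redrawn in place.
	fd, isTerminal := term.GetFdInfo(os.Stdout)
	if err := jsonmessage.DisplayJSONMessagesStream(in, os.Stdout, fd, isTerminal, nil); err != nil {
		log.Fatal(err)
	}
}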

View File

@@ -1,32 +0,0 @@
// +build ignore
package main
import (
"encoding/json"
"io/ioutil"
"os"
"path/filepath"
"github.com/docker/docker/profiles/seccomp"
)
// saves the default seccomp profile as a json file so people can use it as a
// base for their own custom profiles
func main() {
wd, err := os.Getwd()
if err != nil {
panic(err)
}
f := filepath.Join(wd, "default.json")
// write the default profile to the file
b, err := json.MarshalIndent(seccomp.DefaultProfile(), "", "\t")
if err != nil {
panic(err)
}
if err := ioutil.WriteFile(f, b, 0644); err != nil {
panic(err)
}
}